@@ -445,6 +445,8 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
copy_query_dev_fields(file, &resp, &attr);
+ if (resp.atomic_cap > IB_ATOMIC_GLOB)
+ resp.atomic_cap = IB_ATOMIC_NONE;
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -3186,7 +3188,7 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (err)
return err;
- if (cmd.comp_mask)
+ if (cmd.comp_mask & ~IB_UVERBS_EX_QUERY_DEV_MAX_MASK)
return -EINVAL;
err = device->ex_query_device(device, &attr, uhw);
@@ -3197,8 +3199,19 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
copy_query_dev_fields(file,
(struct ib_uverbs_query_device_resp *)(&resp),
(struct ib_device_attr *)&attr);
resp.comp_mask = 0;
+ resp.masked_atomic_cap = IB_ATOMIC_NONE;
+ resp.log_atomic_arg_sizes = 0;
+ resp.max_fa_bit_boundary = 0;
+ resp.log_max_atomic_inline = 0;
+ if (cmd.comp_mask & IB_UVERBS_EX_QUERY_DEV_MASKED_ATOMIC) {
+ resp.masked_atomic_cap = attr.masked_atomic_cap;
+ resp.log_atomic_arg_sizes = attr.log_atomic_arg_sizes;
+ resp.max_fa_bit_boundary = attr.max_fa_bit_boundary;
+ resp.log_max_atomic_inline = attr.log_max_atomic_inline;
+ resp.comp_mask |= IB_UVERBS_EX_QUERY_DEV_MASKED_ATOMIC;
+ }
err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
if (err)
return err;
@@ -140,7 +140,9 @@ enum ib_signature_guard_cap {
enum ib_atomic_cap {
IB_ATOMIC_NONE,
IB_ATOMIC_HCA,
- IB_ATOMIC_GLOB
+ IB_ATOMIC_GLOB,
+ IB_ATOMIC_HCA_REPLY_BE,
+ IB_ATOMIC_REPLY_BE_GLOB,
};
struct ib_device_attr {
@@ -186,6 +188,9 @@ struct ib_device_attr {
u8 local_ca_ack_delay;
int sig_prot_cap;
int sig_guard_cap;
+ u32 log_atomic_arg_sizes; /* bit-mask of supported sizes */
+ u32 max_fa_bit_boundary;
+ u32 log_max_atomic_inline;
};
enum ib_mtu {
@@ -202,6 +202,12 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
+/* ...MAX_MASK covers all comp_mask bits up to and including the last defined shift */
+enum {
+ IB_UVERBS_EX_QUERY_DEV_MASKED_ATOMIC = 1 << 0,
+ IB_UVERBS_EX_QUERY_DEV_MAX_MASK = (1 << (0 /*last shift value */ + 1)) - 1,
+};
+
struct ib_uverbs_ex_query_device {
__u64 driver_data[0];
__u32 comp_mask;
@@ -251,6 +257,10 @@ struct ib_uverbs_ex_query_device_resp {
__u8 phys_port_cnt;
__u8 reserved[4];
__u32 comp_mask;
+ __u32 masked_atomic_cap;
+ __u32 log_atomic_arg_sizes; /* bit-mask of supported sizes */
+ __u32 max_fa_bit_boundary;
+ __u32 log_max_atomic_inline;
};
struct ib_uverbs_query_port {
Add the option for a device to report responses to atomic operations in network (big endian) byte order. This is required by the subsequent patch adding support for Connect-IB extended atomic operations. Connect-IB extended atomic operations provide masked compare-and-swap and multi-field fetch-and-add; they generalize the 8-byte-argument atomics first introduced in ConnectX devices by supporting varying argument sizes. enum ib_atomic_cap is extended with big endian variants. The device attributes struct gains three new fields: log_atomic_arg_sizes - a bit mask encoding which argument sizes are supported. A set bit at location n (zero based) means an argument of size 2 ^ n is supported. max_fa_bit_boundary - max fetch-and-add bit boundary. Multi-field fetch-and-add operations use a bit mask that defines bit locations where the carry bit is not propagated to the next higher-order bit. So, if this field has the value 64, it means the max value subject to fetch-and-add is 64 bits, i.e. there is no carry from bit 63 to 64, from bit 127 to 128, etc. log_max_atomic_inline - atomic arguments can be inline in the WQE or referenced through a memory key. This value defines the max inline argument size possible. Extended atomic operations were first introduced in commit 5e80ba8ff0bd Signed-off-by: Eli Cohen <eli@mellanox.com> --- drivers/infiniband/core/uverbs_cmd.c | 17 +++++++++++++++-- include/rdma/ib_verbs.h | 7 ++++++- include/uapi/rdma/ib_user_verbs.h | 10 ++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-)