1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6 #include <linux/overflow.h>
7 #include <rdma/uverbs_std_types.h>
8 #include "rdma_core.h"
9 #include "uverbs.h"
10 #include <rdma/uverbs_ioctl.h>
11 #include <rdma/opa_addr.h>
12 #include <rdma/ib_cache.h>
13
14 /*
15 * This ioctl method allows calling any defined write or write_ex
16 * handler. This essentially replaces the hdr/ex_hdr system with the ioctl
17 * marshalling, and brings the non-ex path into the same marshalling as the ex
18 * path.
19 */
static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)(
	struct uverbs_attr_bundle *attrs)
{
	struct uverbs_api *uapi = attrs->ufile->device->uapi;
	const struct uverbs_api_write_method *method_elm;
	u32 cmd;
	int rc;

	/* The legacy write command number arrives as a constant attribute. */
	rc = uverbs_get_const(&cmd, attrs, UVERBS_ATTR_WRITE_CMD);
	if (rc)
		return rc;

	/* Look up the write/write_ex handler registered for this command. */
	method_elm = uapi_get_method(uapi, cmd);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	/* Build the udata the write handler expects from the ioctl attrs. */
	uverbs_fill_udata(attrs, &attrs->ucore, UVERBS_ATTR_CORE_IN,
			  UVERBS_ATTR_CORE_OUT);

	/* Enforce the minimum req/resp sizes the handler was registered with. */
	if (attrs->ucore.inlen < method_elm->req_size ||
	    attrs->ucore.outlen < method_elm->resp_size)
		return -ENOSPC;

	attrs->uobject = NULL;
	rc = method_elm->handler(attrs);
	/*
	 * If the handler created a new uobject, finalize it: !rc selects
	 * commit on success vs. abort on failure.
	 */
	if (attrs->uobject)
		uverbs_finalize_object(attrs->uobject, UVERBS_ACCESS_NEW, true,
				       !rc, attrs);
	return rc;
}
50
/*
 * Attribute layout for INVOKE_WRITE: mandatory command number, optional
 * core request/response buffers, plus the driver-private UHW in/out pair.
 */
DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE,
			    UVERBS_ATTR_CONST_IN(UVERBS_ATTR_WRITE_CMD,
						 enum ib_uverbs_write_cmds,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CORE_IN,
					       UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
					       UA_OPTIONAL),
			    UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CORE_OUT,
						UVERBS_ATTR_MIN_SIZE(0),
						UA_OPTIONAL),
			    UVERBS_ATTR_UHW());
62
63 static uint32_t *
gather_objects_handle(struct ib_uverbs_file * ufile,const struct uverbs_api_object * uapi_object,struct uverbs_attr_bundle * attrs,ssize_t out_len,u64 * total)64 gather_objects_handle(struct ib_uverbs_file *ufile,
65 const struct uverbs_api_object *uapi_object,
66 struct uverbs_attr_bundle *attrs,
67 ssize_t out_len,
68 u64 *total)
69 {
70 u64 max_count = out_len / sizeof(u32);
71 struct ib_uobject *obj;
72 u64 count = 0;
73 u32 *handles;
74
75 /* Allocated memory that cannot page out where we gather
76 * all object ids under a spin_lock.
77 */
78 handles = uverbs_zalloc(attrs, out_len);
79 if (IS_ERR(handles))
80 return handles;
81
82 spin_lock_irq(&ufile->uobjects_lock);
83 list_for_each_entry(obj, &ufile->uobjects, list) {
84 u32 obj_id = obj->id;
85
86 if (obj->uapi_object != uapi_object)
87 continue;
88
89 if (count >= max_count)
90 break;
91
92 handles[count] = obj_id;
93 count++;
94 }
95 spin_unlock_irq(&ufile->uobjects_lock);
96
97 *total = count;
98 return handles;
99 }
100
UVERBS_METHOD_INFO_HANDLES(struct uverbs_attr_bundle * attrs)101 static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
102 struct uverbs_attr_bundle *attrs)
103 {
104 const struct uverbs_api_object *uapi_object;
105 ssize_t out_len;
106 u64 total = 0;
107 u16 object_id;
108 u32 *handles;
109 int ret;
110
111 out_len = uverbs_attr_get_len(attrs, UVERBS_ATTR_INFO_HANDLES_LIST);
112 if (out_len <= 0 || (out_len % sizeof(u32) != 0))
113 return -EINVAL;
114
115 ret = uverbs_get_const(&object_id, attrs, UVERBS_ATTR_INFO_OBJECT_ID);
116 if (ret)
117 return ret;
118
119 uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
120 if (IS_ERR(uapi_object))
121 return PTR_ERR(uapi_object);
122
123 handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
124 out_len, &total);
125 if (IS_ERR(handles))
126 return PTR_ERR(handles);
127
128 ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_HANDLES_LIST, handles,
129 sizeof(u32) * total);
130 if (ret)
131 goto err;
132
133 ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_TOTAL_HANDLES, &total,
134 sizeof(total));
135 err:
136 return ret;
137 }
138
/*
 * copy_port_attr_to_resp - translate kernel ib_port_attr into the
 * user-visible query_port response layout.
 * @attr: port attributes as returned by ib_query_port()
 * @resp: response structure to fill
 * @ib_dev: device owning the port
 * @port_num: port being queried
 *
 * Non-static: presumably shared with the legacy write() query_port
 * path — NOTE(review): confirm the external caller.
 */
void copy_port_attr_to_resp(struct ib_port_attr *attr,
			    struct ib_uverbs_query_port_resp *resp,
			    struct ib_device *ib_dev, u8 port_num)
{
	resp->state = attr->state;
	resp->max_mtu = attr->max_mtu;
	resp->active_mtu = attr->active_mtu;
	resp->gid_tbl_len = attr->gid_tbl_len;
	resp->port_cap_flags = make_port_cap_flags(attr);
	resp->max_msg_sz = attr->max_msg_sz;
	resp->bad_pkey_cntr = attr->bad_pkey_cntr;
	resp->qkey_viol_cntr = attr->qkey_viol_cntr;
	resp->pkey_tbl_len = attr->pkey_tbl_len;

	if (rdma_is_grh_required(ib_dev, port_num))
		resp->flags |= IB_UVERBS_QPF_GRH_REQUIRED;

	/* OPA ports carry extended LIDs; convert to IB unicast LID form. */
	if (rdma_cap_opa_ah(ib_dev, port_num)) {
		resp->lid = OPA_TO_IB_UCAST_LID(attr->lid);
		resp->sm_lid = OPA_TO_IB_UCAST_LID(attr->sm_lid);
	} else {
		resp->lid = ib_lid_cpu16(attr->lid);
		resp->sm_lid = ib_lid_cpu16(attr->sm_lid);
	}

	resp->lmc = attr->lmc;
	resp->max_vl_num = attr->max_vl_num;
	resp->sm_sl = attr->sm_sl;
	resp->subnet_timeout = attr->subnet_timeout;
	resp->init_type_reply = attr->init_type_reply;
	resp->active_width = attr->active_width;
	/* This ABI needs to be extended to provide any speed more than IB_SPEED_NDR */
	resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR);
	resp->phys_state = attr->phys_state;
	resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
}
175
/*
 * Extended query_port: fills the legacy response plus the _ex-only
 * fields (port_cap_flags2 and the untruncated active_speed_ex).
 */
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_device *ib_dev;
	struct ib_port_attr attr = {};
	struct ib_uverbs_query_port_resp_ex resp = {};
	struct ib_ucontext *ucontext;
	int ret;
	u8 port_num;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	/* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
	if (!ib_dev->ops.query_port)
		return -EOPNOTSUPP;

	ret = uverbs_get_const(&port_num, attrs,
			       UVERBS_ATTR_QUERY_PORT_PORT_NUM);
	if (ret)
		return ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
	resp.port_cap_flags2 = attr.port_cap_flags2;
	/* Unlike legacy_resp.active_speed, this field is not capped at NDR. */
	resp.active_speed_ex = attr.active_speed;

	return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP,
					     &resp, sizeof(resp));
}
211
/*
 * Report a port's speed as a u64 via the driver's query_port_speed op.
 * NOTE(review): query_port_speed is a device-specific op; units of
 * @speed are defined by the driver — confirm against the op's contract.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT_SPEED)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	u32 port_num;
	u64 speed;
	int ret;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	if (!ib_dev->ops.query_port_speed)
		return -EOPNOTSUPP;

	ret = uverbs_get_const(&port_num, attrs,
			       UVERBS_ATTR_QUERY_PORT_SPEED_PORT_NUM);
	if (ret)
		return ret;

	/* Reject out-of-range port numbers before calling the driver. */
	if (!rdma_is_port_valid(ib_dev, port_num))
		return -EINVAL;

	ret = ib_dev->ops.query_port_speed(ib_dev, port_num, &speed);
	if (ret)
		return ret;

	return uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_PORT_SPEED_RESP,
			      &speed, sizeof(speed));
}
244
static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
	struct uverbs_attr_bundle *attrs)
{
	u32 num_comp = attrs->ufile->device->num_comp_vectors;
	u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
	int ret;

	/*
	 * Both output attrs are UA_OPTIONAL; IS_UVERBS_COPY_ERR()
	 * presumably treats an absent attribute as non-fatal — confirm
	 * against its definition.
	 */
	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
			     &num_comp, sizeof(num_comp));
	if (IS_UVERBS_COPY_ERR(ret))
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
			     &core_support, sizeof(core_support));
	if (IS_UVERBS_COPY_ERR(ret))
		return ret;

	/* Allocate first, then initialize; unwind the allocation on failure. */
	ret = ib_alloc_ucontext(attrs);
	if (ret)
		return ret;
	ret = ib_init_ucontext(attrs);
	if (ret) {
		kfree(attrs->context);
		attrs->context = NULL;
		return ret;
	}
	return 0;
}
273
UVERBS_METHOD_QUERY_CONTEXT(struct uverbs_attr_bundle * attrs)274 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)(
275 struct uverbs_attr_bundle *attrs)
276 {
277 u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
278 struct ib_ucontext *ucontext;
279 struct ib_device *ib_dev;
280 u32 num_comp;
281 int ret;
282
283 ucontext = ib_uverbs_get_ucontext(attrs);
284 if (IS_ERR(ucontext))
285 return PTR_ERR(ucontext);
286 ib_dev = ucontext->device;
287
288 if (!ib_dev->ops.query_ucontext)
289 return -EOPNOTSUPP;
290
291 num_comp = attrs->ufile->device->num_comp_vectors;
292 ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
293 &num_comp, sizeof(num_comp));
294 if (IS_UVERBS_COPY_ERR(ret))
295 return ret;
296
297 ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
298 &core_support, sizeof(core_support));
299 if (IS_UVERBS_COPY_ERR(ret))
300 return ret;
301
302 return ucontext->device->ops.query_ucontext(ucontext, attrs);
303 }
304
/*
 * Copy @num_entries gid entries to the userspace array, honouring the
 * caller-declared per-entry stride @user_entry_size. When the stride
 * matches the kernel struct, a single bulk copy is used; otherwise each
 * entry is truncated or zero-padded to the user's stride.
 */
static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs,
				    struct ib_uverbs_gid_entry *entries,
				    size_t num_entries, size_t user_entry_size)
{
	const struct uverbs_attr *attr;
	void __user *user_entries;
	size_t copy_len;
	int ret;
	int i;

	/* Fast path: identical layout, one bulk copy. */
	if (user_entry_size == sizeof(*entries)) {
		ret = uverbs_copy_to(attrs,
				     UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
				     entries, sizeof(*entries) * num_entries);
		return ret;
	}

	/* Copy only what both layouts have in common per entry. */
	copy_len = min_t(size_t, user_entry_size, sizeof(*entries));
	attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
	if (IS_ERR(attr))
		return PTR_ERR(attr);

	user_entries = u64_to_user_ptr(attr->ptr_attr.data);
	for (i = 0; i < num_entries; i++) {
		if (copy_to_user(user_entries, entries, copy_len))
			return -EFAULT;

		/* Zero the tail of a user entry larger than the kernel one. */
		if (user_entry_size > sizeof(*entries)) {
			if (clear_user(user_entries + sizeof(*entries),
				       user_entry_size - sizeof(*entries)))
				return -EFAULT;
		}

		entries++;
		user_entries += user_entry_size;
	}

	/* Record how many bytes were produced for this output attribute. */
	return uverbs_output_written(attrs,
				     UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
}
345
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_gid_entry *entries;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	size_t user_entry_size;
	ssize_t num_entries;
	int max_entries;
	u32 flags;
	int ret;

	/* No flags defined yet (allowed mask 0); any set bit is rejected. */
	ret = uverbs_get_flags32(&flags, attrs,
				 UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0);
	if (ret)
		return ret;

	/* Userspace declares the stride of its gid-entry array. */
	ret = uverbs_get_const(&user_entry_size, attrs,
			       UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE);
	if (ret)
		return ret;

	if (!user_entry_size)
		return -EINVAL;

	/* Capacity of the user buffer in entries of the declared stride. */
	max_entries = uverbs_attr_ptr_get_array_size(
		attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
		user_entry_size);
	if (max_entries <= 0)
		return max_entries ?: -EINVAL;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	/* Bundle-lifetime allocation; no explicit free on the paths below. */
	entries = uverbs_kcalloc(attrs, max_entries, sizeof(*entries));
	if (IS_ERR(entries))
		return PTR_ERR(entries);

	num_entries = rdma_query_gid_table(ib_dev, entries, max_entries);
	if (num_entries < 0)
		return -EINVAL;

	ret = copy_gid_entries_to_user(attrs, entries, num_entries,
				       user_entry_size);
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs,
			     UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
			     &num_entries, sizeof(num_entries));
	return ret;
}
400
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_gid_entry entry = {};
	const struct ib_gid_attr *gid_attr;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	struct net_device *ndev;
	u32 gid_index;
	u32 port_num;
	u32 flags;
	int ret;

	/* No flags defined yet (allowed mask 0); any set bit is rejected. */
	ret = uverbs_get_flags32(&flags, attrs,
				 UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0);
	if (ret)
		return ret;

	ret = uverbs_get_const(&port_num, attrs,
			       UVERBS_ATTR_QUERY_GID_ENTRY_PORT);
	if (ret)
		return ret;

	ret = uverbs_get_const(&gid_index, attrs,
			       UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX);
	if (ret)
		return ret;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	if (!rdma_is_port_valid(ib_dev, port_num))
		return -EINVAL;

	/* Takes a reference on success; released via rdma_put_gid_attr(). */
	gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid));
	entry.gid_index = gid_attr->index;
	entry.port_num = gid_attr->port_num;
	entry.gid_type = gid_attr->gid_type;

	/* The ndev lookup is only valid under RCU. */
	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(gid_attr);
	if (IS_ERR(ndev)) {
		/* -ENODEV is tolerated: netdev_ifindex simply stays 0. */
		if (PTR_ERR(ndev) != -ENODEV) {
			ret = PTR_ERR(ndev);
			rcu_read_unlock();
			goto out;
		}
	} else {
		entry.netdev_ifindex = ndev->ifindex;
	}
	rcu_read_unlock();

	ret = uverbs_copy_to_struct_or_zero(
		attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry,
		sizeof(entry));
out:
	rdma_put_gid_attr(gid_attr);
	return ret;
}
466
/*
 * Attribute specifications for each method above. These tables are
 * interpreted by the uverbs ioctl layer before the handlers run.
 */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_GET_CONTEXT,
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
			    UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_GET_CONTEXT_FD_ARR,
			   UVERBS_ATTR_MIN_SIZE(sizeof(int)),
			   UA_OPTIONAL,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_UHW());

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_CONTEXT,
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
			    UVERBS_ATTR_TYPE(u64), UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_INFO_HANDLES,
	/* Also includes any device specific object ids */
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_INFO_OBJECT_ID,
			     enum uverbs_default_objects, UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_TOTAL_HANDLES,
			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_HANDLES_LIST,
			    UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_PORT,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_PORT_NUM, u8, UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(
		UVERBS_ATTR_QUERY_PORT_RESP,
		UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex,
				   active_speed_ex),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_PORT_SPEED,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_SPEED_PORT_NUM, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_PORT_SPEED_RESP,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_GID_TABLE,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64,
			     UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
			    UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_GID_ENTRY,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
			    UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry,
					       netdev_ifindex),
			    UA_MANDATORY));

/* All methods above hang off the global DEVICE object. */
DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
			      &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
			      &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
			      &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT_SPEED),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY));

/* Chained into the core uapi definition list by the uverbs module. */
const struct uapi_definition uverbs_def_obj_device[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE),
	{},
};
551