1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
5 * Copyright (c) 2004 Intel Corporation. All rights reserved.
6 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
7 * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
8 */
9
10 #ifndef IB_MAD_H
11 #define IB_MAD_H
12
13 #include <linux/list.h>
14
15 #include <rdma/ib_verbs.h>
16 #include <uapi/rdma/ib_user_mad.h>
17
18 /* Management base versions */
19 #define IB_MGMT_BASE_VERSION 1
20 #define OPA_MGMT_BASE_VERSION 0x80
21
22 #define OPA_SM_CLASS_VERSION 0x80
23
24 /* Management classes */
25 #define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
26 #define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
27 #define IB_MGMT_CLASS_SUBN_ADM 0x03
28 #define IB_MGMT_CLASS_PERF_MGMT 0x04
29 #define IB_MGMT_CLASS_BM 0x05
30 #define IB_MGMT_CLASS_DEVICE_MGMT 0x06
31 #define IB_MGMT_CLASS_CM 0x07
32 #define IB_MGMT_CLASS_SNMP 0x08
33 #define IB_MGMT_CLASS_DEVICE_ADM 0x10
34 #define IB_MGMT_CLASS_BOOT_MGMT 0x11
35 #define IB_MGMT_CLASS_BIS 0x12
36 #define IB_MGMT_CLASS_CONG_MGMT 0x21
37 #define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
38 #define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F
39
40 #define IB_OPENIB_OUI (0x001405)
41
42 /* Management methods */
43 #define IB_MGMT_METHOD_GET 0x01
44 #define IB_MGMT_METHOD_SET 0x02
45 #define IB_MGMT_METHOD_GET_RESP 0x81
46 #define IB_MGMT_METHOD_SEND 0x03
47 #define IB_MGMT_METHOD_TRAP 0x05
48 #define IB_MGMT_METHOD_REPORT 0x06
49 #define IB_MGMT_METHOD_REPORT_RESP 0x86
50 #define IB_MGMT_METHOD_TRAP_REPRESS 0x07
51 #define IB_MGMT_METHOD_GET_TABLE 0x12
52
53 #define IB_MGMT_METHOD_RESP 0x80
54 #define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
55
56 #define IB_MGMT_MAX_METHODS 128
57
58 /* MAD Status field bit masks */
59 #define IB_MGMT_MAD_STATUS_SUCCESS 0x0000
60 #define IB_MGMT_MAD_STATUS_BUSY 0x0001
61 #define IB_MGMT_MAD_STATUS_REDIRECT_REQD 0x0002
62 #define IB_MGMT_MAD_STATUS_BAD_VERSION 0x0004
63 #define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD 0x0008
64 #define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB 0x000c
65 #define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE 0x001c
66
67 /* RMPP information */
68 #define IB_MGMT_RMPP_VERSION 1
69
70 #define IB_MGMT_RMPP_TYPE_DATA 1
71 #define IB_MGMT_RMPP_TYPE_ACK 2
72 #define IB_MGMT_RMPP_TYPE_STOP 3
73 #define IB_MGMT_RMPP_TYPE_ABORT 4
74
75 #define IB_MGMT_RMPP_FLAG_ACTIVE 1
76 #define IB_MGMT_RMPP_FLAG_FIRST (1<<1)
77 #define IB_MGMT_RMPP_FLAG_LAST (1<<2)
78
79 #define IB_MGMT_RMPP_NO_RESPTIME 0x1F
80
81 #define IB_MGMT_RMPP_STATUS_SUCCESS 0
82 #define IB_MGMT_RMPP_STATUS_RESX 1
83 #define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
84 #define IB_MGMT_RMPP_STATUS_T2L 118
85 #define IB_MGMT_RMPP_STATUS_BAD_LEN 119
86 #define IB_MGMT_RMPP_STATUS_BAD_SEG 120
87 #define IB_MGMT_RMPP_STATUS_BADT 121
88 #define IB_MGMT_RMPP_STATUS_W2S 122
89 #define IB_MGMT_RMPP_STATUS_S2B 123
90 #define IB_MGMT_RMPP_STATUS_BAD_STATUS 124
91 #define IB_MGMT_RMPP_STATUS_UNV 125
92 #define IB_MGMT_RMPP_STATUS_TMR 126
93 #define IB_MGMT_RMPP_STATUS_UNSPEC 127
94 #define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
95
96 #define IB_QP0 0
97 #define IB_QP1 cpu_to_be32(1)
98 #define IB_QP1_QKEY 0x80010000
99 #define IB_QP_SET_QKEY 0x80000000
100
101 #define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
102 #define IB_DEFAULT_PKEY_FULL 0xFFFF
103
104 /*
105 * Generic trap/notice types
106 */
107 #define IB_NOTICE_TYPE_FATAL 0x80
108 #define IB_NOTICE_TYPE_URGENT 0x81
109 #define IB_NOTICE_TYPE_SECURITY 0x82
110 #define IB_NOTICE_TYPE_SM 0x83
111 #define IB_NOTICE_TYPE_INFO 0x84
112
113 /*
114 * Generic trap/notice producers
115 */
116 #define IB_NOTICE_PROD_CA cpu_to_be16(1)
117 #define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
118 #define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
119 #define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
120
/* MAD wire-format sizes in bytes; *_HDR + *_DATA pairs sum to the MAD size */
enum {
	IB_MGMT_MAD_HDR = 24,		/* common MAD header */
	IB_MGMT_MAD_DATA = 232,		/* payload of a plain MAD */
	IB_MGMT_RMPP_HDR = 36,		/* common header + RMPP header */
	IB_MGMT_RMPP_DATA = 220,	/* payload of an RMPP MAD */
	IB_MGMT_VENDOR_HDR = 40,	/* RMPP header + reserved/OUI bytes */
	IB_MGMT_VENDOR_DATA = 216,	/* payload of a vendor-class MAD */
	IB_MGMT_SA_HDR = 56,		/* RMPP header + SA header */
	IB_MGMT_SA_DATA = 200,		/* payload of an SA MAD */
	IB_MGMT_DEVICE_HDR = 64,	/* header of a device-management MAD */
	IB_MGMT_DEVICE_DATA = 192,	/* payload of a device-management MAD */
	IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,	/* 256 */
	OPA_MGMT_MAD_DATA = 2024,	/* payload of an OPA MAD */
	OPA_MGMT_RMPP_DATA = 2012,	/* payload of an OPA RMPP MAD */
	OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA, /* 2048 */
};
137
/* Common header at the start of every MAD, shared by all management classes */
struct ib_mad_hdr {
	u8 base_version;	/* IB_MGMT_BASE_VERSION or OPA_MGMT_BASE_VERSION */
	u8 mgmt_class;		/* IB_MGMT_CLASS_* */
	u8 class_version;
	u8 method;		/* IB_MGMT_METHOD_*; 0x80 bit marks a response */
	__be16 status;		/* IB_MGMT_MAD_STATUS_* bits */
	__be16 class_specific;
	__be64 tid;		/* transaction ID */
	__be16 attr_id;
	__be16 resv;
	__be32 attr_mod;	/* attribute modifier */
};
150
/* RMPP (segmentation/reassembly) header, follows the common MAD header */
struct ib_rmpp_hdr {
	u8 rmpp_version;	/* IB_MGMT_RMPP_VERSION when RMPP is in use */
	u8 rmpp_type;		/* IB_MGMT_RMPP_TYPE_* */
	u8 rmpp_rtime_flags;	/* resp time in high 5 bits, flags in low 3 */
	u8 rmpp_status;		/* IB_MGMT_RMPP_STATUS_* */
	__be32 seg_num;
	__be32 paylen_newwin;	/* payload length or new window, depending on rmpp_type */
};
159
160 typedef u64 __bitwise ib_sa_comp_mask;
161
162 #define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n)))
163
164 /*
165 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
166 * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
167 * lay them out wrong otherwise. (And unfortunately they are sent on
168 * the wire so we can't change the layout)
169 */
struct ib_sa_hdr {
	__be64 sm_key;
	__be16 attr_offset;	/* attribute record size in units of 8 bytes */
	__be16 reserved;
	ib_sa_comp_mask comp_mask;	/* which attribute fields are valid; see IB_SA_COMP_MASK() */
} __packed;	/* packed: 64-bit fields are only 32-bit aligned (see note above) */
176
/* A plain (non-RMPP) IB MAD: 24-byte header + 232 data bytes = 256 total */
struct ib_mad {
	struct ib_mad_hdr mad_hdr;
	u8 data[IB_MGMT_MAD_DATA];
};
181
/* An OPA-sized MAD: same common header, 2024 data bytes = 2048 total */
struct opa_mad {
	struct ib_mad_hdr mad_hdr;
	u8 data[OPA_MGMT_MAD_DATA];
};
186
/* An IB MAD carrying an RMPP header (segmented transfers) */
struct ib_rmpp_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	u8 data[IB_MGMT_RMPP_DATA];
};
192
/* An OPA-sized MAD carrying an RMPP header */
struct opa_rmpp_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	u8 data[OPA_MGMT_RMPP_DATA];
};
198
/* Subnet Administration MAD: common + RMPP + SA headers, then SA data */
struct ib_sa_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	struct ib_sa_hdr sa_hdr;
	u8 data[IB_MGMT_SA_DATA];
} __packed;	/* packed because ib_sa_hdr contains misaligned 64-bit fields */
205
/* Vendor-class MAD (classes 0x30-0x4F); identified by the vendor's IEEE OUI */
struct ib_vendor_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	u8 reserved;
	u8 oui[3];	/* IEEE OUI of the vendor, e.g. IB_OPENIB_OUI */
	u8 data[IB_MGMT_VENDOR_DATA];
};
213
214 #define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
215
216 #define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F
217 #define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5
218
/* ClassPortInfo attribute layout (attr_id IB_MGMT_CLASSPORTINFO_ATTR_ID) */
struct ib_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 capability_mask;
	/* 27 bits for cap_mask2, 5 bits for resp_time */
	__be32 cap_mask2_resp_time;	/* use the ib_{get,set}_cpi_* accessors below */
	u8 redirect_gid[16];	/* where to redirect requests */
	__be32 redirect_tcslfl;	/* packed tclass/SL/flow-label (per field name) */
	__be16 redirect_lid;
	__be16 redirect_pkey;
	__be32 redirect_qp;
	__be32 redirect_qkey;
	u8 trap_gid[16];	/* where to send traps */
	__be32 trap_tcslfl;
	__be16 trap_lid;
	__be16 trap_pkey;
	__be32 trap_hlqp;
	__be32 trap_qkey;
};
238
239 /* PortInfo CapabilityMask */
/* Bit positions in PortInfo CapabilityMask; bits 0 and 13 are not defined here */
enum ib_port_capability_mask_bits {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CAP_MASK2_SUP = 1 << 15,	/* CapabilityMask2 field is valid */
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_OTHER_LOCAL_CHANGES_SUP = 1 << 26,
	IB_PORT_LINK_SPEED_WIDTH_TABLE_SUP = 1 << 27,
	IB_PORT_VENDOR_SPECIFIC_MADS_TABLE_SUP = 1 << 28,
	IB_PORT_MCAST_PKEY_TRAP_SUPPRESSION_SUP = 1 << 29,
	IB_PORT_MCAST_FDB_TOP_SUP = 1 << 30,
	/* 1ULL avoids shifting into the sign bit of a 32-bit int */
	IB_PORT_HIERARCHY_INFO_SUP = 1ULL << 31,
};
272
/* Bit positions in PortInfo CapabilityMask2 (valid when IB_PORT_CAP_MASK2_SUP) */
enum ib_port_capability_mask2_bits {
	IB_PORT_SET_NODE_DESC_SUP = 1 << 0,
	IB_PORT_EX_PORT_INFO_EX_SUP = 1 << 1,
	IB_PORT_VIRT_SUP = 1 << 2,
	IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
	IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
	IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
	IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
	IB_PORT_EXTENDED_SPEEDS2_SUP = 1 << 11,
	IB_PORT_LINK_SPEED_XDR_SUP = 1 << 12,
};
284
285 #define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
286
/* OPA variant of ClassPortInfo; LIDs are 32 bits wide, unlike the IB layout */
struct opa_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	__be32 cap_mask2_resp_time;	/* same split as IB: 27-bit cap_mask2, 5-bit resp_time */

	u8 redirect_gid[16];
	__be32 redirect_tc_fl;
	__be32 redirect_lid;	/* 32-bit LID */
	__be32 redirect_sl_qp;
	__be32 redirect_qkey;

	u8 trap_gid[16];
	__be32 trap_tc_fl;
	__be32 trap_lid;	/* 32-bit LID */
	__be32 trap_hl_qp;
	__be32 trap_qkey;

	__be16 trap_pkey;
	__be16 redirect_pkey;

	u8 trap_sl_rsvd;
	u8 reserved[3];
} __packed;
311
312 /**
313 * ib_get_cpi_resp_time - Returns the resp_time value from
314 * cap_mask2_resp_time in ib_class_port_info.
315 * @cpi: A struct ib_class_port_info mad.
316 */
ib_get_cpi_resp_time(struct ib_class_port_info * cpi)317 static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
318 {
319 return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
320 IB_CLASS_PORT_INFO_RESP_TIME_MASK);
321 }
322
323 /**
324 * ib_set_cpi_resptime - Sets the response time in an
325 * ib_class_port_info mad.
326 * @cpi: A struct ib_class_port_info.
327 * @rtime: The response time to set.
328 */
ib_set_cpi_resp_time(struct ib_class_port_info * cpi,u8 rtime)329 static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
330 u8 rtime)
331 {
332 cpi->cap_mask2_resp_time =
333 (cpi->cap_mask2_resp_time &
334 cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
335 cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
336 }
337
338 /**
339 * ib_get_cpi_capmask2 - Returns the capmask2 value from
340 * cap_mask2_resp_time in ib_class_port_info.
341 * @cpi: A struct ib_class_port_info mad.
342 */
ib_get_cpi_capmask2(struct ib_class_port_info * cpi)343 static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
344 {
345 return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
346 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
347 }
348
349 /**
350 * ib_set_cpi_capmask2 - Sets the capmask2 in an
351 * ib_class_port_info mad.
352 * @cpi: A struct ib_class_port_info.
353 * @capmask2: The capmask2 to set.
354 */
ib_set_cpi_capmask2(struct ib_class_port_info * cpi,u32 capmask2)355 static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
356 u32 capmask2)
357 {
358 cpi->cap_mask2_resp_time =
359 (cpi->cap_mask2_resp_time &
360 cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
361 cpu_to_be32(capmask2 <<
362 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
363 }
364
365 /**
366 * opa_get_cpi_capmask2 - Returns the capmask2 value from
367 * cap_mask2_resp_time in ib_class_port_info.
368 * @cpi: A struct opa_class_port_info mad.
369 */
opa_get_cpi_capmask2(struct opa_class_port_info * cpi)370 static inline u32 opa_get_cpi_capmask2(struct opa_class_port_info *cpi)
371 {
372 return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
373 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
374 }
375
/*
 * Trap/Notice attribute. Which member of the details union applies
 * depends on the trap/notice number (the ntc_* names give the ranges).
 */
struct ib_mad_notice_attr {
	u8 generic_type;	/* IB_NOTICE_TYPE_* */
	u8 prod_type_msb;	/* with prod_type_lsb: IB_NOTICE_PROD_* producer */
	__be16 prod_type_lsb;
	__be16 trap_num;
	__be16 issuer_lid;
	__be16 toggle_count;

	union {
		struct {
			u8 details[54];	/* raw access to the detail bytes */
		} raw_data;

		struct {
			__be16 reserved;
			__be16 lid; /* where violation happened */
			u8 port_num; /* where violation happened */
		} __packed ntc_129_131;

		struct {
			__be16 reserved;
			__be16 lid; /* LID where change occurred */
			u8 reserved2;
			u8 local_changes; /* low bit - local changes */
			__be32 new_cap_mask; /* new capability mask */
			u8 reserved3;
			u8 change_flags; /* low 3 bits only */
		} __packed ntc_144;

		struct {
			__be16 reserved;
			__be16 lid; /* lid where sys guid changed */
			__be16 reserved2;
			__be64 new_sys_guid;
		} __packed ntc_145;

		struct {
			__be16 reserved;
			__be16 lid;
			__be16 dr_slid;
			u8 method;
			u8 reserved2;
			__be16 attr_id;
			__be32 attr_mod;
			__be64 mkey;
			u8 reserved3;
			u8 dr_trunc_hop;
			u8 dr_rtn_path[30];
		} __packed ntc_256;

		struct {
			__be16 reserved;
			__be16 lid1;
			__be16 lid2;
			__be32 key;
			__be32 sl_qp1; /* SL: high 4 bits */
			__be32 qp2; /* high 8 bits reserved */
			union ib_gid gid1;
			union ib_gid gid2;
		} __packed ntc_257_258;

	} details;
};
439
440 /**
441 * ib_mad_send_buf - MAD data buffer and work request for sends.
442 * @next: A pointer used to chain together MADs for posting.
443 * @mad: References an allocated MAD data buffer for MADs that do not have
444 * RMPP active. For MADs using RMPP, references the common and management
445 * class specific headers.
446 * @mad_agent: MAD agent that allocated the buffer.
447 * @ah: The address handle to use when sending the MAD.
448 * @context: User-controlled context fields.
449 * @hdr_len: Indicates the size of the data header of the MAD. This length
450 * includes the common MAD, RMPP, and class specific headers.
451 * @data_len: Indicates the total size of user-transferred data.
452 * @seg_count: The number of RMPP segments allocated for this send.
453 * @seg_size: Size of the data in each RMPP segment. This does not include
454 * class specific headers.
455 * @seg_rmpp_size: Size of each RMPP segment including the class specific
456 * headers.
457 * @timeout_ms: Time to wait for a response.
458 * @retries: Number of times to retry a request for a response. For MADs
459 * using RMPP, this applies per window. On completion, returns the number
460 * of retries needed to complete the transfer.
461 *
462 * Users are responsible for initializing the MAD buffer itself, with the
463 * exception of any RMPP header. Additional segment buffer space allocated
464 * beyond data_len is padding.
465 */
/* Field semantics are documented in the kernel-doc block above. */
struct ib_mad_send_buf {
	struct ib_mad_send_buf *next;	/* chain for posting multiple MADs */
	void *mad;
	struct ib_mad_agent *mad_agent;
	struct ib_ah *ah;
	void *context[2];	/* user-controlled */
	int hdr_len;
	int data_len;
	int seg_count;
	int seg_size;
	int seg_rmpp_size;
	int timeout_ms;
	int retries;	/* in: retry budget; out: retries actually used */
};
480
481 /**
482 * ib_response_mad - Returns if the specified MAD has been generated in
483 * response to a sent request or trap.
484 */
485 int ib_response_mad(const struct ib_mad_hdr *hdr);
486
487 /**
488 * ib_get_rmpp_resptime - Returns the RMPP response time.
489 * @rmpp_hdr: An RMPP header.
490 */
ib_get_rmpp_resptime(struct ib_rmpp_hdr * rmpp_hdr)491 static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
492 {
493 return rmpp_hdr->rmpp_rtime_flags >> 3;
494 }
495
496 /**
497 * ib_get_rmpp_flags - Returns the RMPP flags.
498 * @rmpp_hdr: An RMPP header.
499 */
ib_get_rmpp_flags(struct ib_rmpp_hdr * rmpp_hdr)500 static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
501 {
502 return rmpp_hdr->rmpp_rtime_flags & 0x7;
503 }
504
505 /**
506 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
507 * @rmpp_hdr: An RMPP header.
508 * @rtime: The response time to set.
509 */
ib_set_rmpp_resptime(struct ib_rmpp_hdr * rmpp_hdr,u8 rtime)510 static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
511 {
512 rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
513 }
514
515 /**
516 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
517 * @rmpp_hdr: An RMPP header.
518 * @flags: The flags to set.
519 */
ib_set_rmpp_flags(struct ib_rmpp_hdr * rmpp_hdr,u8 flags)520 static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
521 {
522 rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
523 (flags & 0x7);
524 }
525
526 struct ib_mad_agent;
527 struct ib_mad_send_wc;
528 struct ib_mad_recv_wc;
529
530 /**
531 * ib_mad_send_handler - callback handler for a sent MAD.
532 * @mad_agent: MAD agent that sent the MAD.
533 * @mad_send_wc: Send work completion information on the sent MAD.
534 */
535 typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
536 struct ib_mad_send_wc *mad_send_wc);
537
538 /**
539 * ib_mad_recv_handler - callback handler for a received MAD.
540 * @mad_agent: MAD agent requesting the received MAD.
541 * @send_buf: Send buffer if found, else NULL
542 * @mad_recv_wc: Received work completion information on the received MAD.
543 *
544 * MADs received in response to a send request operation will be handed to
545 * the user before the send operation completes. All data buffers given
546 * to registered agents through this routine are owned by the receiving
547 * client.
548 */
549 typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
550 struct ib_mad_send_buf *send_buf,
551 struct ib_mad_recv_wc *mad_recv_wc);
552
553 /**
554 * ib_mad_agent - Used to track MAD registration with the access layer.
555 * @device: Reference to device registration is on.
556 * @qp: Reference to QP used for sending and receiving MADs.
557 * @mr: Memory region for system memory usable for DMA.
558 * @recv_handler: Callback handler for a received MAD.
559 * @send_handler: Callback handler for a sent MAD.
560 * @context: User-specified context associated with this registration.
561 * @hi_tid: Access layer assigned transaction ID for this client.
562 * Unsolicited MADs sent by this client will have the upper 32-bits
563 * of their TID set to this value.
564 * @flags: registration flags
565 * @port_num: Port number on which QP is registered
566 * @rmpp_version: If set, indicates the RMPP version used by this agent.
567 */
/* Values for ib_mad_agent.flags; mirrors the uapi registration flag */
enum {
	IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,	/* RMPP handled in userspace */
};
/* Most fields are documented in the kernel-doc block above. */
struct ib_mad_agent {
	struct ib_device *device;
	struct ib_qp *qp;
	ib_mad_recv_handler recv_handler;
	ib_mad_send_handler send_handler;
	void *context;
	u32 hi_tid;	/* upper 32 bits of TIDs on unsolicited sends */
	u32 flags;	/* e.g. IB_MAD_USER_RMPP */
	void *security;	/* NOTE(review): opaque security state, managed by the MAD core — confirm */
	struct list_head mad_agent_sec_list;
	u8 port_num;
	u8 rmpp_version;	/* nonzero when this agent uses RMPP */
	bool smp_allowed;	/* NOTE(review): presumably gates SMP (SMI) traffic — confirm */
};
585
586 /**
587 * ib_mad_send_wc - MAD send completion information.
588 * @send_buf: Send MAD data buffer associated with the send MAD request.
589 * @status: Completion status.
590 * @vendor_err: Optional vendor error information returned with a failed
591 * request.
592 */
/* Send completion info handed to the agent's send_handler; see doc above */
struct ib_mad_send_wc {
	struct ib_mad_send_buf *send_buf;
	enum ib_wc_status status;
	u32 vendor_err;	/* only meaningful on failure */
};
598
599 /**
600 * ib_mad_recv_buf - received MAD buffer information.
601 * @list: Reference to next data buffer for a received RMPP MAD.
602 * @grh: References a data buffer containing the global route header.
 * The data referenced by this buffer is only valid if the GRH is
604 * valid.
605 * @mad: References the start of the received MAD.
606 */
struct ib_mad_recv_buf {
	struct list_head list;	/* links RMPP segments together */
	struct ib_grh *grh;	/* only valid when the wc reports a GRH */
	union {
		/* same buffer viewed as an IB-sized or OPA-sized MAD */
		struct ib_mad *mad;
		struct opa_mad *opa_mad;
	};
};
615
616 /**
617 * ib_mad_recv_wc - received MAD information.
618 * @wc: Completion information for the received data.
619 * @recv_buf: Specifies the location of the received data buffer(s).
620 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
621 * @mad_len: The length of the received MAD, without duplicated headers.
622 * @mad_seg_size: The size of individual MAD segments
623 *
624 * For received response, the wr_id contains a pointer to the ib_mad_send_buf
625 * for the corresponding send request.
626 */
/* Receive completion info handed to the agent's recv_handler; see doc above */
struct ib_mad_recv_wc {
	struct ib_wc *wc;
	struct ib_mad_recv_buf recv_buf;
	struct list_head rmpp_list;	/* reassembled RMPP segment buffers */
	int mad_len;	/* total MAD length without duplicated headers */
	size_t mad_seg_size;
};
634
635 /**
636 * ib_mad_reg_req - MAD registration request
 * @mgmt_class: Indicates which management class of MADs should be received
638 * by the caller. This field is only required if the user wishes to
639 * receive unsolicited MADs, otherwise it should be 0.
640 * @mgmt_class_version: Indicates which version of MADs for the given
641 * management class to receive.
642 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
643 * in the range from 0x30 to 0x4f. Otherwise not used.
644 * @method_mask: The caller will receive unsolicited MADs for any method
645 * where @method_mask = 1.
646 *
647 */
/* Field semantics are documented in the kernel-doc block above. */
struct ib_mad_reg_req {
	u8 mgmt_class;	/* 0 if no unsolicited MADs are wanted */
	u8 mgmt_class_version;
	u8 oui[3];	/* only used for vendor classes 0x30-0x4F */
	DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);	/* which methods to deliver */
};
654
655 /**
656 * ib_register_mad_agent - Register to send/receive MADs.
657 * @device: The device to register with.
658 * @port_num: The port on the specified device to use.
659 * @qp_type: Specifies which QP to access. Must be either
660 * IB_QPT_SMI or IB_QPT_GSI.
661 * @mad_reg_req: Specifies which unsolicited MADs should be received
662 * by the caller. This parameter may be NULL if the caller only
663 * wishes to receive solicited responses.
664 * @rmpp_version: If set, indicates that the client will send
665 * and receive MADs that contain the RMPP header for the given version.
666 * If set to 0, indicates that RMPP is not used by this client.
667 * @send_handler: The completion callback routine invoked after a send
668 * request has completed.
669 * @recv_handler: The completion callback routine invoked for a received
670 * MAD.
671 * @context: User specified context associated with the registration.
672 * @registration_flags: Registration flags to set for this agent
673 */
674 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
675 u32 port_num,
676 enum ib_qp_type qp_type,
677 struct ib_mad_reg_req *mad_reg_req,
678 u8 rmpp_version,
679 ib_mad_send_handler send_handler,
680 ib_mad_recv_handler recv_handler,
681 void *context,
682 u32 registration_flags);
683 /**
684 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
685 * @mad_agent: Corresponding MAD registration request to deregister.
686 *
687 * After invoking this routine, MAD services are no longer usable by the
688 * client on the associated QP.
689 */
690 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
691
692 /**
693 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
694 * with the registered client.
695 * @send_buf: Specifies the information needed to send the MAD(s).
696 * @bad_send_buf: Specifies the MAD on which an error was encountered. This
697 * parameter is optional if only a single MAD is posted.
698 *
699 * Sent MADs are not guaranteed to complete in the order that they were posted.
700 *
701 * If the MAD requires RMPP, the data buffer should contain a single copy
702 * of the common MAD, RMPP, and class specific headers, followed by the class
703 * defined data. If the class defined data would not divide evenly into
704 * RMPP segments, then space must be allocated at the end of the referenced
705 * buffer for any required padding. To indicate the amount of class defined
706 * data being transferred, the paylen_newwin field in the RMPP header should
707 * be set to the size of the class specific header plus the amount of class
708 * defined data being transferred. The paylen_newwin field should be
709 * specified in network-byte order.
710 */
711 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
712 struct ib_mad_send_buf **bad_send_buf);
713
714
715 /**
716 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
717 * @mad_recv_wc: Work completion information for a received MAD.
718 *
719 * Clients receiving MADs through their ib_mad_recv_handler must call this
720 * routine to return the work completion buffers to the access layer.
721 */
722 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
723
724 /**
725 * ib_modify_mad - Modifies an outstanding send MAD operation.
726 * @send_buf: Indicates the MAD to modify.
727 * @timeout_ms: New timeout value for sent MAD.
728 *
729 * This call will reset the timeout value for a sent MAD to the specified
730 * value.
731 */
732 int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms);
733
/**
 * ib_cancel_mad - Cancels an outstanding send MAD operation.
 * @send_buf: Indicates the MAD to cancel.
 *
 * MADs will be returned to the user through the corresponding
 * ib_mad_send_handler.
 */
static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf)
{
	/* a zero timeout expires the outstanding send immediately */
	ib_modify_mad(send_buf, 0);
}
745
746 /**
747 * ib_create_send_mad - Allocate and initialize a data buffer and work request
748 * for sending a MAD.
749 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
750 * @remote_qpn: Specifies the QPN of the receiving node.
751 * @pkey_index: Specifies which PKey the MAD will be sent using. This field
752 * is valid only if the remote_qpn is QP 1.
753 * @rmpp_active: Indicates if the send will enable RMPP.
754 * @hdr_len: Indicates the size of the data header of the MAD. This length
755 * should include the common MAD header, RMPP header, plus any class
756 * specific header.
757 * @data_len: Indicates the size of any user-transferred data. The call will
758 * automatically adjust the allocated buffer size to account for any
759 * additional padding that may be necessary.
760 * @gfp_mask: GFP mask used for the memory allocation.
761 * @base_version: Base Version of this MAD
762 *
763 * This routine allocates a MAD for sending. The returned MAD send buffer
764 * will reference a data buffer usable for sending a MAD, along
765 * with an initialized work request structure. Users may modify the returned
766 * MAD data buffer before posting the send.
767 *
768 * The returned MAD header, class specific headers, and any padding will be
769 * cleared. Users are responsible for initializing the common MAD header,
770 * any class specific header, and MAD data area.
771 * If @rmpp_active is set, the RMPP header will be initialized for sending.
772 */
773 struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
774 u32 remote_qpn, u16 pkey_index,
775 int rmpp_active,
776 int hdr_len, int data_len,
777 gfp_t gfp_mask,
778 u8 base_version);
779
780 /**
781 * ib_is_mad_class_rmpp - returns whether given management class
782 * supports RMPP.
783 * @mgmt_class: management class
784 *
785 * This routine returns whether the management class supports RMPP.
786 */
787 int ib_is_mad_class_rmpp(u8 mgmt_class);
788
789 /**
790 * ib_get_mad_data_offset - returns the data offset for a given
791 * management class.
792 * @mgmt_class: management class
793 *
794 * This routine returns the data offset in the MAD for the management
795 * class requested.
796 */
797 int ib_get_mad_data_offset(u8 mgmt_class);
798
799 /**
800 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
801 * @send_buf: Previously allocated send data buffer.
802 * @seg_num: number of segment to return
803 *
804 * This routine returns a pointer to the data buffer of an RMPP MAD.
805 * Users must provide synchronization to @send_buf around this call.
806 */
807 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
808
809 /**
810 * ib_free_send_mad - Returns data buffers used to send a MAD.
811 * @send_buf: Previously allocated send data buffer.
812 */
813 void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
814
815 /**
816 * ib_mad_kernel_rmpp_agent - Returns if the agent is performing RMPP.
817 * @agent: the agent in question
818 * @return: true if agent is performing rmpp, false otherwise.
819 */
820 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
821
822 #endif /* IB_MAD_H */
823