/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_H
#define RXE_H

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "rxe_net.h"
#include "rxe_opcode.h"
#include "rxe_hdr.h"
#include "rxe_param.h"
#include "rxe_verbs.h"
#include "rxe_loc.h"

/*
 * Version 1 and Version 2 are identical on 64-bit machines, but on 32-bit
 * machines Version 2 has a different struct layout.
 */
#define RXE_UVERBS_ABI_VERSION		2

#define RXE_ROCE_V2_SPORT		(0xc000)

#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev,		\
		"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device,		\
		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_pd(pd, fmt, ...) ibdev_dbg((pd)->ibpd.device,		\
		"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_ah(ah, fmt, ...) ibdev_dbg((ah)->ibah.device,		\
		"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_srq(srq, fmt, ...) ibdev_dbg((srq)->ibsrq.device,	\
		"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_qp(qp, fmt, ...) ibdev_dbg((qp)->ibqp.device,		\
		"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device,		\
		"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_mr(mr, fmt, ...) ibdev_dbg((mr)->ibmr.device,		\
		"mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device,		\
		"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)

#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__,	\
		##__VA_ARGS__)
#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
		"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \
		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \
		"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \
		"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
		"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \
		"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
		"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \
		"mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
		"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)

#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__,	\
		##__VA_ARGS__)
#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
		"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \
		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \
		"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \
		"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \
		"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \
		"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \
		"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \
		"mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
		"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)

void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);

int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
	    struct net_device *ndev);

void rxe_rcv(struct sk_buff *skb);

/* The caller must do a matching ib_device_put(&dev->ib_dev) */
static inline struct rxe_dev *rxe_get_dev_from_net(struct net_device *ndev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(ndev, RDMA_DRIVER_RXE);

	if (!ibdev)
		return NULL;
	return container_of(ibdev, struct rxe_dev, ib_dev);
}
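
/*
 * Sketch of the expected call pattern (illustrative, not a real caller):
 * a successful lookup holds a device reference that must be released
 * with ib_device_put(), as noted above.
 *
 *	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
 *
 *	if (rxe) {
 *		... use rxe ...
 *		ib_device_put(&rxe->ib_dev);
 *	}
 */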

void rxe_port_up(struct rxe_dev *rxe);
void rxe_port_down(struct rxe_dev *rxe);
void rxe_set_port_state(struct rxe_dev *rxe);

#endif /* RXE_H */