// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

/* Timeout (in jiffies) for the non-sleepable busy-wait in __rxe_cleanup();
 * it is added directly to jiffies, see the mdelay() loop below.
 */
#define RXE_POOL_TIMEOUT	(200)

/* Pool element sizes are rounded up to this alignment in rxe_pool_init() */
#define RXE_POOL_ALIGN		(16)

/* Static per-object-type parameters: size of the containing object, the
 * offset of the embedded struct rxe_pool_elem inside it, an optional
 * type-specific cleanup callback, and the allowed index/element ranges.
 */
static const struct rxe_type_info {
	const char *name;	/* pool name, copied into pool->name */
	size_t size;		/* size of the containing object */
	size_t elem_offset;	/* offsetof(<object>, elem) */
	void (*cleanup)(struct rxe_pool_elem *elem); /* optional destructor */
	u32 min_index;		/* lowest index the xarray may hand out */
	u32 max_index;		/* highest index the xarray may hand out */
	u32 max_elem;		/* max number of live objects in the pool */
} rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, elem),
		.min_index	= 1,
		.max_index	= RXE_MAX_UCONTEXT,
		.max_elem	= RXE_MAX_UCONTEXT,
	},
	[RXE_TYPE_PD] = {
		.name		= "pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, elem),
		.min_index	= 1,
		.max_index	= RXE_MAX_PD,
		.max_elem	= RXE_MAX_PD,
	},
	[RXE_TYPE_AH] = {
		.name		= "ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, elem),
		.min_index	= RXE_MIN_AH_INDEX,
		.max_index	= RXE_MAX_AH_INDEX,
		.max_elem	= RXE_MAX_AH,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, elem),
		.cleanup	= rxe_srq_cleanup,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
		.max_elem	= RXE_MAX_SRQ,
	},
	[RXE_TYPE_QP] = {
		.name		= "qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, elem),
		.cleanup	= rxe_qp_cleanup,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
		.max_elem	= RXE_MAX_QP,
	},
	[RXE_TYPE_CQ] = {
		.name		= "cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, elem),
		.cleanup	= rxe_cq_cleanup,
		.min_index	= 1,
		.max_index	= RXE_MAX_CQ,
		.max_elem	= RXE_MAX_CQ,
	},
	[RXE_TYPE_MR] = {
		.name		= "mr",
		.size		= sizeof(struct rxe_mr),
		.elem_offset	= offsetof(struct rxe_mr, elem),
		.cleanup	= rxe_mr_cleanup,
		.min_index	= RXE_MIN_MR_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.max_elem	= RXE_MAX_MR,
	},
	[RXE_TYPE_MW] = {
		.name		= "mw",
		.size		= sizeof(struct rxe_mw),
		.elem_offset	= offsetof(struct rxe_mw, elem),
		.cleanup	= rxe_mw_cleanup,
		.min_index	= RXE_MIN_MW_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.max_elem	= RXE_MAX_MW,
	},
};

/* Initialize one object pool from the static rxe_type_info[] parameters
 * for @type. The xarray is set up for cyclic index allocation within
 * [min_index, max_index].
 */
void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
		   enum rxe_elem_type type)
{
	const struct rxe_type_info *info = &rxe_type_info[type];

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->name		= info->name;
	pool->type		= type;
	pool->max_elem		= info->max_elem;
	pool->elem_size		= ALIGN(info->size, RXE_POOL_ALIGN);
	pool->elem_offset	= info->elem_offset;
	pool->cleanup		= info->cleanup;

	atomic_set(&pool->num_elem, 0);

	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
	pool->limit.min = info->min_index;
	pool->limit.max = info->max_index;
}

/* Tear down a pool; all elements must already have been removed.
 * A non-empty xarray here means an object leaked past __rxe_cleanup().
 */
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	WARN_ON(!xa_empty(&pool->xa));
}

/* Add a caller-allocated object (via its embedded @elem) to @pool and
 * assign it an index. The xarray entry is initially stored as NULL so
 * the object is not visible to rxe_pool_get_index() until
 * __rxe_finalize() publishes it. Returns 0 on success or a negative
 * errno (-EINVAL if the pool is full, or the xa_alloc_cyclic() error).
 */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
		      bool sleepable)
{
	int err = -EINVAL;
	gfp_t gfp_flags;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto err_cnt;

	elem->pool = pool;
	/* recover the pointer to the containing object from the
	 * embedded element's offset
	 */
	elem->obj = (u8 *)elem - pool->elem_offset;
	kref_init(&elem->ref_cnt);
	init_completion(&elem->complete);

	/* AH objects are unique in that the create_ah verb
	 * can be called in atomic context. If the create_ah
	 * call is not sleepable use GFP_ATOMIC.
	 */
	gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;

	if (sleepable)
		might_sleep();
	/* store NULL for now; __rxe_finalize() makes the object visible */
	err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
			      &pool->next, gfp_flags);
	if (err < 0)
		goto err_cnt;

	return 0;

err_cnt:
	atomic_dec(&pool->num_elem);
	return err;
}

/* Look up the object with @index in @pool and take a reference on it.
 * Runs under RCU; returns NULL if the index is unused, not yet
 * finalized, or the element's refcount has already dropped to zero
 * (i.e. the object is being destroyed).
 */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rxe_pool_elem *elem;
	struct xarray *xa = &pool->xa;
	void *obj;

	rcu_read_lock();
	elem = xa_load(xa, index);
	if (elem && kref_get_unless_zero(&elem->ref_cnt))
		obj = elem->obj;
	else
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

/* kref release callback: signal the waiter in __rxe_cleanup() that the
 * last reference has been dropped.
 */
static void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);

	complete(&elem->complete);
}

/* Remove an object from its pool: hide it from further lookups, drop
 * the initial reference, wait for any remaining references to go away,
 * then run the type-specific cleanup. Returns 0 on success or
 * -ETIMEDOUT if references were still outstanding after the wait
 * (50 s when sleepable, RXE_POOL_TIMEOUT jiffies of mdelay(1)
 * polling otherwise).
 */
int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
{
	struct rxe_pool *pool = elem->pool;
	struct xarray *xa = &pool->xa;
	int ret, err = 0;
	void *xa_ret;

	if (sleepable)
		might_sleep();

	/* erase xarray entry to prevent looking up
	 * the pool elem from its index
	 */
	xa_ret = xa_erase(xa, elem->index);
	WARN_ON(xa_err(xa_ret));

	/* if this is the last call to rxe_put complete the
	 * object. It is safe to touch obj->elem after this since
	 * it is freed below
	 */
	__rxe_put(elem);

	/* wait until all references to the object have been
	 * dropped before final object specific cleanup and
	 * return to rdma-core
	 */
	if (sleepable) {
		if (!completion_done(&elem->complete)) {
			ret = wait_for_completion_timeout(&elem->complete,
					msecs_to_jiffies(50000));

			/* Shouldn't happen. There are still references to
			 * the object but, rather than deadlock, free the
			 * object or pass back to rdma-core.
			 */
			if (WARN_ON(!ret))
				err = -ETIMEDOUT;
		}
	} else {
		unsigned long until = jiffies + RXE_POOL_TIMEOUT;

		/* AH objects are unique in that the destroy_ah verb
		 * can be called in atomic context. This delay
		 * replaces the wait_for_completion call above
		 * when the destroy_ah call is not sleepable
		 */
		while (!completion_done(&elem->complete) &&
				time_before(jiffies, until))
			mdelay(1);

		if (WARN_ON(!completion_done(&elem->complete)))
			err = -ETIMEDOUT;
	}

	if (pool->cleanup)
		pool->cleanup(elem);

	atomic_dec(&pool->num_elem);

	return err;
}

/* Take a reference unless the element is already dying; returns nonzero
 * iff the reference was obtained.
 */
int __rxe_get(struct rxe_pool_elem *elem)
{
	return kref_get_unless_zero(&elem->ref_cnt);
}

/* Drop a reference; the last put fires rxe_elem_release(). Returns
 * nonzero iff this was the final reference.
 */
int __rxe_put(struct rxe_pool_elem *elem)
{
	return kref_put(&elem->ref_cnt, rxe_elem_release);
}

/* Publish a fully constructed element: replace the NULL placeholder
 * stored by __rxe_add_to_pool() so rxe_pool_get_index() can find it.
 */
void __rxe_finalize(struct rxe_pool_elem *elem)
{
	void *xa_ret;

	xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
	WARN_ON(xa_err(xa_ret));
}