1 /*
2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include "hns_roce_device.h"
35 #include "hns_roce_hem.h"
36 #include "hns_roce_common.h"
37
38 #define HEM_INDEX_BUF BIT(0)
39 #define HEM_INDEX_L0 BIT(1)
40 #define HEM_INDEX_L1 BIT(2)
41 struct hns_roce_hem_index {
42 u64 buf;
43 u64 l0;
44 u64 l1;
45 u32 inited; /* indicate which index is available */
46 };
47
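/* Return true when the context table of this type is addressed through
 * multi-hop base address tables, i.e. its configured hop_num is non-zero.
 */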
48 bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
49 {
50 int hop_num = 0;
51
52 switch (type) {
53 case HEM_TYPE_QPC:
54 hop_num = hr_dev->caps.qpc_hop_num;
55 break;
56 case HEM_TYPE_MTPT:
57 hop_num = hr_dev->caps.mpt_hop_num;
58 break;
59 case HEM_TYPE_CQC:
60 hop_num = hr_dev->caps.cqc_hop_num;
61 break;
62 case HEM_TYPE_SRQC:
63 hop_num = hr_dev->caps.srqc_hop_num;
64 break;
65 case HEM_TYPE_SCCC:
66 hop_num = hr_dev->caps.sccc_hop_num;
67 break;
68 case HEM_TYPE_QPC_TIMER:
69 hop_num = hr_dev->caps.qpc_timer_hop_num;
70 break;
71 case HEM_TYPE_CQC_TIMER:
72 hop_num = hr_dev->caps.cqc_timer_hop_num;
73 break;
74 case HEM_TYPE_GMV:
75 hop_num = hr_dev->caps.gmv_hop_num;
76 break;
77 default:
78 return false;
79 }
80
81 return hop_num;
82 }
83
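/* Check whether, apart from hem_idx itself, every HEM slot covered by the
 * same bottom-level BT chunk is empty, so that BT page can be freed.
 */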
84 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
85 u32 bt_chunk_num, u64 hem_max_num)
86 {
87 u64 start_idx = round_down(hem_idx, bt_chunk_num);
88 u64 check_max_num = start_idx + bt_chunk_num;
89 u64 i;
90
91 for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
92 if (i != hem_idx && hem[i])
93 return false;
94
95 return true;
96 }
97
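/* Same idea for a BT level: return true when all other BT entries in the
 * chunk that contains ba_idx are unused.
 */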
98 static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
99 {
100 u64 start_idx = round_down(ba_idx, bt_chunk_num);
101 int i;
102
103 for (i = 0; i < bt_chunk_num; i++)
104 if (i != ba_idx && bt[start_idx + i])
105 return false;
106
107 return true;
108 }
109
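/* Number of BA table levels used by this table type with the given hop_num. */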
110 static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
111 {
112 if (check_whether_bt_num_3(table_type, hop_num))
113 return 3;
114 else if (check_whether_bt_num_2(table_type, hop_num))
115 return 2;
116 else if (check_whether_bt_num_1(table_type, hop_num))
117 return 1;
118 else
119 return 0;
120 }
121
122 static int get_hem_table_config(struct hns_roce_dev *hr_dev,
123 struct hns_roce_hem_mhop *mhop,
124 u32 type)
125 {
126 struct device *dev = hr_dev->dev;
127
128 switch (type) {
129 case HEM_TYPE_QPC:
130 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
131 + PAGE_SHIFT);
132 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
133 + PAGE_SHIFT);
134 mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
135 mhop->hop_num = hr_dev->caps.qpc_hop_num;
136 break;
137 case HEM_TYPE_MTPT:
138 mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
139 + PAGE_SHIFT);
140 mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
141 + PAGE_SHIFT);
142 mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
143 mhop->hop_num = hr_dev->caps.mpt_hop_num;
144 break;
145 case HEM_TYPE_CQC:
146 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
147 + PAGE_SHIFT);
148 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
149 + PAGE_SHIFT);
150 mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
151 mhop->hop_num = hr_dev->caps.cqc_hop_num;
152 break;
153 case HEM_TYPE_SCCC:
154 mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
155 + PAGE_SHIFT);
156 mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
157 + PAGE_SHIFT);
158 mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
159 mhop->hop_num = hr_dev->caps.sccc_hop_num;
160 break;
161 case HEM_TYPE_QPC_TIMER:
162 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
163 + PAGE_SHIFT);
164 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
165 + PAGE_SHIFT);
166 mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
167 mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
168 break;
169 case HEM_TYPE_CQC_TIMER:
170 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
171 + PAGE_SHIFT);
172 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
173 + PAGE_SHIFT);
174 mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
175 mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
176 break;
177 case HEM_TYPE_SRQC:
178 mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
179 + PAGE_SHIFT);
180 mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
181 + PAGE_SHIFT);
182 mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
183 mhop->hop_num = hr_dev->caps.srqc_hop_num;
184 break;
185 case HEM_TYPE_GMV:
186 mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
187 PAGE_SHIFT);
188 mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
189 PAGE_SHIFT);
190 mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
191 mhop->hop_num = hr_dev->caps.gmv_hop_num;
192 break;
193 default:
194 dev_err(dev, "table %u not support multi-hop addressing!\n",
195 type);
196 return -EINVAL;
197 }
198
199 return 0;
200 }
201
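/* Fill @mhop with the chunk sizes and hop configuration of @table and, when
 * @obj is given, also work out the L0/L1/L2 indexes of the chunk holding it.
 */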
202 int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
203 struct hns_roce_hem_table *table, unsigned long *obj,
204 struct hns_roce_hem_mhop *mhop)
205 {
206 struct device *dev = hr_dev->dev;
207 u32 chunk_ba_num;
208 u32 chunk_size;
209 u32 table_idx;
210 u32 bt_num;
211
212 if (get_hem_table_config(hr_dev, mhop, table->type))
213 return -EINVAL;
214
215 if (!obj)
216 return 0;
217
218 /*
219 * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
220 * MTT/CQE alloc hem for bt pages.
221 */
222 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
223 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
224 chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
225 mhop->bt_chunk_size;
226 table_idx = *obj / (chunk_size / table->obj_size);
227 switch (bt_num) {
228 case 3:
229 mhop->l2_idx = table_idx & (chunk_ba_num - 1);
230 mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
231 mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
232 break;
233 case 2:
234 mhop->l1_idx = table_idx & (chunk_ba_num - 1);
235 mhop->l0_idx = table_idx / chunk_ba_num;
236 break;
237 case 1:
238 mhop->l0_idx = table_idx;
239 break;
240 default:
241 dev_err(dev, "table %u not support hop_num = %u!\n",
242 table->type, mhop->hop_num);
243 return -EINVAL;
244 }
245 if (mhop->l0_idx >= mhop->ba_l0_num)
246 mhop->l0_idx %= mhop->ba_l0_num;
247
248 return 0;
249 }
250
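/* Allocate one DMA-coherent HEM chunk; the size must be an exact
 * power-of-two number of pages.
 */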
251 static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
252 unsigned long hem_alloc_size)
253 {
254 struct hns_roce_hem *hem;
255 int order;
256 void *buf;
257
258 order = get_order(hem_alloc_size);
259 if (PAGE_SIZE << order != hem_alloc_size) {
260 dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
261 hem_alloc_size);
262 return NULL;
263 }
264
265 hem = kmalloc(sizeof(*hem), GFP_KERNEL);
266 if (!hem)
267 return NULL;
268
269 buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
270 &hem->dma, GFP_KERNEL);
271 if (!buf)
272 goto fail;
273
274 hem->buf = buf;
275 hem->size = hem_alloc_size;
276
277 return hem;
278
279 fail:
280 kfree(hem);
281 return NULL;
282 }
283
284 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
285 {
286 if (!hem)
287 return;
288
289 dma_free_coherent(hr_dev->dev, hem->size, hem->buf, hem->dma);
290
291 kfree(hem);
292 }
293
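/* Translate an object index into the HEM buffer index and the L0/L1 BT
 * indexes that cover it, according to the table's hop configuration.
 */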
294 static int calc_hem_config(struct hns_roce_dev *hr_dev,
295 struct hns_roce_hem_table *table, unsigned long obj,
296 struct hns_roce_hem_mhop *mhop,
297 struct hns_roce_hem_index *index)
298 {
299 struct device *dev = hr_dev->dev;
300 unsigned long mhop_obj = obj;
301 u32 l0_idx, l1_idx, l2_idx;
302 u32 chunk_ba_num;
303 u32 bt_num;
304 int ret;
305
306 ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
307 if (ret)
308 return ret;
309
310 l0_idx = mhop->l0_idx;
311 l1_idx = mhop->l1_idx;
312 l2_idx = mhop->l2_idx;
313 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
314 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
315 switch (bt_num) {
316 case 3:
317 index->l1 = l0_idx * chunk_ba_num + l1_idx;
318 index->l0 = l0_idx;
319 index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
320 l1_idx * chunk_ba_num + l2_idx;
321 break;
322 case 2:
323 index->l0 = l0_idx;
324 index->buf = l0_idx * chunk_ba_num + l1_idx;
325 break;
326 case 1:
327 index->buf = l0_idx;
328 break;
329 default:
330 dev_err(dev, "table %u not support mhop.hop_num = %u!\n",
331 table->type, mhop->hop_num);
332 return -EINVAL;
333 }
334
335 if (unlikely(index->buf >= table->num_hem)) {
336 dev_err(dev, "table %u exceed hem limt idx %llu, max %lu!\n",
337 table->type, index->buf, table->num_hem);
338 return -EINVAL;
339 }
340
341 return 0;
342 }
343
344 static void free_mhop_hem(struct hns_roce_dev *hr_dev,
345 struct hns_roce_hem_table *table,
346 struct hns_roce_hem_mhop *mhop,
347 struct hns_roce_hem_index *index)
348 {
349 u32 bt_size = mhop->bt_chunk_size;
350 struct device *dev = hr_dev->dev;
351
352 if (index->inited & HEM_INDEX_BUF) {
353 hns_roce_free_hem(hr_dev, table->hem[index->buf]);
354 table->hem[index->buf] = NULL;
355 }
356
357 if (index->inited & HEM_INDEX_L1) {
358 dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
359 table->bt_l1_dma_addr[index->l1]);
360 table->bt_l1[index->l1] = NULL;
361 }
362
363 if (index->inited & HEM_INDEX_L0) {
364 dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
365 table->bt_l0_dma_addr[index->l0]);
366 table->bt_l0[index->l0] = NULL;
367 }
368 }
369
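/* Allocate the missing BT pages (L0/L1) and the HEM chunk for this index,
 * linking each newly allocated level into its parent table.
 */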
370 static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
371 struct hns_roce_hem_table *table,
372 struct hns_roce_hem_mhop *mhop,
373 struct hns_roce_hem_index *index)
374 {
375 u32 bt_size = mhop->bt_chunk_size;
376 struct device *dev = hr_dev->dev;
377 u64 bt_ba;
378 u32 size;
379 int ret;
380
381 /* alloc L1 BA's chunk */
382 if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
383 check_whether_bt_num_2(table->type, mhop->hop_num)) &&
384 !table->bt_l0[index->l0]) {
385 table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
386 &table->bt_l0_dma_addr[index->l0],
387 GFP_KERNEL);
388 if (!table->bt_l0[index->l0]) {
389 ret = -ENOMEM;
390 goto out;
391 }
392 index->inited |= HEM_INDEX_L0;
393 }
394
395 /* alloc L2 BA's chunk */
396 if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
397 !table->bt_l1[index->l1]) {
398 table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
399 &table->bt_l1_dma_addr[index->l1],
400 GFP_KERNEL);
401 if (!table->bt_l1[index->l1]) {
402 ret = -ENOMEM;
403 goto err_alloc_hem;
404 }
405 index->inited |= HEM_INDEX_L1;
406 *(table->bt_l0[index->l0] + mhop->l1_idx) =
407 table->bt_l1_dma_addr[index->l1];
408 }
409
410 /*
411 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
412 * alloc bt space chunk for MTT/CQE.
413 */
414 size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
415 table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
416 if (!table->hem[index->buf]) {
417 ret = -ENOMEM;
418 goto err_alloc_hem;
419 }
420
421 index->inited |= HEM_INDEX_BUF;
422 bt_ba = table->hem[index->buf]->dma;
423
424 if (table->type < HEM_TYPE_MTT) {
425 if (mhop->hop_num == 2)
426 *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
427 else if (mhop->hop_num == 1)
428 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
429 } else if (mhop->hop_num == 2) {
430 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
431 }
432
433 return 0;
434 err_alloc_hem:
435 free_mhop_hem(hr_dev, table, mhop, index);
436 out:
437 return ret;
438 }
439
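/* Program the newly initialized BT/HEM base addresses into hardware, one
 * set_hem() step per level that was just set up.
 */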
440 static int set_mhop_hem(struct hns_roce_dev *hr_dev,
441 struct hns_roce_hem_table *table, unsigned long obj,
442 struct hns_roce_hem_mhop *mhop,
443 struct hns_roce_hem_index *index)
444 {
445 struct device *dev = hr_dev->dev;
446 u32 step_idx;
447 int ret = 0;
448
449 if (index->inited & HEM_INDEX_L0) {
450 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
451 if (ret) {
452 dev_err(dev, "set HEM step 0 failed!\n");
453 goto out;
454 }
455 }
456
457 if (index->inited & HEM_INDEX_L1) {
458 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
459 if (ret) {
460 dev_err(dev, "set HEM step 1 failed!\n");
461 goto out;
462 }
463 }
464
465 if (index->inited & HEM_INDEX_BUF) {
466 if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
467 step_idx = 0;
468 else
469 step_idx = mhop->hop_num;
470 ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
471 if (ret)
472 dev_err(dev, "set HEM step last failed!\n");
473 }
474 out:
475 return ret;
476 }
477
478 static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
479 struct hns_roce_hem_table *table,
480 unsigned long obj)
481 {
482 struct hns_roce_hem_index index = {};
483 struct hns_roce_hem_mhop mhop = {};
484 struct device *dev = hr_dev->dev;
485 int ret;
486
487 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
488 if (ret) {
489 dev_err(dev, "calc hem config failed!\n");
490 return ret;
491 }
492
493 mutex_lock(&table->mutex);
494 if (table->hem[index.buf]) {
495 refcount_inc(&table->hem[index.buf]->refcount);
496 goto out;
497 }
498
499 ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
500 if (ret) {
501 dev_err(dev, "alloc mhop hem failed!\n");
502 goto out;
503 }
504
505 /* set HEM base address to hardware */
506 if (table->type < HEM_TYPE_MTT) {
507 ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
508 if (ret) {
509 dev_err(dev, "set HEM address to HW failed!\n");
510 goto err_alloc;
511 }
512 }
513
514 refcount_set(&table->hem[index.buf]->refcount, 1);
515 goto out;
516
517 err_alloc:
518 free_mhop_hem(hr_dev, table, &mhop, &index);
519 out:
520 mutex_unlock(&table->mutex);
521 return ret;
522 }
523
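/* Get a reference on the HEM chunk covering @obj, allocating the chunk and
 * telling hardware its base address on first use.
 */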
524 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
525 struct hns_roce_hem_table *table, unsigned long obj)
526 {
527 struct device *dev = hr_dev->dev;
528 unsigned long i;
529 int ret = 0;
530
531 if (hns_roce_check_whether_mhop(hr_dev, table->type))
532 return hns_roce_table_mhop_get(hr_dev, table, obj);
533
534 i = obj / (table->table_chunk_size / table->obj_size);
535
536 mutex_lock(&table->mutex);
537
538 if (table->hem[i]) {
539 refcount_inc(&table->hem[i]->refcount);
540 goto out;
541 }
542
543 table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
544 if (!table->hem[i]) {
545 ret = -ENOMEM;
546 goto out;
547 }
548
549 /* Set HEM base address (128K/page, pa) to hardware */
550 ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
551 if (ret) {
552 hns_roce_free_hem(hr_dev, table->hem[i]);
553 table->hem[i] = NULL;
554 dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
555 ret);
556 goto out;
557 }
558
559 refcount_set(&table->hem[i]->refcount, 1);
560 out:
561 mutex_unlock(&table->mutex);
562 return ret;
563 }
564
565 static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
566 struct hns_roce_hem_table *table, unsigned long obj,
567 struct hns_roce_hem_mhop *mhop,
568 struct hns_roce_hem_index *index)
569 {
570 struct device *dev = hr_dev->dev;
571 u32 hop_num = mhop->hop_num;
572 u32 chunk_ba_num;
573 u32 step_idx;
574 int ret;
575
576 index->inited = HEM_INDEX_BUF;
577 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
578 if (check_whether_bt_num_2(table->type, hop_num)) {
579 if (hns_roce_check_hem_null(table->hem, index->buf,
580 chunk_ba_num, table->num_hem))
581 index->inited |= HEM_INDEX_L0;
582 } else if (check_whether_bt_num_3(table->type, hop_num)) {
583 if (hns_roce_check_hem_null(table->hem, index->buf,
584 chunk_ba_num, table->num_hem)) {
585 index->inited |= HEM_INDEX_L1;
586 if (hns_roce_check_bt_null(table->bt_l1, index->l1,
587 chunk_ba_num))
588 index->inited |= HEM_INDEX_L0;
589 }
590 }
591
592 if (table->type < HEM_TYPE_MTT) {
593 if (hop_num == HNS_ROCE_HOP_NUM_0)
594 step_idx = 0;
595 else
596 step_idx = hop_num;
597
598 ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx);
599 if (ret)
600 dev_warn(dev, "failed to clear hop%u HEM, ret = %d.\n",
601 hop_num, ret);
602
603 if (index->inited & HEM_INDEX_L1) {
604 ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
605 if (ret)
606 dev_warn(dev, "failed to clear HEM step 1, ret = %d.\n",
607 ret);
608 }
609
610 if (index->inited & HEM_INDEX_L0) {
611 ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
612 if (ret)
613 dev_warn(dev, "failed to clear HEM step 0, ret = %d.\n",
614 ret);
615 }
616 }
617 }
618
619 static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
620 struct hns_roce_hem_table *table,
621 unsigned long obj,
622 int check_refcount)
623 {
624 struct hns_roce_hem_index index = {};
625 struct hns_roce_hem_mhop mhop = {};
626 struct device *dev = hr_dev->dev;
627 int ret;
628
629 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
630 if (ret) {
631 dev_err(dev, "calc hem config failed!\n");
632 return;
633 }
634
635 if (!check_refcount)
636 mutex_lock(&table->mutex);
637 else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
638 &table->mutex))
639 return;
640
641 clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
642 free_mhop_hem(hr_dev, table, &mhop, &index);
643
644 mutex_unlock(&table->mutex);
645 }
646
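/* Drop a reference on the HEM chunk covering @obj; when the refcount hits
 * zero, clear the base address in hardware and free the chunk.
 */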
647 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
648 struct hns_roce_hem_table *table, unsigned long obj)
649 {
650 struct device *dev = hr_dev->dev;
651 unsigned long i;
652 int ret;
653
654 if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
655 hns_roce_table_mhop_put(hr_dev, table, obj, 1);
656 return;
657 }
658
659 i = obj / (table->table_chunk_size / table->obj_size);
660
661 if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
662 &table->mutex))
663 return;
664
665 ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
666 if (ret)
667 dev_warn_ratelimited(dev, "failed to clear HEM base address, ret = %d.\n",
668 ret);
669
670 hns_roce_free_hem(hr_dev, table->hem[i]);
671 table->hem[i] = NULL;
672
673 mutex_unlock(&table->mutex);
674 }
675
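/* Return a kernel virtual address for @obj within its HEM chunk and report
 * the matching DMA address through @dma_handle, or NULL if no chunk exists.
 */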
676 void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
677 struct hns_roce_hem_table *table,
678 unsigned long obj, dma_addr_t *dma_handle)
679 {
680 struct hns_roce_hem_mhop mhop;
681 struct hns_roce_hem *hem;
682 unsigned long mhop_obj = obj;
683 unsigned long obj_per_chunk;
684 unsigned long idx_offset;
685 int offset, dma_offset;
686 void *addr = NULL;
687 u32 hem_idx = 0;
688 int i, j;
689
690 mutex_lock(&table->mutex);
691
692 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
693 obj_per_chunk = table->table_chunk_size / table->obj_size;
694 hem = table->hem[obj / obj_per_chunk];
695 idx_offset = obj % obj_per_chunk;
696 dma_offset = offset = idx_offset * table->obj_size;
697 } else {
698 u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
699
700 if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
701 goto out;
702 /* mtt mhop */
703 i = mhop.l0_idx;
704 j = mhop.l1_idx;
705 if (mhop.hop_num == 2)
706 hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
707 else if (mhop.hop_num == 1 ||
708 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
709 hem_idx = i;
710
711 hem = table->hem[hem_idx];
712 dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
713 if (mhop.hop_num == 2)
714 dma_offset = offset = 0;
715 }
716
717 if (!hem)
718 goto out;
719
720 *dma_handle = hem->dma + dma_offset;
721 addr = hem->buf + offset;
722
723 out:
724 mutex_unlock(&table->mutex);
725 return addr;
726 }
727
728 int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
729 struct hns_roce_hem_table *table, u32 type,
730 unsigned long obj_size, unsigned long nobj)
731 {
732 unsigned long obj_per_chunk;
733 unsigned long num_hem;
734
735 if (!hns_roce_check_whether_mhop(hr_dev, type)) {
736 table->table_chunk_size = hr_dev->caps.chunk_sz;
737 obj_per_chunk = table->table_chunk_size / obj_size;
738 num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
739
740 table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
741 if (!table->hem)
742 return -ENOMEM;
743 } else {
744 struct hns_roce_hem_mhop mhop = {};
745 unsigned long buf_chunk_size;
746 unsigned long bt_chunk_size;
747 unsigned long bt_chunk_num;
748 unsigned long num_bt_l0;
749 u32 hop_num;
750
751 if (get_hem_table_config(hr_dev, &mhop, type))
752 return -EINVAL;
753
754 buf_chunk_size = mhop.buf_chunk_size;
755 bt_chunk_size = mhop.bt_chunk_size;
756 num_bt_l0 = mhop.ba_l0_num;
757 hop_num = mhop.hop_num;
758
759 obj_per_chunk = buf_chunk_size / obj_size;
760 num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
761 bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
762
763 if (type >= HEM_TYPE_MTT)
764 num_bt_l0 = bt_chunk_num;
765
766 table->hem = kcalloc(num_hem, sizeof(*table->hem),
767 GFP_KERNEL);
768 if (!table->hem)
769 goto err_kcalloc_hem_buf;
770
771 if (check_whether_bt_num_3(type, hop_num)) {
772 unsigned long num_bt_l1;
773
774 num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
775 table->bt_l1 = kcalloc(num_bt_l1,
776 sizeof(*table->bt_l1),
777 GFP_KERNEL);
778 if (!table->bt_l1)
779 goto err_kcalloc_bt_l1;
780
781 table->bt_l1_dma_addr = kcalloc(num_bt_l1,
782 sizeof(*table->bt_l1_dma_addr),
783 GFP_KERNEL);
784
785 if (!table->bt_l1_dma_addr)
786 goto err_kcalloc_l1_dma;
787 }
788
789 if (check_whether_bt_num_2(type, hop_num) ||
790 check_whether_bt_num_3(type, hop_num)) {
791 table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
792 GFP_KERNEL);
793 if (!table->bt_l0)
794 goto err_kcalloc_bt_l0;
795
796 table->bt_l0_dma_addr = kcalloc(num_bt_l0,
797 sizeof(*table->bt_l0_dma_addr),
798 GFP_KERNEL);
799 if (!table->bt_l0_dma_addr)
800 goto err_kcalloc_l0_dma;
801 }
802 }
803
804 table->type = type;
805 table->num_hem = num_hem;
806 table->obj_size = obj_size;
807 mutex_init(&table->mutex);
808
809 return 0;
810
811 err_kcalloc_l0_dma:
812 kfree(table->bt_l0);
813 table->bt_l0 = NULL;
814
815 err_kcalloc_bt_l0:
816 kfree(table->bt_l1_dma_addr);
817 table->bt_l1_dma_addr = NULL;
818
819 err_kcalloc_l1_dma:
820 kfree(table->bt_l1);
821 table->bt_l1 = NULL;
822
823 err_kcalloc_bt_l1:
824 kfree(table->hem);
825 table->hem = NULL;
826
827 err_kcalloc_hem_buf:
828 return -ENOMEM;
829 }
830
831 static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
832 struct hns_roce_hem_table *table)
833 {
834 struct hns_roce_hem_mhop mhop;
835 u32 buf_chunk_size;
836 u64 obj;
837 int i;
838
839 if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
840 return;
841 buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
842 mhop.bt_chunk_size;
843
844 for (i = 0; i < table->num_hem; ++i) {
845 obj = i * buf_chunk_size / table->obj_size;
846 if (table->hem[i])
847 hns_roce_table_mhop_put(hr_dev, table, obj, 0);
848 }
849
850 kfree(table->hem);
851 table->hem = NULL;
852 kfree(table->bt_l1);
853 table->bt_l1 = NULL;
854 kfree(table->bt_l1_dma_addr);
855 table->bt_l1_dma_addr = NULL;
856 kfree(table->bt_l0);
857 table->bt_l0 = NULL;
858 kfree(table->bt_l0_dma_addr);
859 table->bt_l0_dma_addr = NULL;
860 }
861
862 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
863 struct hns_roce_hem_table *table)
864 {
865 struct device *dev = hr_dev->dev;
866 unsigned long i;
867 int obj;
868 int ret;
869
870 if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
871 hns_roce_cleanup_mhop_hem_table(hr_dev, table);
872 mutex_destroy(&table->mutex);
873 return;
874 }
875
876 for (i = 0; i < table->num_hem; ++i)
877 if (table->hem[i]) {
878 obj = i * table->table_chunk_size / table->obj_size;
879 ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
880 if (ret)
881 dev_err(dev, "clear HEM base address failed, ret = %d.\n",
882 ret);
883
884 hns_roce_free_hem(hr_dev, table->hem[i]);
885 }
886
887 mutex_destroy(&table->mutex);
888 kfree(table->hem);
889 }
890
891 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
892 {
893 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
894 hns_roce_cleanup_hem_table(hr_dev,
895 &hr_dev->srq_table.table);
896 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
897 if (hr_dev->caps.qpc_timer_entry_sz)
898 hns_roce_cleanup_hem_table(hr_dev,
899 &hr_dev->qpc_timer_table);
900 if (hr_dev->caps.cqc_timer_entry_sz)
901 hns_roce_cleanup_hem_table(hr_dev,
902 &hr_dev->cqc_timer_table);
903 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
904 hns_roce_cleanup_hem_table(hr_dev,
905 &hr_dev->qp_table.sccc_table);
906 if (hr_dev->caps.trrl_entry_sz)
907 hns_roce_cleanup_hem_table(hr_dev,
908 &hr_dev->qp_table.trrl_table);
909
910 if (hr_dev->caps.gmv_entry_sz)
911 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
912
913 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
914 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
915 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
916 }
917
918 struct hns_roce_hem_item {
919 struct list_head list; /* link all hems in the same bt level */
920 struct list_head sibling; /* link all hems in last hop for mtt */
921 void *addr;
922 dma_addr_t dma_addr;
923 size_t count; /* max ba numbers */
924 int start; /* start buf offset in this hem */
925 int end; /* end buf offset in this hem */
926 bool exist_bt;
927 };
928
929 /* All HEM items are linked in a tree structure */
930 struct hns_roce_hem_head {
931 struct list_head branch[HNS_ROCE_MAX_BT_REGION];
932 struct list_head root;
933 struct list_head leaf;
934 };
935
936 static struct hns_roce_hem_item *
937 hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
938 bool exist_bt)
939 {
940 struct hns_roce_hem_item *hem;
941
942 hem = kzalloc(sizeof(*hem), GFP_KERNEL);
943 if (!hem)
944 return NULL;
945
946 if (exist_bt) {
947 hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
948 &hem->dma_addr, GFP_KERNEL);
949 if (!hem->addr) {
950 kfree(hem);
951 return NULL;
952 }
953 }
954
955 hem->exist_bt = exist_bt;
956 hem->count = count;
957 hem->start = start;
958 hem->end = end;
959 INIT_LIST_HEAD(&hem->list);
960 INIT_LIST_HEAD(&hem->sibling);
961
962 return hem;
963 }
964
965 static void hem_list_free_item(struct hns_roce_dev *hr_dev,
966 struct hns_roce_hem_item *hem)
967 {
968 if (hem->exist_bt)
969 dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
970 hem->addr, hem->dma_addr);
971 kfree(hem);
972 }
973
974 static void hem_list_free_all(struct hns_roce_dev *hr_dev,
975 struct list_head *head)
976 {
977 struct hns_roce_hem_item *hem, *temp_hem;
978
979 list_for_each_entry_safe(hem, temp_hem, head, list) {
980 list_del(&hem->list);
981 hem_list_free_item(hr_dev, hem);
982 }
983 }
984
985 static void hem_list_link_bt(void *base_addr, u64 table_addr)
986 {
987 *(u64 *)(base_addr) = table_addr;
988 }
989
990 /* assign L0 table address to hem from root bt */
991 static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr,
992 u64 phy_addr)
993 {
994 hem->addr = cpu_addr;
995 hem->dma_addr = (dma_addr_t)phy_addr;
996 }
997
998 static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
999 int offset)
1000 {
1001 return (hem->start <= offset && offset <= hem->end);
1002 }
1003
1004 static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
1005 int page_offset)
1006 {
1007 struct hns_roce_hem_item *hem, *temp_hem;
1008 struct hns_roce_hem_item *found = NULL;
1009
1010 list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
1011 if (hem_list_page_is_in_range(hem, page_offset)) {
1012 found = hem;
1013 break;
1014 }
1015 }
1016
1017 return found;
1018 }
1019
1020 static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
1021 {
1022 /*
1023 * hopnum base address table levels
1024 * 0 L0(buf)
1025 * 1 L0 -> buf
1026 * 2 L0 -> L1 -> buf
1027 * 3 L0 -> L1 -> L2 -> buf
1028 */
1029 return bt_level >= (hopnum ? hopnum - 1 : hopnum);
1030 }
1031
1032 /*
1033 * calc base address entries num
1034 * @hopnum: num of multi-hop addressing
1035 * @bt_level: base address table level
1036 * @unit: ba entries per bt page
1037 */
1038 static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
1039 {
1040 u64 step;
1041 int max;
1042 int i;
1043
1044 if (hopnum <= bt_level)
1045 return 0;
1046 /*
1047 * hopnum bt_level range
1048 * 1 0 unit
1049 * ------------
1050 * 2 0 unit * unit
1051 * 2 1 unit
1052 * ------------
1053 * 3 0 unit * unit * unit
1054 * 3 1 unit * unit
1055 * 3 2 unit
1056 */
1057 step = 1;
1058 max = hopnum - bt_level;
1059 for (i = 0; i < max; i++)
1060 step = step * unit;
1061
1062 return step;
1063 }
1064
1065 /*
1066 * calc the root ba entries which could cover all regions
1067 * @regions: buf region array
1068 * @region_cnt: array size of @regions
1069 * @unit: ba entries per bt page
1070 */
1071 int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
1072 int region_cnt, int unit)
1073 {
1074 struct hns_roce_buf_region *r;
1075 int total = 0;
1076 u64 step;
1077 int i;
1078
1079 for (i = 0; i < region_cnt; i++) {
1080 r = (struct hns_roce_buf_region *)&regions[i];
1081 /* when r->hopnum = 0, the region should not occupy root_ba. */
1082 if (!r->hopnum)
1083 continue;
1084
1085 if (r->hopnum > 1) {
1086 step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1087 if (step > 0)
1088 total += (r->count + step - 1) / step;
1089 } else {
1090 total += r->count;
1091 }
1092 }
1093
1094 return total;
1095 }
1096
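/* Build the intermediate (L1 to bottom) BT pages covering @offset within
 * region @r, reusing levels already present in @mid_bt and adding new
 * bottom-level pages to @btm_bt.
 */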
1097 static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
1098 const struct hns_roce_buf_region *r, int unit,
1099 int offset, struct list_head *mid_bt,
1100 struct list_head *btm_bt)
1101 {
1102 struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
1103 struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
1104 struct hns_roce_hem_item *cur, *pre;
1105 const int hopnum = r->hopnum;
1106 int start_aligned;
1107 int distance;
1108 int ret = 0;
1109 int max_ofs;
1110 int level;
1111 u64 step;
1112 int end;
1113
1114 if (hopnum <= 1)
1115 return 0;
1116
1117 if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
1118 dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
1119 return -EINVAL;
1120 }
1121
1122 if (offset < r->offset) {
1123 dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
1124 offset, r->offset);
1125 return -EINVAL;
1126 }
1127
1128 distance = offset - r->offset;
1129 max_ofs = r->offset + r->count - 1;
1130 for (level = 0; level < hopnum; level++)
1131 INIT_LIST_HEAD(&temp_list[level]);
1132
1133 /* configure L1 to bottom-level BTs and link them to their parents */
1134 for (level = 1; level < hopnum; level++) {
1135 if (!hem_list_is_bottom_bt(hopnum, level)) {
1136 cur = hem_list_search_item(&mid_bt[level], offset);
1137 if (cur) {
1138 hem_ptrs[level] = cur;
1139 continue;
1140 }
1141 }
1142
1143 step = hem_list_calc_ba_range(hopnum, level, unit);
1144 if (step < 1) {
1145 ret = -EINVAL;
1146 goto err_exit;
1147 }
1148
1149 start_aligned = (distance / step) * step + r->offset;
1150 end = min_t(u64, start_aligned + step - 1, max_ofs);
1151 cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
1152 true);
1153 if (!cur) {
1154 ret = -ENOMEM;
1155 goto err_exit;
1156 }
1157 hem_ptrs[level] = cur;
1158 list_add(&cur->list, &temp_list[level]);
1159 if (hem_list_is_bottom_bt(hopnum, level))
1160 list_add(&cur->sibling, &temp_list[0]);
1161
1162 /* link bt to parent bt */
1163 if (level > 1) {
1164 pre = hem_ptrs[level - 1];
1165 step = (cur->start - pre->start) / step * BA_BYTE_LEN;
1166 hem_list_link_bt(pre->addr + step, cur->dma_addr);
1167 }
1168 }
1169
1170 list_splice(&temp_list[0], btm_bt);
1171 for (level = 1; level < hopnum; level++)
1172 list_splice(&temp_list[level], &mid_bt[level]);
1173
1174 return 0;
1175
1176 err_exit:
1177 for (level = 1; level < hopnum; level++)
1178 hem_list_free_all(hr_dev, &temp_list[level]);
1179
1180 return ret;
1181 }
1182
1183 static struct hns_roce_hem_item *
1184 alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
1185 const struct hns_roce_buf_region *regions, int region_cnt)
1186 {
1187 const struct hns_roce_buf_region *r;
1188 struct hns_roce_hem_item *hem;
1189 int ba_num;
1190 int offset;
1191
1192 ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
1193 if (ba_num < 1)
1194 return ERR_PTR(-ENOMEM);
1195
1196 if (ba_num > unit)
1197 return ERR_PTR(-ENOBUFS);
1198
1199 offset = regions[0].offset;
1200 /* point to the last region */
1201 r = ®ions[region_cnt - 1];
1202 hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
1203 ba_num, true);
1204 if (!hem)
1205 return ERR_PTR(-ENOMEM);
1206
1207 *max_ba_num = ba_num;
1208
1209 return hem;
1210 }
1211
1212 static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1213 u64 phy_base, const struct hns_roce_buf_region *r,
1214 struct list_head *branch_head,
1215 struct list_head *leaf_head)
1216 {
1217 struct hns_roce_hem_item *hem;
1218
1219 /* This is on the has_mtt branch. If r->hopnum
1220 * is 0, there is no root_ba to reuse for the
1221 * region's fake hem, so a dma_alloc request is
1222 * necessary here.
1223 */
1224 hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
1225 r->count, !r->hopnum);
1226 if (!hem)
1227 return -ENOMEM;
1228
1229 /* The root_ba can be reused only when r->hopnum > 0. */
1230 if (r->hopnum)
1231 hem_list_assign_bt(hem, cpu_base, phy_base);
1232 list_add(&hem->list, branch_head);
1233 list_add(&hem->sibling, leaf_head);
1234
1235 /* If r->hopnum == 0, 0 is returned,
1236 * so that the root_bt entry is not occupied.
1237 */
1238 return r->hopnum ? r->count : 0;
1239 }
1240
1241 static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1242 int unit, const struct hns_roce_buf_region *r,
1243 const struct list_head *branch_head)
1244 {
1245 struct hns_roce_hem_item *hem, *temp_hem;
1246 int total = 0;
1247 int offset;
1248 u64 step;
1249
1250 step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1251 if (step < 1)
1252 return -EINVAL;
1253
1254 /* if mid bt exists, link L1 to L0 */
1255 list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
1256 offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
1257 hem_list_link_bt(cpu_base + offset, hem->dma_addr);
1258 total++;
1259 }
1260
1261 return total;
1262 }
1263
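/* Walk all regions and hook each of them into the shared root BT: regions
 * with hopnum 0 or 1 get a fake hem carved from the root BT, deeper regions
 * have their L1 BTs linked into it.
 */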
1264 static int
1265 setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
1266 int unit, int max_ba_num, struct hns_roce_hem_head *head,
1267 const struct hns_roce_buf_region *regions, int region_cnt)
1268 {
1269 const struct hns_roce_buf_region *r;
1270 struct hns_roce_hem_item *root_hem;
1271 void *cpu_base;
1272 u64 phy_base;
1273 int i, total;
1274 int ret;
1275
1276 root_hem = list_first_entry(&head->root,
1277 struct hns_roce_hem_item, list);
1278 if (!root_hem)
1279 return -ENOMEM;
1280
1281 total = 0;
1282 for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
1283 r = ®ions[i];
1284 if (!r->count)
1285 continue;
1286
1287 /* all regions' mid[x][0] share the root_bt's trunk */
1288 cpu_base = root_hem->addr + total * BA_BYTE_LEN;
1289 phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
1290
1291 /* if hopnum is 0 or 1, cut a new fake hem from the root bt,
1292 * whose address is shared with all regions.
1293 */
1294 if (hem_list_is_bottom_bt(r->hopnum, 0))
1295 ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
1296 &head->branch[i], &head->leaf);
1297 else
1298 ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
1299 &hem_list->mid_bt[i][1]);
1300
1301 if (ret < 0)
1302 return ret;
1303
1304 total += ret;
1305 }
1306
1307 list_splice(&head->leaf, &hem_list->btm_bt);
1308 list_splice(&head->root, &hem_list->root_bt);
1309 for (i = 0; i < region_cnt; i++)
1310 list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
1311
1312 return 0;
1313 }
1314
1315 static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
1316 struct hns_roce_hem_list *hem_list, int unit,
1317 const struct hns_roce_buf_region *regions,
1318 int region_cnt)
1319 {
1320 struct hns_roce_hem_item *root_hem;
1321 struct hns_roce_hem_head head;
1322 int max_ba_num;
1323 int ret;
1324 int i;
1325
1326 root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
1327 if (root_hem)
1328 return 0;
1329
1330 max_ba_num = 0;
1331 root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
1332 region_cnt);
1333 if (IS_ERR(root_hem))
1334 return PTR_ERR(root_hem);
1335
1336 /* List head for storing all allocated HEM items */
1337 INIT_LIST_HEAD(&head.root);
1338 INIT_LIST_HEAD(&head.leaf);
1339 for (i = 0; i < region_cnt; i++)
1340 INIT_LIST_HEAD(&head.branch[i]);
1341
1342 hem_list->root_ba = root_hem->dma_addr;
1343 list_add(&root_hem->list, &head.root);
1344 ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
1345 region_cnt);
1346 if (ret) {
1347 for (i = 0; i < region_cnt; i++)
1348 hem_list_free_all(hr_dev, &head.branch[i]);
1349
1350 hem_list_free_all(hr_dev, &head.root);
1351 }
1352
1353 return ret;
1354 }
1355
1356 /* This is the bottom bt pages number of a 100G MR on 4K OS, assuming
1357 * the bt page size is not expanded by cal_best_bt_pg_sz()
1358 */
1359 #define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
1360
1361 /* construct the base address table and link them by address hop config */
1362 int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
1363 struct hns_roce_hem_list *hem_list,
1364 const struct hns_roce_buf_region *regions,
1365 int region_cnt, unsigned int bt_pg_shift)
1366 {
1367 const struct hns_roce_buf_region *r;
1368 int ofs, end;
1369 int loop;
1370 int unit;
1371 int ret;
1372 int i;
1373
1374 if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
1375 dev_err(hr_dev->dev, "invalid region region_cnt %d!\n",
1376 region_cnt);
1377 return -EINVAL;
1378 }
1379
1380 unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
1381 for (i = 0; i < region_cnt; i++) {
1382 r = ®ions[i];
1383 if (!r->count)
1384 continue;
1385
1386 end = r->offset + r->count;
1387 for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
1388 if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
1389 cond_resched();
1390
1391 ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
1392 hem_list->mid_bt[i],
1393 &hem_list->btm_bt);
1394 if (ret) {
1395 dev_err(hr_dev->dev,
1396 "alloc hem trunk fail ret = %d!\n", ret);
1397 goto err_alloc;
1398 }
1399 }
1400 }
1401
1402 ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
1403 region_cnt);
1404 if (ret)
1405 dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
1406 else
1407 return 0;
1408
1409 err_alloc:
1410 hns_roce_hem_list_release(hr_dev, hem_list);
1411
1412 return ret;
1413 }
1414
1415 void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
1416 struct hns_roce_hem_list *hem_list)
1417 {
1418 int i, j;
1419
1420 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1421 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1422 hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
1423
1424 hem_list_free_all(hr_dev, &hem_list->root_bt);
1425 INIT_LIST_HEAD(&hem_list->btm_bt);
1426 hem_list->root_ba = 0;
1427 }
1428
1429 void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
1430 {
1431 int i, j;
1432
1433 INIT_LIST_HEAD(&hem_list->root_bt);
1434 INIT_LIST_HEAD(&hem_list->btm_bt);
1435 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1436 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1437 INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
1438 }
1439
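/* Find the bottom-level BT page covering @offset and return the CPU address
 * of that entry; *mtt_cnt is set to the number of entries left from @offset
 * to the end of the page.
 */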
1440 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
1441 struct hns_roce_hem_list *hem_list,
1442 int offset, int *mtt_cnt)
1443 {
1444 struct list_head *head = &hem_list->btm_bt;
1445 struct hns_roce_hem_item *hem, *temp_hem;
1446 void *cpu_base = NULL;
1447 int loop = 1;
1448 int nr = 0;
1449
1450 list_for_each_entry_safe(hem, temp_hem, head, sibling) {
1451 if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
1452 cond_resched();
1453 loop++;
1454
1455 if (hem_list_page_is_in_range(hem, offset)) {
1456 nr = offset - hem->start;
1457 cpu_base = hem->addr + nr * BA_BYTE_LEN;
1458 nr = hem->end + 1 - offset;
1459 break;
1460 }
1461 }
1462
1463 if (mtt_cnt)
1464 *mtt_cnt = nr;
1465
1466 return cpu_base;
1467 }
1468