1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "osdep.h"
4 #include "hmc.h"
5 #include "defs.h"
6 #include "type.h"
7 #include "protos.h"
8 #include "virtchnl.h"
9
10 /**
11 * irdma_find_sd_index_limit - finds segment descriptor index limit
12 * @hmc_info: pointer to the HMC configuration information structure
13 * @type: type of HMC resources we're searching
14 * @idx: starting index for the object
15 * @cnt: number of objects we're trying to create
16 * @sd_idx: pointer to return index of the segment descriptor in question
17 * @sd_limit: pointer to return the maximum number of segment descriptors
18 *
19 * This function calculates the segment descriptor index and index limit
20 * for the resource defined by irdma_hmc_rsrc_type.
21 */
22
static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
				      u32 idx, u32 cnt, u32 *sd_idx,
				      u32 *sd_limit)
{
	u64 start, end;

	/* FPM byte offset of the first object and of one past the last */
	start = hmc_info->hmc_obj[type].base +
		hmc_info->hmc_obj[type].size * idx;
	end = start + hmc_info->hmc_obj[type].size * cnt;

	/* translate the byte range into direct backing-page sized segments;
	 * the limit is one past the segment holding the last byte
	 */
	*sd_idx = (u32)(start / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((end - 1) / IRDMA_HMC_DIRECT_BP_SIZE) + 1;
}
36
37 /**
38 * irdma_find_pd_index_limit - finds page descriptor index limit
39 * @hmc_info: pointer to the HMC configuration information struct
40 * @type: HMC resource type we're examining
41 * @idx: starting index for the object
42 * @cnt: number of objects we're trying to create
43 * @pd_idx: pointer to return page descriptor index
44 * @pd_limit: pointer to return page descriptor index limit
45 *
46 * Calculates the page descriptor index and index limit for the resource
47 * defined by irdma_hmc_rsrc_type.
48 */
49
static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
				      u32 idx, u32 cnt, u32 *pd_idx,
				      u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

	/* FPM byte offsets of the first object and of one past the last;
	 * naming and style kept consistent with irdma_find_sd_index_limit()
	 */
	fpm_addr = hmc_info->hmc_obj[type].base +
		   hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;

	/* limit is one past the paged backing page holding the last byte */
	*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}
63
64 /**
65 * irdma_set_sd_entry - setup entry for sd programming
66 * @pa: physical addr
67 * @idx: sd index
68 * @type: paged or direct sd
69 * @entry: sd entry ptr
70 */
static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
			       struct irdma_update_sd_entry *entry)
{
	u64 data = pa;

	/* compose SDDATALOW: max backing-page count, sd type, valid bit */
	data |= FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT,
			   IRDMA_HMC_MAX_BP_COUNT);
	data |= FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
			   type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
	data |= FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
	entry->data = data;

	/* command: sd index, write operation, and bit 15 set */
	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}
82
83 /**
84 * irdma_clr_sd_entry - setup entry for sd clear
85 * @idx: sd index
86 * @type: paged or direct sd
87 * @entry: sd entry ptr
88 */
static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
			       struct irdma_update_sd_entry *entry)
{
	u64 data;

	/* same layout as irdma_set_sd_entry() but with no physical address
	 * and the valid bit left clear
	 */
	data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT,
			  IRDMA_HMC_MAX_BP_COUNT);
	data |= FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
			   type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
	entry->data = data;

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}
98
99 /**
100 * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
101 * @dev: pointer to our device struct
102 * @sd_idx: segment descriptor index
103 * @pd_idx: page descriptor index
104 */
static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
					      u32 pd_idx)
{
	u32 inv_cmd;

	/* build the PDINV register value from the pd/sd coordinates */
	inv_cmd = FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx);

	writel(inv_cmd, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}
114
115 /**
116 * irdma_hmc_sd_one - setup 1 sd entry for cqp
117 * @dev: pointer to the device structure
118 * @hmc_fn_id: hmc's function id
119 * @pa: physical addr
120 * @sd_idx: sd index
121 * @type: paged or direct sd
122 * @setsd: flag to set or clear sd
123 */
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
		     enum irdma_sd_entry_type type, bool setsd)
{
	struct irdma_update_sds_info sdinfo;

	/* single-entry update: build one sd entry and hand it to the cqp */
	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (!setsd)
		irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
	else
		irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);

	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
137
138 /**
139 * irdma_hmc_sd_grp - setup group of sd entries for cqp
140 * @dev: pointer to the device structure
141 * @hmc_info: pointer to the HMC configuration information struct
142 * @sd_index: sd index
143 * @sd_cnt: number of sd entries
144 * @setsd: flag to set or clear sd
145 */
static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
			    struct irdma_hmc_info *hmc_info, u32 sd_index,
			    u32 sd_cnt, bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {};
	u64 pa;
	u32 i;
	int ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		/* Only program entries whose software state matches the
		 * requested operation: set entries already marked valid,
		 * clear entries already marked invalid. (The former
		 * "!sd_entry" test was dead code: the address of an array
		 * element can never be NULL.)
		 */
		if (sd_entry->valid != setsd)
			continue;
		if (setsd) {
			/* physical address comes from the pd page for paged
			 * sds, from the backing page for direct sds
			 */
			pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			     sd_entry->u.pd_table.pd_page_addr.pa :
			     sd_entry->u.bp.addr.pa;
			irdma_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			irdma_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		/* flush a full batch to the cqp before continuing */
		if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: sd_programming failed err=%d\n",
					  ret_code);
				return ret_code;
			}

			sdinfo.cnt = 0;
		}
	}
	/* submit any remaining partial batch */
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}
190
191 /**
192 * irdma_hmc_finish_add_sd_reg - program sd entries for objects
193 * @dev: pointer to the device structure
194 * @info: create obj info
195 */
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev * dev,struct irdma_hmc_create_obj_info * info)196 static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
197 struct irdma_hmc_create_obj_info *info)
198 {
199 if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
200 return -EINVAL;
201
202 if ((info->start_idx + info->count) >
203 info->hmc_info->hmc_obj[info->rsrc_type].cnt)
204 return -EINVAL;
205
206 if (!info->add_sd_cnt)
207 return 0;
208 return irdma_hmc_sd_grp(dev, info->hmc_info,
209 info->hmc_info->sd_indexes[0], info->add_sd_cnt,
210 true);
211 }
212
213 /**
214 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
215 * @dev: pointer to the device structure
216 * @info: pointer to irdma_hmc_create_obj_info struct
217 *
218 * This will allocate memory for PDs and backing pages and populate
219 * the sd and pd entries.
220 */
int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	int ret_code = 0;

	/* Gen3+ objects placed in local memory need no host backing store */
	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
	    dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
		return 0;

	/* requested object range must fit inside the object table */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
			  info->rsrc_type, info->start_idx, info->count,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	/* compute the sd and pd index ranges covering the object range */
	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		/* allocate (or reuse) the backing store for sd entry j */
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		/* paged sds for our own (non-PBLE) objects also need pd
		 * backing pages for the slice of pds inside this sd
		 */
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
			/* clamp the global pd range to sd j's pd window */
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* roll back the pds added for this sd.
				 * NOTE(review): ret_code is not propagated
				 * here; the loop continues and may overwrite
				 * it on the next iteration — confirm intended.
				 */
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		/* remember new sds so finish_add_sd_reg can program them */
		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	/* unwind sds [sd_idx, j) in reverse order */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = -EINVAL;
			break;
		}
		j--;
	}

	return ret_code;
}
322
323 /**
324 * irdma_finish_del_sd_reg - delete sd entries for objects
325 * @dev: pointer to the device structure
326 * @info: dele obj info
327 * @reset: true if called before reset
328 */
static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
				   struct irdma_hmc_del_obj_info *info,
				   bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	int ret_code = 0;
	u32 i, sd_idx;
	struct irdma_dma_mem *mem;

	/* only a privileged function programs the hardware, and a reset
	 * makes the sd programming unnecessary
	 */
	if (dev->privileged && !reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
	/* free the DMA backing memory for every deleted sd, even if the
	 * cqp programming above failed
	 */
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		/* paged sds keep their DMA page in pd_page_addr, direct sds
		 * in bp.addr
		 */
		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
		      &sd_entry->u.pd_table.pd_page_addr :
		      &sd_entry->u.bp.addr;

		/* NOTE(review): mem is the address of a struct member and can
		 * never be NULL; only the mem->va check is meaningful here
		 */
		if (!mem || !mem->va) {
			ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
		} else {
			dma_free_coherent(dev->hw->device, mem->size, mem->va,
					  mem->pa);
			mem->va = NULL;
		}
	}

	return ret_code;
}
363
364 /**
365 * irdma_sc_del_hmc_obj - remove pe hmc objects
366 * @dev: pointer to the device structure
367 * @info: pointer to irdma_hmc_del_obj_info struct
368 * @reset: true if called before reset
369 *
370 * This will de-populate the SDs and PDs. It frees
371 * the memory for PDS and backing storage. After this function is returned,
372 * caller should deallocate memory allocated previously for
373 * book-keeping information about PDs and backing storage.
374 */
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
			 struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	int ret_code = 0;

	/* objects in local memory have no host backing to tear down */
	if (dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
		return 0;

	/* requested object range must fit inside the object table */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->count, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	/* pass 1: remove backing pages from every valid pd in the range of
	 * each valid, paged sd
	 */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		/* only paged sds have per-pd backing pages */
		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
		return -EINVAL;
	}

	/* pass 2: mark each sd for deletion and record its index so
	 * irdma_finish_del_sd_reg() can program and free them
	 */
	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
					(u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			/* PBLE pd bookkeeping for a foreign hmc_info is only
			 * freed here, not in irdma_remove_pd_bp()
			 */
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}
472
473 /**
474 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
475 * @hw: pointer to our hw struct
476 * @hmc_info: pointer to the HMC configuration information struct
477 * @sd_index: segment descriptor index to manipulate
478 * @type: what type of segment descriptor we're manipulating
479 * @direct_mode_sz: size to alloc in direct mode
480 */
int irdma_add_sd_table_entry(struct irdma_hw *hw,
			     struct irdma_hmc_info *hmc_info, u32 sd_index,
			     enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry =
		&hmc_info->sd_table.sd_entry[sd_index];

	if (!sd_entry->valid) {
		struct irdma_dma_mem dma_mem;
		u64 alloc_len = (type == IRDMA_SD_TYPE_PAGED) ?
				IRDMA_HMC_PAGED_BP_SIZE : direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
						&dma_mem.pa, GFP_KERNEL);
		if (!dma_mem.va)
			return -ENOMEM;

		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
				&sd_entry->u.pd_table.pd_entry_virt_mem;

			/* bookkeeping array for the 512 pds in this page */
			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_KERNEL);
			if (!vmem->va) {
				/* undo the DMA allocation before bailing */
				dma_free_coherent(hw->device, dma_mem.size,
						  dma_mem.va, dma_mem.pa);
				dma_mem.va = NULL;
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
			       sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			memcpy(&sd_entry->u.bp.addr, &dma_mem,
			       sizeof(sd_entry->u.bp.addr));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		sd_entry->entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}

	/* direct sds are reference counted per caller */
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}
533
534 /**
535 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
536 * @dev: pointer to our device structure
537 * @hmc_info: pointer to the HMC configuration information structure
538 * @pd_index: which page descriptor index to manipulate
539 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
540 *
541 * This function:
542 * 1. Initializes the pd entry
543 * 2. Adds pd_entry in the pd_table
544 * 3. Mark the entry valid in irdma_hmc_pd_entry structure
545 * 4. Initializes the pd_entry's ref count to 1
546 * assumptions:
547 * 1. The memory for pd should be pinned down, physically contiguous and
548 * aligned on 4K boundary and zeroed memory.
549 * 2. It should be 4K in size.
550 */
int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			     struct irdma_hmc_info *hmc_info, u32 pd_index,
			     struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	/* pd_index must map to an sd inside the sd table */
	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	/* direct sds have no pd table; nothing to do */
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		/* use the caller's preallocated page or allocate a new one */
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
					   IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			page->va = dma_alloc_coherent(dev->hw->device,
						      page->size, &page->pa,
						      GFP_KERNEL);
			if (!page->va)
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
		/* write the page descriptor (pa | valid bit) into the sd's
		 * pd page at this pd's slot
		 */
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;

		/* invalidate the hardware's cached pd for PF-owned functions */
		if (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id &&
		    dev->privileged)
			irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	/* reference count even when the entry already existed */
	pd_entry->bp.use_cnt++;

	return 0;
}
609
610 /**
611 * irdma_remove_pd_bp - remove a backing page from a page descriptor
612 * @dev: pointer to our HW structure
613 * @hmc_info: pointer to the HMC configuration information structure
614 * @idx: the page index
615 *
616 * This function:
617 * 1. Marks the entry in pd table (for paged address mode) or in sd table
618 * (for direct address mode) invalid.
619 * 2. Write to register PMPDINV to invalidate the backing page in FV cache
620 * 3. Decrement the ref count for the pd _entry
621 * assumptions:
622 * 1. Caller can deallocate the memory used by backing storage after this
623 * function returns.
624 */
int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		       struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	/* locate the sd and the pd's slot within it */
	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	/* keep the page while other users still reference it */
	if (--pd_entry->bp.use_cnt)
		return 0;

	pd_entry->valid = false;
	pd_table->use_cnt--;
	/* clear this pd's descriptor in the sd's pd page */
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	/* NOTE(review): the global pd index is passed here, whereas
	 * irdma_add_pd_table_entry() invalidates with rel_pd_idx — confirm
	 * which the PDINV register expects
	 */
	if (dev->privileged && dev->hmc_fn_id == hmc_info->hmc_fn_id)
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	/* caller-provided (rsrc_pg) pages are not ours to free */
	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return -EINVAL;

		dma_free_coherent(dev->hw->device, mem->size, mem->va,
				  mem->pa);
		mem->va = NULL;
	}
	/* last pd gone: drop the pd bookkeeping array too */
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}
671
672 /**
673 * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
674 * @hmc_info: pointer to the HMC configuration information structure
675 * @idx: the page index
676 */
int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry =
		&hmc_info->sd_table.sd_entry[idx];

	/* another user still references this backing page */
	sd_entry->u.bp.use_cnt--;
	if (sd_entry->u.bp.use_cnt)
		return -EBUSY;

	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;

	return 0;
}
690
691 /**
692 * irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
693 * @hmc_info: pointer to the HMC configuration information structure
694 * @idx: segment descriptor index to find the relevant page descriptor
695 */
int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry =
		&hmc_info->sd_table.sd_entry[idx];

	/* refuse while any pd in this page is still in use */
	if (sd_entry->u.pd_table.use_cnt)
		return -EBUSY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;

	return 0;
}
710