Lines Matching +full:ctx +full:- +full:asid

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
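/*
 * Look up the pgt_info of a hop page table by the address of its shadow copy
 * in the per-context hash. Returns NULL if the hop is not tracked.
 */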
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = NULL;

        hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
                                (unsigned long) hop_addr)
                if (hop_addr == pgt_info->shadow_addr)
                        break;

        return pgt_info;
}
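/*
 * Release a hop page table: return the physical page to the device pool,
 * remove it from the shadow hash and free its shadow copy.
 */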
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
        struct hl_device *hdev = ctx->hdev;

        gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
                        hdev->asic_prop.mmu_hop_table_size);
        hash_del(&pgt_info->node);
        kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
        kfree(pgt_info);
}
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

        _free_hop(ctx, pgt_info);
}
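/*
 * Allocate a hop page table. Each hop has two representations: a page taken
 * from the device's page-table pool (programmed through the ASIC callbacks)
 * and a zeroed kernel "shadow" buffer that mirrors it for host-side walks.
 * Returns the shadow address, or ULLONG_MAX on failure.
 */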
static u64 alloc_hop(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pgt_info *pgt_info;
        u64 phys_addr, shadow_addr;

        pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
        if (!pgt_info)
                return ULLONG_MAX;

        phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
                                        prop->mmu_hop_table_size);
        if (!phys_addr) {
                dev_err(hdev->dev, "failed to allocate page\n");
                goto pool_add_err;
        }

        shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
                                                GFP_KERNEL);
        if (!shadow_addr)
                goto shadow_err;

        pgt_info->phys_addr = phys_addr;
        pgt_info->shadow_addr = shadow_addr;
        pgt_info->ctx = ctx;
        pgt_info->num_of_ptes = 0;
        hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

        return shadow_addr;

shadow_err:
        gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
                        prop->mmu_hop_table_size);
pool_add_err:
        kfree(pgt_info);

        return ULLONG_MAX;
}
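/*
 * Hop 0 tables are preallocated, one per ASID, both in device memory and in
 * the shadow area, so their addresses are computed rather than looked up in
 * the hash.
 */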
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
        return ctx->hdev->asic_prop.mmu_pgt_addr +
                        (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
        return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
                        (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
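/*
 * Flush posted PTE writes to the device: a full memory barrier followed by a
 * read from the device ensures all earlier writes have landed.
 */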
static void flush(struct hl_ctx *ctx)
{
        /* flush all writes from all cores to reach PCI */
        mb();
        ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}
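/*
 * PTE values in the shadow tables hold the address of the next *shadow* hop
 * plus flag bits in the 12 LSBs. Before writing the physical PTE, clear the
 * flags, translate the shadow hop address to its physical counterpart and
 * add the flags back.
 */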
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
        u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
                        (val & FLAGS_MASK);

        ctx->hdev->asic_funcs->write_pte(ctx->hdev,
                                get_phys_addr(ctx, shadow_pte_addr),
                                phys_val);

        *(u64 *) (uintptr_t) shadow_pte_addr = val;
}
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
                                        u64 val)
{
        /* The value to write is already the physical address + flags */
        ctx->hdev->asic_funcs->write_pte(ctx->hdev,
                                get_phys_addr(ctx, shadow_pte_addr),
                                val);

        *(u64 *) (uintptr_t) shadow_pte_addr = val;
}
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
        /* no need to transform the value to a physical address */
        write_final_pte(ctx, pte_addr, 0);
}
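/*
 * Each hop is reference-counted by the number of valid PTEs it holds; the
 * last put frees the hop.
 */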
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
        get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}
/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the pte was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
        int num_of_ptes_left;

        pgt_info->num_of_ptes--;

        /* save the number of ptes left, since _free_hop() releases pgt_info */
        num_of_ptes_left = pgt_info->num_of_ptes;
        if (!num_of_ptes_left)
                _free_hop(ctx, pgt_info);

        return num_of_ptes_left;
}
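/*
 * The PTE address within a hop is derived from the virtual address: mask out
 * this hop's index bits, shift them down and scale by the PTE size.
 */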
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
                                        u64 *hop_addr_arr, u64 virt_addr, int hop_idx)
{
        u64 mask, shift;

        mask = mmu_prop->hop_masks[hop_idx];
        shift = mmu_prop->hop_shifts[hop_idx];

        return hop_addr_arr[hop_idx] +
                        ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
                                                bool *is_new_hop)
{
        u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

        if (hop_addr == ULLONG_MAX) {
                hop_addr = alloc_hop(ctx);
                *is_new_hop = (hop_addr != ULLONG_MAX);
        }

        return hop_addr;
}
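/*
 * Translate an address inside a shadow hop to the matching address inside the
 * physical hop. Hop 0 is special-cased since it is not in the shadow hash.
 */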
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
        u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
        u64 shadow_hop_addr = shadow_addr & ~page_mask;
        u64 pte_offset = shadow_addr & page_mask;
        u64 phys_hop_addr;

        if (shadow_hop_addr != get_hop0_addr(ctx))
                phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
        else
                phys_hop_addr = get_phys_hop0_addr(ctx);

        return phys_hop_addr + pte_offset;
}
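/*
 * With DRAM default page mapping enabled, every DRAM virtual address of a
 * user context starts out mapped (as a huge page) to a single default
 * physical page. Build the full hop0->hop1->hop2->hop3 chain up front, so
 * later DRAM map/unmap calls only swap individual hop3 PTEs.
 */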
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
                hop2_pte_addr, hop3_pte_addr, pte_val;
        int rc, i, j, hop3_allocated = 0;

        if ((!prop->dram_supports_virtual_memory) ||
                        (!hdev->dram_default_page_mapping) ||
                        (ctx->asid == HL_KERNEL_ASID_ID))
                return 0;

        num_of_hop3 = prop->dram_size_for_default_page_mapping;
        do_div(num_of_hop3, prop->dram_page_size);
        do_div(num_of_hop3, HOP_PTE_ENTRIES_512);

        /* add hop1 and hop2 */
        total_hops = num_of_hop3 + 2;

        ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
        if (!ctx->dram_default_hops)
                return -ENOMEM;

        hop0_addr = get_hop0_addr(ctx);

        hop1_addr = alloc_hop(ctx);
        if (hop1_addr == ULLONG_MAX) {
                dev_err(hdev->dev, "failed to alloc hop 1\n");
                rc = -ENOMEM;
                goto hop1_err;
        }

        ctx->dram_default_hops[total_hops - 1] = hop1_addr;

        hop2_addr = alloc_hop(ctx);
        if (hop2_addr == ULLONG_MAX) {
                dev_err(hdev->dev, "failed to alloc hop 2\n");
                rc = -ENOMEM;
                goto hop2_err;
        }

        ctx->dram_default_hops[total_hops - 2] = hop2_addr;

        for (i = 0 ; i < num_of_hop3 ; i++) {
                ctx->dram_default_hops[i] = alloc_hop(ctx);
                if (ctx->dram_default_hops[i] == ULLONG_MAX) {
                        dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
                        rc = -ENOMEM;
                        goto hop3_err;
                }
                hop3_allocated++;
        }

        /* need only pte 0 in hops 0 and 1 */
        pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
        write_pte(ctx, hop0_addr, pte_val);

        pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
        write_pte(ctx, hop1_addr, pte_val);
        get_pte(ctx, hop1_addr);

        hop2_pte_addr = hop2_addr;
        for (i = 0 ; i < num_of_hop3 ; i++) {
                pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
                write_pte(ctx, hop2_pte_addr, pte_val);
                get_pte(ctx, hop2_addr);
                hop2_pte_addr += HL_PTE_SIZE;
        }

        pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
                        LAST_MASK | PAGE_PRESENT_MASK;

        for (i = 0 ; i < num_of_hop3 ; i++) {
                hop3_pte_addr = ctx->dram_default_hops[i];
                for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
                        write_final_pte(ctx, hop3_pte_addr, pte_val);
                        get_pte(ctx, ctx->dram_default_hops[i]);
                        hop3_pte_addr += HL_PTE_SIZE;
                }
        }

        flush(ctx);

        return 0;

hop3_err:
        for (i = 0 ; i < hop3_allocated ; i++)
                free_hop(ctx, ctx->dram_default_hops[i]);

        free_hop(ctx, hop2_addr);
hop2_err:
        free_hop(ctx, hop1_addr);
hop1_err:
        kfree(ctx->dram_default_hops);

        return rc;
}
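/*
 * Tear down the default DRAM mapping: drop the per-PTE references taken in
 * dram_default_mapping_init() and free the preallocated hops.
 */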
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
                hop2_pte_addr, hop3_pte_addr;
        int i, j;

        if ((!prop->dram_supports_virtual_memory) ||
                        (!hdev->dram_default_page_mapping) ||
                        (ctx->asid == HL_KERNEL_ASID_ID))
                return;

        num_of_hop3 = prop->dram_size_for_default_page_mapping;
        do_div(num_of_hop3, prop->dram_page_size);
        do_div(num_of_hop3, HOP_PTE_ENTRIES_512);

        hop0_addr = get_hop0_addr(ctx);
        /* add hop1 and hop2 */
        total_hops = num_of_hop3 + 2;
        hop1_addr = ctx->dram_default_hops[total_hops - 1];
        hop2_addr = ctx->dram_default_hops[total_hops - 2];

        for (i = 0 ; i < num_of_hop3 ; i++) {
                hop3_pte_addr = ctx->dram_default_hops[i];
                for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
                        clear_pte(ctx, hop3_pte_addr);
                        put_pte(ctx, ctx->dram_default_hops[i]);
                        hop3_pte_addr += HL_PTE_SIZE;
                }
        }

        hop2_pte_addr = hop2_addr;
        for (i = 0 ; i < num_of_hop3 ; i++) {
                clear_pte(ctx, hop2_pte_addr);
                put_pte(ctx, hop2_addr);
                hop2_pte_addr += HL_PTE_SIZE;
        }

        clear_pte(ctx, hop1_addr);
        put_pte(ctx, hop1_addr);
        clear_pte(ctx, hop0_addr);

        kfree(ctx->dram_default_hops);

        flush(ctx);
}
/**
 * hl_mmu_v1_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 *
 * Return: 0 for success, non-zero for failure.
 */
static int hl_mmu_v1_init(struct hl_device *hdev)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        int rc;

        hdev->mmu_priv.dr.mmu_pgt_pool =
                        gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

        if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
                dev_err(hdev->dev, "Failed to create page gen pool\n");
                return -ENOMEM;
        }

        rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
                        prop->mmu_hop0_tables_total_size,
                        prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
                        -1);
        if (rc) {
                dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
                goto err_pool_add;
        }

        hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
                                                        GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
                rc = -ENOMEM;
                goto err_pool_add;
        }

        /* MMU H/W init will be done in device hw_init() */

        return 0;

err_pool_add:
        gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

        return rc;
}
/**
 * hl_mmu_v1_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
static void hl_mmu_v1_fini(struct hl_device *hdev)
{
        /* MMU H/W fini was already done in device hw_fini() */

        if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
                kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
                gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

                /* Make sure that if we arrive here again without init being
                 * called we won't cause a kernel panic. This can happen, for
                 * example, if we fail during hard reset code at certain points
                 */
                hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
        }
}
/**
 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize the shadow page-table hash and create the DRAM default page
 * mapping if needed.
 * Return: 0 on success, non-zero otherwise.
 */
static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
{
        hash_init(ctx->mmu_shadow_hash);

        return dram_default_mapping_init(ctx);
}
/*
 * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free the DRAM default page mapping hops
 * - Free any pgts which were not freed yet
 */
static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct pgt_info *pgt_info;
        struct hlist_node *tmp;
        int i;

        dram_default_mapping_fini(ctx);

        if (!hash_empty(ctx->mmu_shadow_hash))
                dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
                        ctx->asid);

        hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
                dev_err_ratelimited(hdev->dev,
                        "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
                        pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
                _free_hop(ctx, pgt_info);
        }
}
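/*
 * Walk hop0..hop3 for the given virtual address, then hop4 unless the hop3
 * PTE is marked as last (huge page). DRAM addresses must unmap as huge
 * pages; with default page mapping enabled, the hop3 PTE is restored to
 * point at the default page instead of being cleared.
 */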
static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
                                u64 virt_addr, bool is_dram_addr)
{
        u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
        bool is_huge, clear_hop3 = true;
        int hop_idx;

        /* shifts and masks are the same in PMMU and HPMMU, use one of them */
        mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

        for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
                if (hop_idx == MMU_HOP0) {
                        hop_addr[hop_idx] = get_hop0_addr(ctx);
                } else {
                        hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
                        if (hop_addr[hop_idx] == ULLONG_MAX)
                                goto not_mapped;
                }

                hop_pte_addr[hop_idx] =
                                get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);

                curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
        }

        is_huge = curr_pte & mmu_prop->last_mask;

        if (is_dram_addr && !is_huge) {
                dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
                return -EFAULT;
        }

        if (!is_huge) {
                hop_idx = MMU_HOP4;
                hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
                if (hop_addr[hop_idx] == ULLONG_MAX)
                        goto not_mapped;

                hop_pte_addr[hop_idx] =
                                get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
                curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
                clear_hop3 = false;
        }

        if (hdev->dram_default_page_mapping && is_dram_addr) {
                u64 default_pte = (prop->mmu_dram_default_page_addr &
                                HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
                                        PAGE_PRESENT_MASK;
                if (curr_pte == default_pte) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
                                        virt_addr);
                        goto not_mapped;
                }

                if (!(curr_pte & PAGE_PRESENT_MASK)) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
                                        virt_addr);
                        goto not_mapped;
                }

                hop_idx = MMU_HOP3;
                write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
                put_pte(ctx, hop_addr[hop_idx]);
        } else {
                if (!(curr_pte & PAGE_PRESENT_MASK))
                        goto not_mapped;

                if (hop_addr[MMU_HOP4])
                        clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
                else
                        clear_pte(ctx, hop_pte_addr[MMU_HOP3]);

                if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
                        clear_hop3 = true;

                if (!clear_hop3)
                        goto mapped;

                for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
                        clear_pte(ctx, hop_pte_addr[hop_idx]);

                        if (hop_idx == MMU_HOP0)
                                break;

                        if (put_pte(ctx, hop_addr[hop_idx]))
                                goto mapped;
                }
        }

mapped:
        return 0;

not_mapped:
        dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
                virt_addr);

        return -EINVAL;
}
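/*
 * Map one page (regular or huge; huge pages use one hop less), allocating
 * any missing intermediate hops along the way. On failure, newly allocated
 * hops are freed in reverse order.
 */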
static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                        u32 page_size, bool is_dram_addr)
{
        u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
        bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
        int num_hops, hop_idx, prev_hop, rc = -ENOMEM;

        /*
         * This function can map a page or a huge page. For a huge page there
         * are only 3 hops rather than 4. DRAM allocation uses huge pages only,
         * but user memory could have been allocated with either page size.
         */
        if (is_dram_addr) {
                mmu_prop = &prop->dmmu;
                is_huge = true;
        } else if (page_size == prop->pmmu_huge.page_size) {
                mmu_prop = &prop->pmmu_huge;
                is_huge = true;
        } else {
                mmu_prop = &prop->pmmu;
                is_huge = false;
        }

        num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;

        for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
                if (hop_idx == MMU_HOP0) {
                        hop_addr[hop_idx] = get_hop0_addr(ctx);
                } else {
                        hop_addr[hop_idx] =
                                        get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
                        if (hop_addr[hop_idx] == ULLONG_MAX)
                                goto err;
                }

                hop_pte_addr[hop_idx] =
                                get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
                curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
        }

        if (hdev->dram_default_page_mapping && is_dram_addr) {
                u64 default_pte = (prop->mmu_dram_default_page_addr &
                                        HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
                                                PAGE_PRESENT_MASK;

                if (curr_pte != default_pte) {
                        dev_err(hdev->dev,
                                "DRAM: mapping already exists for virt_addr 0x%llx\n",
                                        virt_addr);
                        rc = -EINVAL;
                        goto err;
                }

                for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
                        if (hop_new[hop_idx]) {
                                dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
                                rc = -EFAULT;
                                goto err;
                        }
                }
        } else if (curr_pte & PAGE_PRESENT_MASK) {
                dev_err(hdev->dev,
                        "mapping already exists for virt_addr 0x%llx\n", virt_addr);

                for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
                        dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
                                        *(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
                                        hop_pte_addr[hop_idx]);

                rc = -EINVAL;
                goto err;
        }

        curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
                        | PAGE_PRESENT_MASK;

        write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);

        /* link each newly allocated hop into its parent's PTE */
        for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
                prev_hop = hop_idx - 1;

                if (hop_new[hop_idx]) {
                        curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
                        write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
                        if (hop_idx != MMU_HOP1)
                                get_pte(ctx, hop_addr[prev_hop]);
                }
        }

        get_pte(ctx, hop_addr[num_hops - 1]);

        return 0;

err:
        for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
                if (hop_new[hop_idx])
                        free_hop(ctx, hop_addr[hop_idx]);
        }

        return rc;
}
/*
 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
{

}
static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
                                struct hl_mmu_hop_info *hops)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
        bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
        int i, used_hops;

        is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
                                                prop->dmmu.start_addr,
                                                prop->dmmu.end_addr);
        is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
                                                prop->pmmu.start_addr,
                                                prop->pmmu.end_addr);
        is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
                                                prop->pmmu_huge.page_size,
                                                prop->pmmu_huge.start_addr,
                                                prop->pmmu_huge.end_addr);
        if (is_dram_addr) {
                mmu_prop = &prop->dmmu;
                is_huge = true;
        } else if (is_pmmu_addr) {
                mmu_prop = &prop->pmmu;
                is_huge = false;
        } else if (is_pmmu_h_addr) {
                mmu_prop = &prop->pmmu_huge;
                is_huge = true;
        } else {
                return -EINVAL;
        }

        used_hops = mmu_prop->num_hops;

        /* huge pages use one less hop */
        if (is_huge)
                used_hops--;

        hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
        hops->hop_info[0].hop_pte_addr =
                        hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
                                        hops->hop_info[0].hop_addr, virt_addr);
        hops->hop_info[0].hop_pte_val =
                        hdev->asic_funcs->read_pte(hdev,
                                                hops->hop_info[0].hop_pte_addr);

        for (i = 1 ; i < used_hops ; i++) {
                hops->hop_info[i].hop_addr =
                        hl_mmu_get_next_hop_addr(ctx,
                                        hops->hop_info[i - 1].hop_pte_val);
                if (hops->hop_info[i].hop_addr == ULLONG_MAX)
                        return -EFAULT;

                hops->hop_info[i].hop_pte_addr =
                                hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
                                                hops->hop_info[i].hop_addr,
                                                virt_addr);
                hops->hop_info[i].hop_pte_val =
                                hdev->asic_funcs->read_pte(hdev,
                                                hops->hop_info[i].hop_pte_addr);

                if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
                        return -EFAULT;

                if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
                        break;
        }

        /* if we passed over all hops then no last hop was found */
        if (i == mmu_prop->num_hops)
                return -EFAULT;

        if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
                return -EFAULT;

        hops->used_hops = i + 1;

        return 0;
}
/*
 * hl_mmu_v1_set_funcs - set the MMU functions for working with MMU v1
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to fill
 */
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
        mmu->init = hl_mmu_v1_init;
        mmu->fini = hl_mmu_v1_fini;
        mmu->ctx_init = hl_mmu_v1_ctx_init;
        mmu->ctx_fini = hl_mmu_v1_ctx_fini;
        mmu->map = hl_mmu_v1_map;
        mmu->unmap = hl_mmu_v1_unmap;
        mmu->flush = flush;
        mmu->swap_out = hl_mmu_v1_swap_out;
        mmu->swap_in = hl_mmu_v1_swap_in;
        mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
}
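/*
 * Usage sketch (an assumption based on the driver's common MMU code, not part
 * of this file): device init is expected to select the v1 implementation for
 * the DRAM-resident page tables and then drive it through the ops table:
 *
 *	hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
 *	rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
 */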