Lines matching "ctx->asid" (search tokens: +full:ctx +full:- +full:asid)

// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */
/* Map a CB into the device's MMU so the device can access it through a device VA. */
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 page_size = prop->pmmu.page_size;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is not supported\n");
		return -EINVAL;
	}

	if (cb->is_mmu_mapped)
		return 0;

	cb->roundup_size = roundup(cb->size, page_size);

	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
	if (!cb->virtual_addr) {
		dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
		return -ENOMEM;
	}

	mutex_lock(&hdev->mmu_lock);

	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
	if (rc) {
		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
		goto err_va_umap;
	}
	/* ... MMU cache invalidation elided in this listing ... */
	mutex_unlock(&hdev->mmu_lock);

	cb->is_mmu_mapped = true;
	return 0;

err_va_umap:
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
	mutex_unlock(&hdev->mmu_lock);
	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
	return rc;
}
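/*
 * Worked example (hypothetical, not among the matched lines): how the roundup
 * sizing above behaves. Assuming a 4 KiB PMMU page size, a 6000-byte CB is
 * mapped as two full pages (8192 bytes), so the VA-pool allocation and the MMU
 * mapping always cover whole MMU pages.
 */
static inline u64 example_cb_mapping_size(u32 cb_size, u32 page_size)
{
	/* e.g. roundup(6000, 4096) == 8192 for the values described above */
	return roundup(cb_size, page_size);
}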
/* Undo cb_map_mem(): unmap the CB from the device's MMU and return its device VA to the pool. */
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;

	mutex_lock(&hdev->mmu_lock);
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
	mutex_unlock(&hdev->mmu_lock);

	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}
/* Free the CB's backing memory: internal pool memory or DMA-coherent memory. */
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);

	kfree(cb);
}
/* Either return a pool CB to the pool for reuse or free it completely. */
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		atomic_set(&cb->is_handle_destroyed, 0);
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}
/* Allocate a CB descriptor and its backing memory (internal pool or DMA-coherent memory). */
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/* ... the latency-sensitive code path for command submission. Due to H/W ... */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	/* ... GFP_KERNEL fallback elided in this listing ... */

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		/* ... */
		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CB\n", cb_size);
		/* ... */
	}

	cb->kernel_address = p;
	cb->size = cb_size;
	return cb;
}
/* Argument bundle for the memory-manager alloc callback (fields inferred from their use below). */
struct hl_cb_mmap_mem_alloc_args {
	struct hl_device *hdev;
	struct hl_ctx *ctx;
	u32 cb_size;
	bool internal_cb;
	bool map_cb;
};
/* Memory-manager release callback: unmap the CB (if mapped) and release it. */
static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
	struct hl_cb *cb = buf->private;

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);
	cb_do_release(cb->hdev, cb);
}
/* Memory-manager alloc callback: recycle a pool CB or allocate a new one, then optionally MMU-map it. */
static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	struct hl_cb_mmap_mem_alloc_args *cb_args = args;
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = cb_args->ctx->asid;

	if (!cb_args->internal_cb) {
		/* Minimum allocation must be PAGE_SIZE */
		if (cb_args->cb_size < PAGE_SIZE)
			cb_args->cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
			spin_lock(&cb_args->hdev->cb_pool_lock);
			if (!list_empty(&cb_args->hdev->cb_pool)) {
				cb = list_first_entry(&cb_args->hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&cb_args->hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&cb_args->hdev->cb_pool_lock);
				dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
		if (!cb)
			return -ENOMEM;
	}

	cb->hdev = cb_args->hdev;
	cb->ctx = cb_args->ctx;
	cb->buf = buf;
	cb->buf->mappable_size = cb->size;
	cb->buf->private = cb;

	hl_ctx_get(cb->ctx);

	if (cb_args->map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(cb_args->hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(cb_args->ctx, cb);
		if (rc)
			goto release_cb;
	}

	return 0;

release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(cb_args->hdev, cb);
	return rc;
}
/* Memory-manager mmap callback: let the ASIC-specific code map the CB to userspace. */
static int hl_cb_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
{
	struct hl_cb *cb = buf->private;

	return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
						cb->bus_address, cb->size);
}
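/*
 * For context (a sketch, not among the matched lines): the three callbacks
 * above are presumably wired into the unified memory manager through a
 * hl_mmap_mem_buf_behavior descriptor along these lines, so every CB buffer
 * handle created through the manager uses them for alloc, release and mmap.
 */
static struct hl_mmap_mem_buf_behavior cb_behavior = {
	.topic = "CB",
	.mem_id = HL_MMAP_TYPE_CB,
	.alloc = hl_cb_mmap_mem_alloc,
	.release = hl_cb_mmap_mem_release,
	.mmap = hl_cb_mmap,
};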
/* Create a CB buffer through the memory manager and return its handle to the caller. */
int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb_mmap_mem_alloc_args args = {
		.hdev = hdev, .ctx = ctx, .cb_size = cb_size,
		.internal_cb = internal_cb, .map_cb = map_cb,
	};
	struct hl_mmap_mem_buf *buf;
	int ctx_id = ctx->asid;

	if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		return -EBUSY;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		return -EINVAL;
	}

	/* ... allocation of the mappable buffer via the memory manager elided in this listing ... */
	*handle = buf->handle;
	return 0;
}
/* Destroy a CB handle. A handle may only be destroyed once; the CB memory is
 * freed only when its last user drops it. Elided lines are marked "...".
 */
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
	/* ... CB lookup by handle elided; on lookup failure: ... */
		dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
			cb_handle);
		return -EINVAL;

	/* Make sure the CB handle isn't destroyed more than once */
	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
	if (rc) {
		dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
			cb_handle);
		return -EINVAL;
	}

	/* ... handle release elided; a CB that is still in use stays alive: ... */
	dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);
	/* ... */
}
/* Query a CB: return either its device VA (if MMU-mapped) or its CS usage count. */
static int hl_cb_info(struct hl_mem_mgr *mmg,
			u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
	/* ... CB lookup by handle elided; on lookup failure: ... */
		dev_err(mmg->dev,
			"CB info failed, no match to handle 0x%llx\n", handle);
		return -EINVAL;

	if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
		if (cb->is_mmu_mapped) {
			*device_va = cb->virtual_addr;
		} else {
			dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
			rc = -EINVAL;
		}
	} else {
		*usage_cnt = atomic_read(&cb->cs_cnt);
	}
	/* ... */
}
/* IOCTL entry point for CB operations: create, destroy and query command buffers. */
int hl_cb_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;
	struct hl_device *hdev = hpriv->hdev;
	union hl_cb_args *args = data;
	u64 handle = 0, device_va = 0;
	enum hl_device_status status;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}
		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(&hpriv->mem_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
				args->in.flags, &usage_cnt, &device_va);
		if (rc)
			break;

		memset(&args->out, 0, sizeof(args->out));
		if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
			args->out.device_va = device_va;
		else
			args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
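/*
 * Hypothetical userspace sketch (not among the matched lines) of driving the CB
 * IOCTL: create a CB, then destroy it by handle. The names union hl_cb_args,
 * HL_CB_OP_CREATE/HL_CB_OP_DESTROY and HL_IOCTL_CB are assumed to come from the
 * habanalabs uAPI header, which must be included for this to build; verify them
 * against the header actually installed on the system.
 */
#include <string.h>
#include <sys/ioctl.h>
/* ... plus the habanalabs uAPI header that defines union hl_cb_args and HL_IOCTL_CB ... */

static int create_and_destroy_cb(int fd, unsigned int size)
{
	union hl_cb_args args;
	unsigned long long handle;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_CREATE;		/* assumed op code from the uAPI header */
	args.in.cb_size = size;			/* the driver rounds this up to PAGE_SIZE */
	if (ioctl(fd, HL_IOCTL_CB, &args))
		return -1;
	handle = args.out.cb_handle;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_DESTROY;
	args.in.cb_handle = handle;
	return ioctl(fd, HL_IOCTL_CB, &args);
}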
/* Translate a handle reference into the CB it backs, and drop that reference. */
struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
{
	/* ... memory-manager buffer lookup elided in this listing ... */
	return buf->private;
}

void hl_cb_put(struct hl_cb *cb)
{
	hl_mmap_mem_buf_put(cb->buf);
}
/* Create a CB on behalf of the kernel driver itself (kernel context). */
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, bool internal_cb)
{
	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
			internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
		return NULL;
	}

	return cb;
}
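/*
 * Hypothetical in-kernel usage sketch (not among the matched lines): allocate a
 * kernel CB, stage packets in it through cb->kernel_address, and release it by
 * handle when done. The submission step is outside this listing, and
 * example_send_kernel_cb() is an invented name for illustration only.
 */
static int example_send_kernel_cb(struct hl_device *hdev, const void *pkts, u32 pkts_size)
{
	struct hl_cb *cb;
	u64 cb_handle;

	cb = hl_cb_kernel_create(hdev, PAGE_ALIGN(pkts_size), false);
	if (!cb)
		return -ENOMEM;

	cb_handle = cb->buf->handle;			/* memory-manager handle of this CB */
	memcpy(cb->kernel_address, pkts, pkts_size);	/* stage the packets */

	/* ... submit the CB to a hardware queue here (not shown in this listing) ... */

	hl_cb_put(cb);					/* drop the reference taken by hl_cb_kernel_create() */
	hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
	return 0;
}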
/* Pre-allocate a pool of kernel-context CBs that can be recycled at submission time. */
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}
/* Free all CBs that are still sitting in the pool. */
int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}
/* Reserve a per-context device VA range and seed the gen pool used to map CBs. */
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
					CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
	if (!ctx->cb_va_pool_base) {
		rc = -ENOMEM;
		goto err_pool_destroy;
	}

	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_unreserve_va_block;
	}

	return 0;

err_unreserve_va_block:
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);
	return rc;
}
/* Tear down the per-context CB VA pool created by hl_cb_va_pool_init(). */
void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
}