// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>

#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
#define I2C_MAX_TRANSACTION_LEN 8

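/*
 * I2C and LED debugfs helpers. These tunnel the request to the device
 * control CPU: each helper builds a cpucp_packet with the matching opcode
 * and sends it through the ASIC's send_cpu_message() callback. The I2C
 * transaction length is capped at I2C_MAX_TRANSACTION_LEN bytes, which
 * matches the packet's 64-bit value field.
 */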
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
                        u8 i2c_reg, u8 i2c_len, u64 *val)
{
        struct cpucp_packet pkt;
        int rc;

        if (!hl_device_operational(hdev, NULL))
                return -EBUSY;

        if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
                dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
                        i2c_len, I2C_MAX_TRANSACTION_LEN);
                return -EINVAL;
        }

        memset(&pkt, 0, sizeof(pkt));

        pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.i2c_bus = i2c_bus;
        pkt.i2c_addr = i2c_addr;
        pkt.i2c_reg = i2c_reg;
        pkt.i2c_len = i2c_len;

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, val);
        if (rc)
                dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

        return rc;
}

static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
                        u8 i2c_reg, u8 i2c_len, u64 val)
{
        struct cpucp_packet pkt;
        int rc;

        if (!hl_device_operational(hdev, NULL))
                return -EBUSY;

        if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
                dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
                        i2c_len, I2C_MAX_TRANSACTION_LEN);
                return -EINVAL;
        }

        memset(&pkt, 0, sizeof(pkt));

        pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.i2c_bus = i2c_bus;
        pkt.i2c_addr = i2c_addr;
        pkt.i2c_reg = i2c_reg;
        pkt.i2c_len = i2c_len;
        pkt.value = cpu_to_le64(val);

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, NULL);

        if (rc)
                dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

        return rc;
}

static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
        struct cpucp_packet pkt;
        int rc;

        if (!hl_device_operational(hdev, NULL))
                return;

        memset(&pkt, 0, sizeof(pkt));

        pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.led_index = cpu_to_le32(led);
        pkt.value = cpu_to_le64(state);

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, NULL);

        if (rc)
                dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}

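/*
 * seq_file "show" handlers for the read-only debugfs entries. Each one
 * walks a driver-global list under its lock and prints one row per object;
 * the table header is emitted lazily on the first entry, so an empty list
 * produces no output at all.
 */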
static int command_buffers_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_cb *cb;
        bool first = true;

        spin_lock(&dev_entry->cb_spinlock);

        list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
                        seq_puts(s, "---------------------------------------------------------------\n");
                }
                seq_printf(s,
                        " %03llu %d 0x%08x %d %d %d\n",
                        cb->buf->handle, cb->ctx->asid, cb->size,
                        kref_read(&cb->buf->refcount),
                        atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
        }

        spin_unlock(&dev_entry->cb_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int command_submission_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_cs *cs;
        bool first = true;

        spin_lock(&dev_entry->cs_spinlock);

        list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " CS ID CS TYPE CTX ASID CS RefCnt Submitted Completed\n");
                        seq_puts(s, "----------------------------------------------------------------\n");
                }
                seq_printf(s,
                        " %llu %d %d %d %d %d\n",
                        cs->sequence, cs->type, cs->ctx->asid,
                        kref_read(&cs->refcount),
                        cs->submitted, cs->completed);
        }

        spin_unlock(&dev_entry->cs_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int command_submission_jobs_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_cs_job *job;
        bool first = true;

        spin_lock(&dev_entry->cs_job_spinlock);

        list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " JOB ID CS ID CS TYPE CTX ASID JOB RefCnt H/W Queue\n");
                        seq_puts(s, "---------------------------------------------------------------\n");
                }
                if (job->cs)
                        seq_printf(s,
                                " %02d %llu %d %d %d %d\n",
                                job->id, job->cs->sequence, job->cs->type,
                                job->cs->ctx->asid, kref_read(&job->refcount),
                                job->hw_queue_id);
                else
                        seq_printf(s,
                                " %02d 0 0 %d %d %d\n",
                                job->id, HL_KERNEL_ASID_ID,
                                kref_read(&job->refcount), job->hw_queue_id);
        }

        spin_unlock(&dev_entry->cs_job_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int userptr_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_userptr *userptr;
        char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                "DMA_FROM_DEVICE", "DMA_NONE"};
        bool first = true;

        spin_lock(&dev_entry->userptr_spinlock);

        list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " pid user virtual address size dma dir\n");
                        seq_puts(s, "----------------------------------------------------------\n");
                }
                seq_printf(s, " %-7d 0x%-14llx %-10llu %-30s\n",
                        userptr->pid, userptr->addr, userptr->size,
                        dma_dir[userptr->dir]);
        }

        spin_unlock(&dev_entry->userptr_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

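/*
 * vm_show dumps, per registered context: the user-VA to handle/size
 * mappings, the mapped HW-block ranges, the physical page packs allocated
 * for the context's ASID and, finally, the VA range block lists of the
 * current compute context.
 */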
static int vm_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_vm_hw_block_list_node *lnode;
        struct hl_ctx *ctx;
        struct hl_vm *vm;
        struct hl_vm_hash_node *hnode;
        struct hl_userptr *userptr;
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        struct hl_va_range *va_range;
        struct hl_vm_va_block *va_block;
        enum vm_type *vm_type;
        bool once = true;
        u64 j;
        int i;

        mutex_lock(&dev_entry->ctx_mem_hash_mutex);

        list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
                once = false;
                seq_puts(s, "\n\n----------------------------------------------------");
                seq_puts(s, "\n----------------------------------------------------\n\n");
                seq_printf(s, "ctx asid: %u\n", ctx->asid);

                seq_puts(s, "\nmappings:\n\n");
                seq_puts(s, " virtual address size handle\n");
                seq_puts(s, "----------------------------------------------------\n");
                mutex_lock(&ctx->mem_hash_lock);
                hash_for_each(ctx->mem_hash, i, hnode, node) {
                        vm_type = hnode->ptr;

                        if (*vm_type == VM_TYPE_USERPTR) {
                                userptr = hnode->ptr;
                                seq_printf(s,
                                        " 0x%-14llx %-10llu\n",
                                        hnode->vaddr, userptr->size);
                        } else {
                                phys_pg_pack = hnode->ptr;
                                seq_printf(s,
                                        " 0x%-14llx %-10llu %-4u\n",
                                        hnode->vaddr, phys_pg_pack->total_size,
                                        phys_pg_pack->handle);
                        }
                }
                mutex_unlock(&ctx->mem_hash_lock);

                if (ctx->asid != HL_KERNEL_ASID_ID &&
                    !list_empty(&ctx->hw_block_mem_list)) {
                        seq_puts(s, "\nhw_block mappings:\n\n");
                        seq_puts(s,
                                " virtual address block size mapped size HW block id\n");
                        seq_puts(s,
                                "---------------------------------------------------------------\n");
                        mutex_lock(&ctx->hw_block_list_lock);
                        list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
                                seq_printf(s,
                                        " 0x%-14lx %-6u %-6u %-9u\n",
                                        lnode->vaddr, lnode->block_size, lnode->mapped_size,
                                        lnode->id);
                        }
                        mutex_unlock(&ctx->hw_block_list_lock);
                }

                vm = &ctx->hdev->vm;
                spin_lock(&vm->idr_lock);

                if (!idr_is_empty(&vm->phys_pg_pack_handles))
                        seq_puts(s, "\n\nallocations:\n");

                idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
                        if (phys_pg_pack->asid != ctx->asid)
                                continue;

                        seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
                        seq_printf(s, "page size: %u\n\n",
                                phys_pg_pack->page_size);
                        seq_puts(s, " physical address\n");
                        seq_puts(s, "---------------------\n");
                        for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                                seq_printf(s, " 0x%-14llx\n",
                                        phys_pg_pack->pages[j]);
                        }
                }
                spin_unlock(&vm->idr_lock);
        }

        mutex_unlock(&dev_entry->ctx_mem_hash_mutex);

        ctx = hl_get_compute_ctx(dev_entry->hdev);
        if (ctx) {
                seq_puts(s, "\nVA ranges:\n\n");
                for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
                        va_range = ctx->va_range[i];
                        seq_printf(s, " va_range %d\n", i);
                        seq_puts(s, "---------------------\n");
                        mutex_lock(&va_range->lock);
                        list_for_each_entry(va_block, &va_range->list, node) {
                                seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
                                        va_block->start, va_block->end,
                                        va_block->size);
                        }
                        mutex_unlock(&va_range->lock);
                        seq_puts(s, "\n");
                }
                hl_ctx_put(ctx);
        }

        if (!once)
                seq_puts(s, "\n");

        return 0;
}

static int userptr_lookup_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct scatterlist *sg;
        struct hl_userptr *userptr;
        bool first = true;
        u64 total_npages, npages, sg_start, sg_end;
        dma_addr_t dma_addr;
        int i;

        spin_lock(&dev_entry->userptr_spinlock);

        list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
                if (dev_entry->userptr_lookup >= userptr->addr &&
                    dev_entry->userptr_lookup < userptr->addr + userptr->size) {
                        total_npages = 0;
                        for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
                                npages = hl_get_sg_info(sg, &dma_addr);
                                sg_start = userptr->addr +
                                        total_npages * PAGE_SIZE;
                                sg_end = userptr->addr +
                                        (total_npages + npages) * PAGE_SIZE;

                                if (dev_entry->userptr_lookup >= sg_start &&
                                    dev_entry->userptr_lookup < sg_end) {
                                        dma_addr += (dev_entry->userptr_lookup -
                                                        sg_start);
                                        if (first) {
                                                first = false;
                                                seq_puts(s, "\n");
                                                seq_puts(s, " user virtual address dma address pid region start region size\n");
                                                seq_puts(s, "---------------------------------------------------------------------------------------\n");
                                        }
                                        seq_printf(s, " 0x%-18llx 0x%-16llx %-8u 0x%-16llx %-12llu\n",
                                                dev_entry->userptr_lookup,
                                                (u64)dma_addr, userptr->pid,
                                                userptr->addr, userptr->size);
                                }
                                total_npages += npages;
                        }
                }
        }

        spin_unlock(&dev_entry->userptr_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

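/*
 * Usage: write a user virtual address in hex to the userptr_lookup node,
 * e.g. "echo 0x1000 > userptr_lookup", then read the node back to get the
 * pinned region that contains it and the DMA address it maps to.
 */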
static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct seq_file *s = file->private_data;
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        ssize_t rc;
        u64 value;

        rc = kstrtoull_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        dev_entry->userptr_lookup = value;

        return count;
}

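/*
 * Usage: write "<asid> <0xva>" to the mmu node, e.g.
 * "echo '0 0x1000' > mmu", then read the node back to get the page-table
 * walk (hop addresses, PTE addresses and PTE values) for that address.
 */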
static int mmu_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        struct hl_ctx *ctx;
        struct hl_mmu_hop_info hops_info = {0};
        u64 virt_addr = dev_entry->mmu_addr, phys_addr;
        int i;

        if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
                ctx = hdev->kernel_ctx;
        else
                ctx = hl_get_compute_ctx(hdev);

        if (!ctx) {
                dev_err(hdev->dev, "no ctx available\n");
                return 0;
        }

        if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
                dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
                        virt_addr);
                goto put_ctx;
        }

        hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);

        if (hops_info.scrambled_vaddr &&
            (dev_entry->mmu_addr != hops_info.scrambled_vaddr))
                seq_printf(s,
                        "asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
                        dev_entry->mmu_asid, dev_entry->mmu_addr,
                        hops_info.scrambled_vaddr,
                        hops_info.unscrambled_paddr, phys_addr);
        else
                seq_printf(s,
                        "asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
                        dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);

        for (i = 0 ; i < hops_info.used_hops ; i++) {
                seq_printf(s, "hop%d_addr: 0x%llx\n",
                        i, hops_info.hop_info[i].hop_addr);
                seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
                        i, hops_info.hop_info[i].hop_pte_addr);
                seq_printf(s, "hop%d_pte: 0x%llx\n",
                        i, hops_info.hop_info[i].hop_pte_val);
        }

put_ctx:
        if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
                hl_ctx_put(ctx);

        return 0;
}

static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct seq_file *s = file->private_data;
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        char kbuf[MMU_KBUF_SIZE];
        char *c;
        ssize_t rc;

        if (count > sizeof(kbuf) - 1)
                goto err;
        if (copy_from_user(kbuf, buf, count))
                goto err;
        kbuf[count] = 0;

        c = strchr(kbuf, ' ');
        if (!c)
                goto err;
        *c = '\0';

        rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
        if (rc)
                goto err;

        if (strncmp(c+1, "0x", 2))
                goto err;
        rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
        if (rc)
                goto err;

        return count;

err:
        dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

        return -EINVAL;
}

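/*
 * Usage: write the capability mask as hex to mmu_error, e.g.
 * "echo 0x1 > mmu_error", then read the node to ack the selected MMU
 * error captures (the mask semantics are ASIC-specific).
 */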
static int mmu_ack_error(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        int rc;

        if (!dev_entry->mmu_cap_mask) {
                dev_err(hdev->dev, "mmu_cap_mask is not set\n");
                goto err;
        }

        rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
        if (rc)
                goto err;

        return 0;
err:
        return -EINVAL;
}

static ssize_t mmu_ack_error_value_write(struct file *file,
                const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct seq_file *s = file->private_data;
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        char kbuf[MMU_KBUF_SIZE];
        ssize_t rc;

        if (count > sizeof(kbuf) - 1)
                goto err;

        if (copy_from_user(kbuf, buf, count))
                goto err;

        kbuf[count] = 0;

        if (strncmp(kbuf, "0x", 2))
                goto err;

        rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
        if (rc)
                goto err;

        return count;
err:
        dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask> > mmu_error\n");

        return -EINVAL;
}

static int engines_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        struct engines_data eng_data;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev,
                        "Can't check device idle during reset\n");
                return 0;
        }

        eng_data.actual_size = 0;
        eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
        eng_data.buf = vmalloc(eng_data.allocated_buf_size);
        if (!eng_data.buf)
                return -ENOMEM;

        hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

        if (eng_data.actual_size > eng_data.allocated_buf_size) {
                dev_err(hdev->dev,
                        "Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
                        eng_data.actual_size, eng_data.allocated_buf_size);
                vfree(eng_data.buf);
                return -ENOMEM;
        }

        seq_write(s, eng_data.buf, eng_data.actual_size);

        vfree(eng_data.buf);

        return 0;
}

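/*
 * Usage: set the fill pattern first, then trigger the scrub, e.g.
 * "echo 0xdeadbeefdeadbeef > memory_scrub_val; echo 1 > memory_scrub"
 * (the value written to memory_scrub itself is ignored). Scrubbing is
 * refused while a compute context holds the device.
 */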
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 val = hdev->memory_scrub_val;
        int rc;

        if (!hl_device_operational(hdev, NULL)) {
                dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
                return -EIO;
        }

        mutex_lock(&hdev->fpriv_list_lock);
        if (hdev->is_compute_ctx_active) {
                mutex_unlock(&hdev->fpriv_list_lock);
                dev_err(hdev->dev, "can't scrub dram, a compute context exists\n");
                return -EBUSY;
        }
        hdev->is_in_dram_scrub = true;
        mutex_unlock(&hdev->fpriv_list_lock);

        rc = hdev->asic_funcs->scrub_device_dram(hdev, val);

        mutex_lock(&hdev->fpriv_list_lock);
        hdev->is_in_dram_scrub = false;
        mutex_unlock(&hdev->fpriv_list_lock);

        if (rc)
                return rc;
        return count;
}

static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;

        if (prop->dram_supports_virtual_memory &&
            (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
                return true;

        if (addr >= prop->pmmu.start_addr &&
            addr < prop->pmmu.end_addr)
                return true;

        if (addr >= prop->pmmu_huge.start_addr &&
            addr < prop->pmmu_huge.end_addr)
                return true;

        return false;
}

static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
                u32 size)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 dram_start_addr, dram_end_addr;

        if (prop->dram_supports_virtual_memory) {
                dram_start_addr = prop->dmmu.start_addr;
                dram_end_addr = prop->dmmu.end_addr;
        } else {
                dram_start_addr = prop->dram_base_address;
                dram_end_addr = prop->dram_end_address;
        }

        if (hl_mem_area_inside_range(addr, size, dram_start_addr,
                                     dram_end_addr))
                return true;

        if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
                                     prop->sram_end_address))
                return true;

        return false;
}

static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
                u64 *phys_addr)
{
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct hl_ctx *ctx;
        struct hl_vm_hash_node *hnode;
        u64 end_address, range_size;
        struct hl_userptr *userptr;
        enum vm_type *vm_type;
        bool valid = false;
        int i, rc = 0;

        ctx = hl_get_compute_ctx(hdev);

        if (!ctx) {
                dev_err(hdev->dev, "no ctx available\n");
                return -EINVAL;
        }

        /* Verify address is mapped */
        mutex_lock(&ctx->mem_hash_lock);
        hash_for_each(ctx->mem_hash, i, hnode, node) {
                vm_type = hnode->ptr;

                if (*vm_type == VM_TYPE_USERPTR) {
                        userptr = hnode->ptr;
                        range_size = userptr->size;
                } else {
                        phys_pg_pack = hnode->ptr;
                        range_size = phys_pg_pack->total_size;
                }

                end_address = virt_addr + size;
                if ((virt_addr >= hnode->vaddr) &&
                    (end_address <= hnode->vaddr + range_size)) {
                        valid = true;
                        break;
                }
        }
        mutex_unlock(&ctx->mem_hash_lock);

        if (!valid) {
                dev_err(hdev->dev,
                        "virt addr 0x%llx is not mapped\n",
                        virt_addr);
                rc = -EINVAL;
                goto put_ctx;
        }

        rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
        if (rc) {
                dev_err(hdev->dev,
                        "virt addr 0x%llx is not mapped to phys addr\n",
                        virt_addr);
                rc = -EINVAL;
        }

put_ctx:
        hl_ctx_put(ctx);

        return rc;
}

static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
                u64 *val, enum debugfs_access_type acc_type, bool *found)
{
        size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
                sizeof(u64) : sizeof(u32);
        struct pci_mem_region *mem_reg;
        int i;

        for (i = 0; i < PCI_REGION_NUMBER; i++) {
                mem_reg = &hdev->pci_mem_region[i];
                if (!mem_reg->used)
                        continue;
                if (addr >= mem_reg->region_base &&
                    addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
                        *found = true;
                        return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
                }
        }
        return 0;
}

static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
                enum debugfs_access_type acc_type)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 offset = prop->device_dma_offset_for_host_access;

        switch (acc_type) {
        case DEBUGFS_READ32:
                *val = *(u32 *) phys_to_virt(addr - offset);
                break;
        case DEBUGFS_WRITE32:
                *(u32 *) phys_to_virt(addr - offset) = *val;
                break;
        case DEBUGFS_READ64:
                *val = *(u64 *) phys_to_virt(addr - offset);
                break;
        case DEBUGFS_WRITE64:
                *(u64 *) phys_to_virt(addr - offset) = *val;
                break;
        default:
                dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
                break;
        }
}

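/*
 * hl_access_mem() resolution order: a device VA is first translated to a
 * physical address through the MMU, then the address is matched against
 * the PCI memory regions. If it falls outside of them, it is assumed to
 * be host memory mapped to the device and is accessed via phys_to_virt();
 * that fallback is only taken for translated device VAs and only when the
 * device is not behind an IOMMU.
 */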
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
                enum debugfs_access_type acc_type)
{
        size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
                sizeof(u64) : sizeof(u32);
        u64 host_start = hdev->asic_prop.host_base_address;
        u64 host_end = hdev->asic_prop.host_end_address;
        bool user_address, found = false;
        int rc;

        user_address = hl_is_device_va(hdev, addr);
        if (user_address) {
                rc = device_va_to_pa(hdev, addr, acc_size, &addr);
                if (rc)
                        return rc;
        }

        rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to access addr %#llx in dev mem (%d)\n",
                        addr, rc);
                return rc;
        }

        if (found)
                return 0;

        if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
                rc = -EINVAL;
                goto err;
        }

        if (addr >= host_start && addr <= host_end - acc_size) {
                hl_access_host_mem(hdev, addr, val, acc_type);
        } else {
                rc = -EINVAL;
                goto err;
        }

        return 0;
err:
        dev_err(hdev->dev, "invalid addr %#llx\n", addr);
        return rc;
}

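/*
 * Usage: write the target address to "addr" and then read or write the
 * "data32"/"data64" nodes, e.g. "echo 0x20000000 > addr; cat data32" or
 * "echo 0xdeadbeef > data32". The address may be a device VA of the
 * current compute context or an address inside a PCI memory region.
 */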
static ssize_t hl_data_read32(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 value64, addr = entry->addr;
        char tmp_buf[32];
        ssize_t rc;
        u32 val;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
                return 0;
        }

        if (*ppos)
                return 0;

        rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
        if (rc)
                return rc;

        val = value64; /* downcast back to 32 */

        sprintf(tmp_buf, "0x%08x\n", val);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
                strlen(tmp_buf));
}

static ssize_t hl_data_write32(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 value64, addr = entry->addr;
        u32 value;
        ssize_t rc;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
                return 0;
        }

        rc = kstrtouint_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        value64 = value;
        rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
        if (rc)
                return rc;

        return count;
}

static ssize_t hl_data_read64(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
        char tmp_buf[32];
        ssize_t rc;
        u64 val;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
                return 0;
        }

        if (*ppos)
                return 0;

        rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
        if (rc)
                return rc;

        sprintf(tmp_buf, "0x%016llx\n", val);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
                strlen(tmp_buf));
}

static ssize_t hl_data_write64(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
        u64 value;
        ssize_t rc;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
                return 0;
        }

        rc = kstrtoull_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
        if (rc)
                return rc;

        return count;
}

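/*
 * Usage: write the source device address to "addr", then write the size
 * in hex (up to 128MB) to "dma_size", e.g. "echo 0x1000 > dma_size";
 * the DMA'd contents can then be read from the "data_dma" blob.
 */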
static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
        ssize_t rc;
        u32 size;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
                return 0;
        }
        rc = kstrtouint_from_user(buf, count, 16, &size);
        if (rc)
                return rc;

        if (!size) {
                dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
                return -EINVAL;
        }

        if (size > SZ_128M) {
                dev_err(hdev->dev,
                        "DMA read failed. size can't be larger than 128MB\n");
                return -EINVAL;
        }

        if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
                dev_err(hdev->dev,
                        "DMA read failed. Invalid 0x%010llx + 0x%08x\n",
                        addr, size);
                return -EINVAL;
        }

        /* Free the previous allocation, if there was any */
        entry->data_dma_blob_desc.size = 0;
        vfree(entry->data_dma_blob_desc.data);

        entry->data_dma_blob_desc.data = vmalloc(size);
        if (!entry->data_dma_blob_desc.data)
                return -ENOMEM;

        rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
                                                entry->data_dma_blob_desc.data);
        if (rc) {
                dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
                vfree(entry->data_dma_blob_desc.data);
                entry->data_dma_blob_desc.data = NULL;
                return -EIO;
        }

        entry->data_dma_blob_desc.size = size;

        return count;
}

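/*
 * Usage: "echo 1 > monitor_dump_trig" fetches a cpucp_monitor_dump from
 * the firmware; the result can then be read from the "monitor_dump" blob.
 */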
static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 size, trig;
        ssize_t rc;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
                return 0;
        }
        rc = kstrtouint_from_user(buf, count, 10, &trig);
        if (rc)
                return rc;

        if (trig != 1) {
                dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
                return -EINVAL;
        }

        size = sizeof(struct cpucp_monitor_dump);

        /* Free the previous allocation, if there was any */
        entry->mon_dump_blob_desc.size = 0;
        vfree(entry->mon_dump_blob_desc.data);

        entry->mon_dump_blob_desc.data = vmalloc(size);
        if (!entry->mon_dump_blob_desc.data)
                return -ENOMEM;

        rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
        if (rc) {
                dev_err(hdev->dev, "Failed to dump monitors\n");
                vfree(entry->mon_dump_blob_desc.data);
                entry->mon_dump_blob_desc.data = NULL;
                return -EIO;
        }

        entry->mon_dump_blob_desc.size = size;

        return count;
}

static ssize_t hl_get_power_state(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[200];
        int i;

        if (*ppos)
                return 0;

        if (hdev->pdev->current_state == PCI_D0)
                i = 1;
        else if (hdev->pdev->current_state == PCI_D3hot)
                i = 2;
        else
                i = 3;

        sprintf(tmp_buf,
                "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
                strlen(tmp_buf));
}

static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        if (value == 1) {
                pci_set_power_state(hdev->pdev, PCI_D0);
                pci_restore_state(hdev->pdev);
                rc = pci_enable_device(hdev->pdev);
                if (rc < 0)
                        return rc;
        } else if (value == 2) {
                pci_save_state(hdev->pdev);
                pci_disable_device(hdev->pdev);
                pci_set_power_state(hdev->pdev, PCI_D3hot);
        } else {
                dev_dbg(hdev->dev, "invalid power state value %u\n", value);
                return -EINVAL;
        }

        return count;
}

static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[32];
        u64 val;
        ssize_t rc;

        if (*ppos)
                return 0;

        rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
                        entry->i2c_reg, entry->i2c_len, &val);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
                        entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
                return rc;
        }

        sprintf(tmp_buf, "%#02llx\n", val);
        rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
                strlen(tmp_buf));

        return rc;
}

static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 value;
        ssize_t rc;

        rc = kstrtou64_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
                        entry->i2c_reg, entry->i2c_len, value);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
                        value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
                return rc;
        }

        return count;
}

static ssize_t hl_led0_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        value = value ? 1 : 0;

        hl_debugfs_led_set(hdev, 0, value);

        return count;
}

static ssize_t hl_led1_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        value = value ? 1 : 0;

        hl_debugfs_led_set(hdev, 1, value);

        return count;
}

static ssize_t hl_led2_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        value = value ? 1 : 0;

        hl_debugfs_led_set(hdev, 2, value);

        return count;
}

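/*
 * The "device" control node accepts the commands listed by
 * hl_device_read() below, e.g. "echo suspend > device". "cpu_timeout"
 * marks the device CPU as disabled so the driver stops sending messages
 * to it.
 */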
static ssize_t hl_device_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        static const char *help =
                "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
        return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}

static ssize_t hl_device_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char data[30] = {0};

        /* don't allow partial writes */
        if (*ppos != 0)
                return 0;

        simple_write_to_buffer(data, 29, ppos, buf, count);

        if (strncmp("disable", data, strlen("disable")) == 0) {
                hdev->disabled = true;
        } else if (strncmp("enable", data, strlen("enable")) == 0) {
                hdev->disabled = false;
        } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
                hdev->asic_funcs->suspend(hdev);
        } else if (strncmp("resume", data, strlen("resume")) == 0) {
                hdev->asic_funcs->resume(hdev);
        } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
                hdev->device_cpu_disabled = true;
        } else {
                dev_err(hdev->dev,
                        "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
                count = -EINVAL;
        }

        return count;
}

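/*
 * clk_gate used to control clock gating; it is kept here as a no-op stub,
 * presumably so existing tooling that touches the node keeps working.
 */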
static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[200];
        ssize_t rc;

        if (!hdev->asic_prop.configurable_stop_on_err)
                return -EOPNOTSUPP;

        if (*ppos)
                return 0;

        sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
        rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
                strlen(tmp_buf) + 1);

        return rc;
}

static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        if (!hdev->asic_prop.configurable_stop_on_err)
                return -EOPNOTSUPP;

        if (hdev->reset_info.in_reset) {
                dev_warn_ratelimited(hdev->dev,
                        "Can't change stop on error during reset\n");
                return 0;
        }

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        hdev->stop_on_err = value ? 1 : 0;

        hl_device_reset(hdev, 0);

        return count;
}

static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;

        hdev->asic_funcs->ack_protection_bits_errors(hdev);

        return 0;
}

static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        ssize_t rc;

        down_read(&entry->state_dump_sem);
        if (!entry->state_dump[entry->state_dump_head])
                rc = 0;
        else
                rc = simple_read_from_buffer(
                        buf, count, ppos,
                        entry->state_dump[entry->state_dump_head],
                        strlen(entry->state_dump[entry->state_dump_head]));
        up_read(&entry->state_dump_sem);

        return rc;
}

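/*
 * Reading state_dump returns the most recent dump; writing N (decimal)
 * discards the N most recent dumps, moving the head back through the
 * circular buffer so that older dumps become readable.
 */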
static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        ssize_t rc;
        u32 size;
        int i;

        rc = kstrtouint_from_user(buf, count, 10, &size);
        if (rc)
                return rc;

        if (!size || size >= ARRAY_SIZE(entry->state_dump)) {
                dev_err(hdev->dev, "Invalid number of dumps to skip\n");
                return -EINVAL;
        }

        if (entry->state_dump[entry->state_dump_head]) {
                down_write(&entry->state_dump_sem);
                for (i = 0; i < size; ++i) {
                        vfree(entry->state_dump[entry->state_dump_head]);
                        entry->state_dump[entry->state_dump_head] = NULL;
                        if (entry->state_dump_head > 0)
                                entry->state_dump_head--;
                        else
                                entry->state_dump_head =
                                        ARRAY_SIZE(entry->state_dump) - 1;
                }
                up_write(&entry->state_dump_sem);
        }

        return count;
}

static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[200];
        ssize_t rc;

        if (*ppos)
                return 0;

        sprintf(tmp_buf, "%d\n",
                jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
        rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
                strlen(tmp_buf) + 1);

        return rc;
}

static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        if (value)
                hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
        else
                hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

        return count;
}

static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;

        hdev->asic_funcs->check_if_razwi_happened(hdev);

        return 0;
}

static const struct file_operations hl_mem_scrub_fops = {
        .owner = THIS_MODULE,
        .write = hl_memory_scrub,
};

static const struct file_operations hl_data32b_fops = {
        .owner = THIS_MODULE,
        .read = hl_data_read32,
        .write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
        .owner = THIS_MODULE,
        .read = hl_data_read64,
        .write = hl_data_write64
};

static const struct file_operations hl_dma_size_fops = {
        .owner = THIS_MODULE,
        .write = hl_dma_size_write
};

static const struct file_operations hl_monitor_dump_fops = {
        .owner = THIS_MODULE,
        .write = hl_monitor_dump_trigger
};

static const struct file_operations hl_i2c_data_fops = {
        .owner = THIS_MODULE,
        .read = hl_i2c_data_read,
        .write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
        .owner = THIS_MODULE,
        .read = hl_get_power_state,
        .write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
        .owner = THIS_MODULE,
        .write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
        .owner = THIS_MODULE,
        .write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
        .owner = THIS_MODULE,
        .write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
        .owner = THIS_MODULE,
        .read = hl_device_read,
        .write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
        .owner = THIS_MODULE,
        .read = hl_clk_gate_read,
        .write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
        .owner = THIS_MODULE,
        .read = hl_stop_on_err_read,
        .write = hl_stop_on_err_write
};

static const struct file_operations hl_security_violations_fops = {
        .owner = THIS_MODULE,
        .read = hl_security_violations_read
};

static const struct file_operations hl_state_dump_fops = {
        .owner = THIS_MODULE,
        .read = hl_state_dump_read,
        .write = hl_state_dump_write
};

static const struct file_operations hl_timeout_locked_fops = {
        .owner = THIS_MODULE,
        .read = hl_timeout_locked_read,
        .write = hl_timeout_locked_write
};

static const struct file_operations hl_razwi_check_fops = {
        .owner = THIS_MODULE,
        .read = hl_check_razwi_happened
};

static const struct hl_info_list hl_debugfs_list[] = {
        {"command_buffers", command_buffers_show, NULL},
        {"command_submission", command_submission_show, NULL},
        {"command_submission_jobs", command_submission_jobs_show, NULL},
        {"userptr", userptr_show, NULL},
        {"vm", vm_show, NULL},
        {"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
        {"mmu", mmu_show, mmu_asid_va_write},
        {"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
        {"engines", engines_show, NULL},
};

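/*
 * Generic plumbing for the table above: every entry is exposed through a
 * single-record seq_file, and writes are forwarded to the entry's write
 * callback when it has one.
 */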
static int hl_debugfs_open(struct inode *inode, struct file *file)
{
        struct hl_debugfs_entry *node = inode->i_private;

        return single_open(file, node->info_ent->show, node);
}

static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct hl_debugfs_entry *node = file->f_inode->i_private;

        if (node->info_ent->write)
                return node->info_ent->write(file, buf, count, f_pos);
        else
                return -EINVAL;
}

static const struct file_operations hl_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = hl_debugfs_open,
        .read = seq_read,
        .write = hl_debugfs_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
        debugfs_create_u8("i2c_bus",
                          0644,
                          root,
                          &dev_entry->i2c_bus);

        debugfs_create_u8("i2c_addr",
                          0644,
                          root,
                          &dev_entry->i2c_addr);

        debugfs_create_u8("i2c_reg",
                          0644,
                          root,
                          &dev_entry->i2c_reg);

        debugfs_create_u8("i2c_len",
                          0644,
                          root,
                          &dev_entry->i2c_len);

        debugfs_create_file("i2c_data",
                            0644,
                            root,
                            dev_entry,
                            &hl_i2c_data_fops);

        debugfs_create_file("led0",
                            0200,
                            root,
                            dev_entry,
                            &hl_led0_fops);

        debugfs_create_file("led1",
                            0200,
                            root,
                            dev_entry,
                            &hl_led1_fops);

        debugfs_create_file("led2",
                            0200,
                            root,
                            dev_entry,
                            &hl_led2_fops);
}

static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
                struct dentry *root)
{
        int count = ARRAY_SIZE(hl_debugfs_list);
        struct hl_debugfs_entry *entry;
        int i;

        debugfs_create_x64("memory_scrub_val",
                           0644,
                           root,
                           &hdev->memory_scrub_val);

        debugfs_create_file("memory_scrub",
                            0200,
                            root,
                            dev_entry,
                            &hl_mem_scrub_fops);

        debugfs_create_x64("addr",
                           0644,
                           root,
                           &dev_entry->addr);

        debugfs_create_file("data32",
                            0644,
                            root,
                            dev_entry,
                            &hl_data32b_fops);

        debugfs_create_file("data64",
                            0644,
                            root,
                            dev_entry,
                            &hl_data64b_fops);

        debugfs_create_file("set_power_state",
                            0200,
                            root,
                            dev_entry,
                            &hl_power_fops);

        debugfs_create_file("device",
                            0200,
                            root,
                            dev_entry,
                            &hl_device_fops);

        debugfs_create_file("clk_gate",
                            0200,
                            root,
                            dev_entry,
                            &hl_clk_gate_fops);

        debugfs_create_file("stop_on_err",
                            0644,
                            root,
                            dev_entry,
                            &hl_stop_on_err_fops);

        debugfs_create_file("dump_security_violations",
                            0644,
                            root,
                            dev_entry,
                            &hl_security_violations_fops);

        debugfs_create_file("dump_razwi_events",
                            0644,
                            root,
                            dev_entry,
                            &hl_razwi_check_fops);

        debugfs_create_file("dma_size",
                            0200,
                            root,
                            dev_entry,
                            &hl_dma_size_fops);

        debugfs_create_blob("data_dma",
                            0400,
                            root,
                            &dev_entry->data_dma_blob_desc);

        debugfs_create_file("monitor_dump_trig",
                            0200,
                            root,
                            dev_entry,
                            &hl_monitor_dump_fops);

        debugfs_create_blob("monitor_dump",
                            0400,
                            root,
                            &dev_entry->mon_dump_blob_desc);

        debugfs_create_x8("skip_reset_on_timeout",
                          0644,
                          root,
                          &hdev->reset_info.skip_reset_on_timeout);

        debugfs_create_file("state_dump",
                            0600,
                            root,
                            dev_entry,
                            &hl_state_dump_fops);

        debugfs_create_file("timeout_locked",
                            0644,
                            root,
                            dev_entry,
                            &hl_timeout_locked_fops);

        debugfs_create_u32("device_release_watchdog_timeout",
                           0644,
                           root,
                           &hdev->device_release_watchdog_timeout_sec);

        for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
                debugfs_create_file(hl_debugfs_list[i].name,
                                    0444,
                                    root,
                                    entry,
                                    &hl_debugfs_fops);
                entry->info_ent = &hl_debugfs_list[i];
                entry->dev_entry = dev_entry;
        }
}

int hl_debugfs_device_init(struct hl_device *hdev)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
        int count = ARRAY_SIZE(hl_debugfs_list);

        dev_entry->hdev = hdev;
        dev_entry->entry_arr = kmalloc_array(count, sizeof(struct hl_debugfs_entry), GFP_KERNEL);
        if (!dev_entry->entry_arr)
                return -ENOMEM;

        dev_entry->data_dma_blob_desc.size = 0;
        dev_entry->data_dma_blob_desc.data = NULL;
        dev_entry->mon_dump_blob_desc.size = 0;
        dev_entry->mon_dump_blob_desc.data = NULL;

        INIT_LIST_HEAD(&dev_entry->file_list);
        INIT_LIST_HEAD(&dev_entry->cb_list);
        INIT_LIST_HEAD(&dev_entry->cs_list);
        INIT_LIST_HEAD(&dev_entry->cs_job_list);
        INIT_LIST_HEAD(&dev_entry->userptr_list);
        INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
        mutex_init(&dev_entry->file_mutex);
        init_rwsem(&dev_entry->state_dump_sem);
        spin_lock_init(&dev_entry->cb_spinlock);
        spin_lock_init(&dev_entry->cs_spinlock);
        spin_lock_init(&dev_entry->cs_job_spinlock);
        spin_lock_init(&dev_entry->userptr_spinlock);
        mutex_init(&dev_entry->ctx_mem_hash_mutex);

        return 0;
}

void hl_debugfs_device_fini(struct hl_device *hdev)
{
        struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
        int i;

        mutex_destroy(&entry->ctx_mem_hash_mutex);
        mutex_destroy(&entry->file_mutex);

        vfree(entry->data_dma_blob_desc.data);
        vfree(entry->mon_dump_blob_desc.data);

        for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
                vfree(entry->state_dump[i]);

        kfree(entry->entry_arr);
}

void hl_debugfs_add_device(struct hl_device *hdev)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        dev_entry->root = hdev->drm.accel->debugfs_root;

        add_files_to_device(hdev, dev_entry, dev_entry->root);

        if (!hdev->asic_prop.fw_security_enabled)
                add_secured_nodes(dev_entry, dev_entry->root);
}

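/*
 * The registration helpers below are called from the object lifecycle
 * paths (CB/CS/job/userptr/context create and destroy) to keep the lists
 * that back the debugfs dumps above in sync.
 */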
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
        struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

        mutex_lock(&dev_entry->file_mutex);
        list_add(&hpriv->debugfs_list, &dev_entry->file_list);
        mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
        struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

        mutex_lock(&dev_entry->file_mutex);
        list_del(&hpriv->debugfs_list);
        mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
        struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

        spin_lock(&dev_entry->cb_spinlock);
        list_add(&cb->debugfs_list, &dev_entry->cb_list);
        spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
        struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

        spin_lock(&dev_entry->cb_spinlock);
        list_del(&cb->debugfs_list);
        spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_add_cs(struct hl_cs *cs)
{
        struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_spinlock);
        list_add(&cs->debugfs_list, &dev_entry->cs_list);
        spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
        struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_spinlock);
        list_del(&cs->debugfs_list);
        spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_job_spinlock);
        list_add(&job->debugfs_list, &dev_entry->cs_job_list);
        spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_job_spinlock);
        list_del(&job->debugfs_list);
        spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->userptr_spinlock);
        list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
        spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
                struct hl_userptr *userptr)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->userptr_spinlock);
        list_del(&userptr->debugfs_list);
        spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        mutex_lock(&dev_entry->ctx_mem_hash_mutex);
        list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
        mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        mutex_lock(&dev_entry->ctx_mem_hash_mutex);
        list_del(&ctx->debugfs_list);
        mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}

/**
 * hl_debugfs_set_state_dump - register state dump making it accessible via
 * debugfs
 * @hdev: pointer to the device structure
 * @data: the actual dump data
 * @length: the length of the data
 */
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
                unsigned long length)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        down_write(&dev_entry->state_dump_sem);

        dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
                                        ARRAY_SIZE(dev_entry->state_dump);
        vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
        dev_entry->state_dump[dev_entry->state_dump_head] = data;

        up_write(&dev_entry->state_dump_sem);
}