1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2022 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #define pr_fmt(fmt) "habanalabs: " fmt
9
10 #include <uapi/drm/habanalabs_accel.h>
11 #include "habanalabs.h"
12
13 #include <linux/pci.h>
14 #include <linux/hwmon.h>
15 #include <linux/vmalloc.h>
16
17 #include <drm/drm_accel.h>
18 #include <drm/drm_drv.h>
19
20 #include <trace/events/habanalabs.h>
21
22 #define HL_RESET_DELAY_USEC 10000 /* 10ms */
23
24 #define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 30
25
26 enum dma_alloc_type {
27 DMA_ALLOC_COHERENT,
28 DMA_ALLOC_POOL,
29 };
30
31 #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
32
/*
 * hl_set_dram_bar - sets the bar to allow later access to address
 *
 * @hdev: pointer to habanalabs device structure.
 * @addr: the address the caller wants to access.
 * @region: the PCI region.
 * @new_bar_region_base: the new BAR region base address.
 *
 * @return: the old BAR base address on success, U64_MAX for failure.
 *	    The caller should set it back to the old address after use.
 *
 * In case the bar space does not cover the whole address space,
 * the bar base address should be set to allow access to a given address.
 * This function can also be called if the bar doesn't need to be set,
 * in which case it just won't change the base.
 */
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
				u64 *new_bar_region_base)
51 {
52 struct asic_fixed_properties *prop = &hdev->asic_prop;
53 u64 bar_base_addr, old_base;
54
55 if (is_power_of_2(prop->dram_pci_bar_size))
56 bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
57 else
58 bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
59 prop->dram_pci_bar_size;
60
61 old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
62
63 /* in case of success we need to update the new BAR base */
64 if ((old_base != U64_MAX) && new_bar_region_base)
65 *new_bar_region_base = bar_base_addr;
66
67 return old_base;
68 }
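
/*
 * Illustrative sketch of the expected call pattern (hl_access_sram_dram_region() below
 * is the in-tree user): save the returned old base, perform the access through the BAR,
 * then restore the original base.
 *
 *	old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
 *	if (old_base == U64_MAX)
 *		return -EIO;
 *	... access the address through the BAR ...
 *	hl_set_dram_bar(hdev, old_base, region, NULL);
 */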
69
int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
72 {
73 struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
74 u64 old_base = 0, rc, bar_region_base = region->region_base;
75 void __iomem *acc_addr;
76
77 if (set_dram_bar) {
78 old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
79 if (old_base == U64_MAX)
80 return -EIO;
81 }
82
83 acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
84 (addr - bar_region_base);
85
86 switch (acc_type) {
87 case DEBUGFS_READ8:
88 *val = readb(acc_addr);
89 break;
90 case DEBUGFS_WRITE8:
91 writeb(*val, acc_addr);
92 break;
93 case DEBUGFS_READ32:
94 *val = readl(acc_addr);
95 break;
96 case DEBUGFS_WRITE32:
97 writel(*val, acc_addr);
98 break;
99 case DEBUGFS_READ64:
100 *val = readq(acc_addr);
101 break;
102 case DEBUGFS_WRITE64:
103 writeq(*val, acc_addr);
104 break;
105 }
106
107 if (set_dram_bar) {
108 rc = hl_set_dram_bar(hdev, old_base, region, NULL);
109 if (rc == U64_MAX)
110 return -EIO;
111 }
112
113 return 0;
114 }
115
static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
					gfp_t flag, enum dma_alloc_type alloc_type,
					const char *caller)
119 {
120 void *ptr = NULL;
121
122 switch (alloc_type) {
123 case DMA_ALLOC_COHERENT:
124 ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
125 break;
126 case DMA_ALLOC_POOL:
127 ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
128 break;
129 }
130
131 if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
132 trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
133 caller);
134
135 return ptr;
136 }
137
static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
					dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
					const char *caller)
141 {
	/* this is needed to avoid a warning about using a freed pointer in the trace call below */
	u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;
144
145 switch (alloc_type) {
146 case DMA_ALLOC_COHERENT:
147 hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
148 break;
149 case DMA_ALLOC_POOL:
150 hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
151 break;
152 }
153
154 trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
155 }
156
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
					gfp_t flag, const char *caller)
159 {
160 return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
161 }
162
void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
					dma_addr_t dma_handle, const char *caller)
165 {
166 hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
167 }
168
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
					dma_addr_t *dma_handle, const char *caller)
171 {
172 return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
173 }
174
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
					const char *caller)
177 {
178 hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
179 }
180
void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
182 {
183 return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
184 }
185
void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
187 {
188 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
189 }
190
int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir, const char *caller)
193 {
194 struct asic_fixed_properties *prop = &hdev->asic_prop;
195 struct scatterlist *sg;
196 int rc, i;
197
198 rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
199 if (rc)
200 return rc;
201
202 if (!trace_habanalabs_dma_map_page_enabled())
203 return 0;
204
205 for_each_sgtable_dma_sg(sgt, sg, i)
206 trace_habanalabs_dma_map_page(hdev->dev,
207 page_to_phys(sg_page(sg)),
208 sg->dma_address - prop->device_dma_offset_for_host_access,
209 #ifdef CONFIG_NEED_SG_DMA_LENGTH
210 sg->dma_length,
211 #else
212 sg->length,
213 #endif
214 dir, caller);
215
216 return 0;
217 }
218
int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir)
221 {
222 struct asic_fixed_properties *prop = &hdev->asic_prop;
223 struct scatterlist *sg;
224 int rc, i;
225
226 rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
227 if (rc)
228 return rc;
229
230 /* Shift to the device's base physical address of host memory if necessary */
231 if (prop->device_dma_offset_for_host_access)
232 for_each_sgtable_dma_sg(sgt, sg, i)
233 sg->dma_address += prop->device_dma_offset_for_host_access;
234
235 return 0;
236 }
237
void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir, const char *caller)
240 {
241 struct asic_fixed_properties *prop = &hdev->asic_prop;
242 struct scatterlist *sg;
243 int i;
244
245 hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);
246
247 if (trace_habanalabs_dma_unmap_page_enabled()) {
248 for_each_sgtable_dma_sg(sgt, sg, i)
249 trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
250 sg->dma_address - prop->device_dma_offset_for_host_access,
251 #ifdef CONFIG_NEED_SG_DMA_LENGTH
252 sg->dma_length,
253 #else
254 sg->length,
255 #endif
256 dir, caller);
257 }
258 }
259
void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir)
262 {
263 struct asic_fixed_properties *prop = &hdev->asic_prop;
264 struct scatterlist *sg;
265 int i;
266
267 /* Cancel the device's base physical address of host memory if necessary */
268 if (prop->device_dma_offset_for_host_access)
269 for_each_sgtable_dma_sg(sgt, sg, i)
270 sg->dma_address -= prop->device_dma_offset_for_host_access;
271
272 dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
273 }
274
/*
 * hl_access_cfg_region - access the config region
 *
 * @hdev: pointer to habanalabs device structure
 * @addr: the address to access
 * @val: the value to be written, or the buffer to store the read value
 * @acc_type: the type of access (read/write 64/32)
 */
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type)
285 {
286 struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
287 u32 val_h, val_l;
288
289 if (!IS_ALIGNED(addr, sizeof(u32))) {
290 dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
291 return -EINVAL;
292 }
293
294 switch (acc_type) {
295 case DEBUGFS_READ32:
296 *val = RREG32(addr - cfg_region->region_base);
297 break;
298 case DEBUGFS_WRITE32:
299 WREG32(addr - cfg_region->region_base, *val);
300 break;
301 case DEBUGFS_READ64:
302 val_l = RREG32(addr - cfg_region->region_base);
303 val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
304
305 *val = (((u64) val_h) << 32) | val_l;
306 break;
307 case DEBUGFS_WRITE64:
308 WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
309 WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
310 break;
311 default:
312 dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
313 return -EOPNOTSUPP;
314 }
315
316 return 0;
317 }
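
/*
 * Note: the config region is accessed through 32-bit registers, so 64-bit accesses are
 * composed of two 32-bit accesses: a read returns ((u64)val_h << 32) | val_l, and a write
 * stores the lower and upper 32 bits at addr and addr + 4 respectively, as done above.
 */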
318
/*
 * hl_access_dev_mem - access device memory
 *
 * @hdev: pointer to habanalabs device structure
 * @region_type: the type of the region the address belongs to
 * @addr: the address to access
 * @val: the value to be written, or the buffer to store the read value
 * @acc_type: the type of access (r/w, 32/64)
 */
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
			u64 addr, u64 *val, enum debugfs_access_type acc_type)
330 {
331 switch (region_type) {
332 case PCI_REGION_CFG:
333 return hl_access_cfg_region(hdev, addr, val, acc_type);
334 case PCI_REGION_SRAM:
335 case PCI_REGION_DRAM:
336 return hl_access_sram_dram_region(hdev, addr, val, acc_type,
337 region_type, (region_type == PCI_REGION_DRAM));
338 default:
339 return -EFAULT;
340 }
341
342 return 0;
343 }
344
void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
346 {
347 va_list args;
348 int str_size;
349
350 va_start(args, fmt);
	/* Calculate the formatted string length. Each string is NUL-terminated,
	 * hence increment the result by 1.
	 */
354 str_size = vsnprintf(NULL, 0, fmt, args) + 1;
355 va_end(args);
356
357 if ((e->actual_size + str_size) < e->allocated_buf_size) {
358 va_start(args, fmt);
359 vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
360 va_end(args);
361 }
362
	/* The size must be updated even when the destination buffer is not written,
	 * so that the exact total size of all input strings is obtained.
	 */
366 e->actual_size += str_size;
367 }
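
/*
 * hl_engine_data_sprintf() uses a two-pass vsnprintf(): the first pass (with a NULL
 * buffer) only measures the formatted length, and the string is copied out only if it
 * still fits in the pre-allocated buffer. Since actual_size is advanced unconditionally,
 * a caller can compare it against allocated_buf_size afterwards to detect truncation and,
 * if needed, retry with a larger buffer (a usage sketch, not something mandated here).
 */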
368
enum hl_device_status hl_device_status(struct hl_device *hdev)
370 {
371 enum hl_device_status status;
372
373 if (hdev->device_fini_pending) {
374 status = HL_DEVICE_STATUS_MALFUNCTION;
375 } else if (hdev->reset_info.in_reset) {
376 if (hdev->reset_info.in_compute_reset)
377 status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
378 else
379 status = HL_DEVICE_STATUS_IN_RESET;
380 } else if (hdev->reset_info.needs_reset) {
381 status = HL_DEVICE_STATUS_NEEDS_RESET;
382 } else if (hdev->disabled) {
383 status = HL_DEVICE_STATUS_MALFUNCTION;
384 } else if (!hdev->init_done) {
385 status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
386 } else {
387 status = HL_DEVICE_STATUS_OPERATIONAL;
388 }
389
390 return status;
391 }
392
bool hl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
395 {
396 enum hl_device_status current_status;
397
398 current_status = hl_device_status(hdev);
399 if (status)
400 *status = current_status;
401
402 switch (current_status) {
403 case HL_DEVICE_STATUS_MALFUNCTION:
404 case HL_DEVICE_STATUS_IN_RESET:
405 case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
406 case HL_DEVICE_STATUS_NEEDS_RESET:
407 return false;
408 case HL_DEVICE_STATUS_OPERATIONAL:
409 case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
410 default:
411 return true;
412 }
413 }
414
bool hl_ctrl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
417 {
418 enum hl_device_status current_status;
419
420 current_status = hl_device_status(hdev);
421 if (status)
422 *status = current_status;
423
424 switch (current_status) {
425 case HL_DEVICE_STATUS_MALFUNCTION:
426 return false;
427 case HL_DEVICE_STATUS_IN_RESET:
428 case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
429 case HL_DEVICE_STATUS_NEEDS_RESET:
430 case HL_DEVICE_STATUS_OPERATIONAL:
431 case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
432 default:
433 return true;
434 }
435 }
436
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
					u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
439 {
440 if (idle_mask[3])
441 dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
442 message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
443 else if (idle_mask[2])
444 dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
445 message, idle_mask[2], idle_mask[1], idle_mask[0]);
446 else if (idle_mask[1])
447 dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
448 message, idle_mask[1], idle_mask[0]);
449 else
450 dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
451 }
452
static void hpriv_release(struct kref *ref)
454 {
455 u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
456 bool reset_device, device_is_idle = true;
457 struct hl_fpriv *hpriv;
458 struct hl_device *hdev;
459
460 hpriv = container_of(ref, struct hl_fpriv, refcount);
461
462 hdev = hpriv->hdev;
463
464 hdev->asic_funcs->send_device_activity(hdev, false);
465
466 hl_debugfs_remove_file(hpriv);
467
468 mutex_destroy(&hpriv->ctx_lock);
469 mutex_destroy(&hpriv->restore_phase_mutex);
470
471 /* There should be no memory buffers at this point and handles IDR can be destroyed */
472 hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
473
474 /* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
475 * reset that waits for device release.
476 */
477 reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
478
479 /* Check the device idle status and reset if not idle.
480 * Skip it if already in reset, or if device is going to be reset in any case.
481 */
482 if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)
483 device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
484 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
485 if (!device_is_idle) {
486 print_idle_status_mask(hdev, "device is not idle after user context is closed",
487 idle_mask);
488 reset_device = true;
489 }
490
	/* We need to remove the user from the list to make sure the reset process won't
	 * try to kill the user process. If we got here, it means there are no
	 * more driver/device resources that the user process is occupying, so there is
	 * no need to kill it.
	 *
	 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
	 * a race between the release and opening the device again. We don't want to let
	 * a user open the device while a reset is about to happen.
	 */
500 mutex_lock(&hdev->fpriv_list_lock);
501 list_del(&hpriv->dev_node);
502 mutex_unlock(&hdev->fpriv_list_lock);
503
504 put_pid(hpriv->taskpid);
505
506 if (reset_device) {
507 hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
508 } else {
		/* Scrubbing is handled within hl_device_reset(), so here we need to do it directly */
		int rc = hdev->asic_funcs->scrub_device_mem(hdev);
511
512 if (rc) {
513 dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
514 hl_device_reset(hdev, HL_DRV_RESET_HARD);
515 }
516 }
517
	/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
	 * thread, we don't care because in_reset is marked, so if a user tries to open
	 * the device it will fail on that, even if compute_ctx is false.
	 */
522 mutex_lock(&hdev->fpriv_list_lock);
523 hdev->is_compute_ctx_active = false;
524 mutex_unlock(&hdev->fpriv_list_lock);
525
526 hdev->compute_ctx_in_release = 0;
527
528 /* release the eventfd */
529 if (hpriv->notifier_event.eventfd)
530 eventfd_ctx_put(hpriv->notifier_event.eventfd);
531
532 mutex_destroy(&hpriv->notifier_event.lock);
533
534 kfree(hpriv);
535 }
536
void hl_hpriv_get(struct hl_fpriv *hpriv)
538 {
539 kref_get(&hpriv->refcount);
540 }
541
int hl_hpriv_put(struct hl_fpriv *hpriv)
543 {
544 return kref_put(&hpriv->refcount, hpriv_release);
545 }
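
/*
 * hl_hpriv_put() returns the value of kref_put(), i.e. non-zero only when this was the
 * last reference and hpriv_release() ran. hl_device_release() below relies on this to
 * detect that the device is still in use after the FD was closed.
 */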
546
static void print_device_in_use_info(struct hl_device *hdev, const char *message)
548 {
549 u32 active_cs_num, dmabuf_export_cnt;
550 bool unknown_reason = true;
551 char buf[128];
552 size_t size;
553 int offset;
554
555 size = sizeof(buf);
556 offset = 0;
557
558 active_cs_num = hl_get_active_cs_num(hdev);
559 if (active_cs_num) {
560 unknown_reason = false;
561 offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
562 }
563
564 dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
565 if (dmabuf_export_cnt) {
566 unknown_reason = false;
567 offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
568 dmabuf_export_cnt);
569 }
570
571 if (unknown_reason)
572 scnprintf(buf + offset, size - offset, " [unknown reason]");
573
574 dev_notice(hdev->dev, "%s%s\n", message, buf);
575 }
576
/*
 * hl_device_release() - release function for habanalabs device.
 * @ddev: pointer to DRM device structure.
 * @file_priv: pointer to DRM file private data structure.
 *
 * Called when a process closes a habanalabs device
 */
void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
585 {
586 struct hl_fpriv *hpriv = file_priv->driver_priv;
587 struct hl_device *hdev = to_hl_device(ddev);
588
589 if (!hdev) {
590 pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
591 put_pid(hpriv->taskpid);
592 }
593
594 hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
595
	/* Memory buffers might still be in use at this point and thus the handles IDR destruction
	 * is postponed to hpriv_release().
	 */
599 hl_mem_mgr_fini(&hpriv->mem_mgr);
600
601 hdev->compute_ctx_in_release = 1;
602
603 if (!hl_hpriv_put(hpriv)) {
604 print_device_in_use_info(hdev, "User process closed FD but device still in use");
605 hl_device_reset(hdev, HL_DRV_RESET_HARD);
606 }
607
608 hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
609 }
610
static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
612 {
613 struct hl_fpriv *hpriv = filp->private_data;
614 struct hl_device *hdev = hpriv->hdev;
615
616 filp->private_data = NULL;
617
618 if (!hdev) {
619 pr_err("Closing FD after device was removed\n");
620 goto out;
621 }
622
623 mutex_lock(&hdev->fpriv_ctrl_list_lock);
624 list_del(&hpriv->dev_node);
625 mutex_unlock(&hdev->fpriv_ctrl_list_lock);
626 out:
627 put_pid(hpriv->taskpid);
628
629 kfree(hpriv);
630
631 return 0;
632 }
633
static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
635 {
636 struct hl_device *hdev = hpriv->hdev;
637 unsigned long vm_pgoff;
638
639 if (!hdev) {
640 pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
641 return -ENODEV;
642 }
643
644 vm_pgoff = vma->vm_pgoff;
645
646 switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
647 case HL_MMAP_TYPE_BLOCK:
648 vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
649 return hl_hw_block_mmap(hpriv, vma);
650
651 case HL_MMAP_TYPE_CB:
652 case HL_MMAP_TYPE_TS_BUFF:
653 return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
654 }
655 return -EINVAL;
656 }
657
/*
 * hl_mmap - mmap function for habanalabs device
 *
 * @filp: pointer to file structure
 * @vma: pointer to vm_area_struct of the process
 *
 * Called when a process does an mmap on the habanalabs device. Call the relevant mmap
 * function at the end of the common code.
 */
int hl_mmap(struct file *filp, struct vm_area_struct *vma)
668 {
669 struct drm_file *file_priv = filp->private_data;
670 struct hl_fpriv *hpriv = file_priv->driver_priv;
671
672 return __hl_mmap(hpriv, vma);
673 }
674
675 static const struct file_operations hl_ctrl_ops = {
676 .owner = THIS_MODULE,
677 .open = hl_device_open_ctrl,
678 .release = hl_device_release_ctrl,
679 .unlocked_ioctl = hl_ioctl_control,
680 .compat_ioctl = hl_ioctl_control
681 };
682
static void device_release_func(struct device *dev)
684 {
685 kfree(dev);
686 }
687
/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @class: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for the habanalabs device.
 */
static int device_init_cdev(struct hl_device *hdev, const struct class *class,
				int minor, const struct file_operations *fops,
				char *name, struct cdev *cdev,
				struct device **dev)
705 {
706 cdev_init(cdev, fops);
707 cdev->owner = THIS_MODULE;
708
709 *dev = kzalloc(sizeof(**dev), GFP_KERNEL);
710 if (!*dev)
711 return -ENOMEM;
712
713 device_initialize(*dev);
714 (*dev)->devt = MKDEV(hdev->major, minor);
715 (*dev)->class = class;
716 (*dev)->release = device_release_func;
717 dev_set_drvdata(*dev, hdev);
718 dev_set_name(*dev, "%s", name);
719
720 return 0;
721 }
722
static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
724 {
725 const struct class *accel_class = hdev->drm.accel->kdev->class;
726 char name[32];
727 int rc;
728
729 hdev->cdev_idx = hdev->drm.accel->index;
730
731 /* Initialize cdev and device structures for the control device */
732 snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);
733 rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,
734 &hdev->cdev_ctrl, &hdev->dev_ctrl);
735 if (rc)
736 return rc;
737
738 rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
739 if (rc) {
740 dev_err(hdev->dev_ctrl,
741 "failed to add an accel control char device to the system\n");
742 goto free_ctrl_device;
743 }
744
745 rc = hl_sysfs_init(hdev);
746 if (rc) {
747 dev_err(hdev->dev, "failed to initialize sysfs\n");
748 goto delete_ctrl_cdev_device;
749 }
750
751 hl_debugfs_add_device(hdev);
752
753 hdev->cdev_sysfs_debugfs_created = true;
754
755 return 0;
756
757 delete_ctrl_cdev_device:
758 cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
759 free_ctrl_device:
760 put_device(hdev->dev_ctrl);
761 return rc;
762 }
763
static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
765 {
766 if (!hdev->cdev_sysfs_debugfs_created)
767 return;
768
769 hl_sysfs_fini(hdev);
770
771 cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
772 put_device(hdev->dev_ctrl);
773 }
774
static void device_hard_reset_pending(struct work_struct *work)
776 {
777 struct hl_device_reset_work *device_reset_work =
778 container_of(work, struct hl_device_reset_work, reset_work.work);
779 struct hl_device *hdev = device_reset_work->hdev;
780 u32 flags;
781 int rc;
782
783 flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
784
785 rc = hl_device_reset(hdev, flags);
786
787 if ((rc == -EBUSY) && !hdev->device_fini_pending) {
788 struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
789
790 if (ctx) {
			/* The read refcount value should be subtracted by one, because the read is
			 * protected with hl_get_compute_ctx().
			 */
794 dev_info(hdev->dev,
795 "Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
796 kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
797 hl_ctx_put(ctx);
798 } else {
799 dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
800 HL_PENDING_RESET_PER_SEC);
801 }
802
803 queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
804 msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
805 }
806 }
807
static void device_release_watchdog_func(struct work_struct *work)
809 {
810 struct hl_device_reset_work *watchdog_work =
811 container_of(work, struct hl_device_reset_work, reset_work.work);
812 struct hl_device *hdev = watchdog_work->hdev;
813 u32 flags;
814
815 dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
816
817 flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
818
819 hl_device_reset(hdev, flags);
820 }
821
/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists
 */
static int device_early_init(struct hl_device *hdev)
831 {
832 int i, rc;
833 char workq_name[32];
834
835 switch (hdev->asic_type) {
836 case ASIC_GOYA:
837 goya_set_asic_funcs(hdev);
838 strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
839 break;
840 case ASIC_GAUDI:
841 gaudi_set_asic_funcs(hdev);
842 strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
843 break;
844 case ASIC_GAUDI_SEC:
845 gaudi_set_asic_funcs(hdev);
846 strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
847 break;
848 case ASIC_GAUDI2:
849 gaudi2_set_asic_funcs(hdev);
850 strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
851 break;
852 case ASIC_GAUDI2B:
853 gaudi2_set_asic_funcs(hdev);
854 strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
855 break;
856 case ASIC_GAUDI2C:
857 gaudi2_set_asic_funcs(hdev);
858 strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
859 break;
860 default:
861 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
862 hdev->asic_type);
863 return -EINVAL;
864 }
865
866 rc = hdev->asic_funcs->early_init(hdev);
867 if (rc)
868 return rc;
869
870 rc = hl_asid_init(hdev);
871 if (rc)
872 goto early_fini;
873
874 if (hdev->asic_prop.completion_queues_count) {
875 hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
876 sizeof(struct workqueue_struct *),
877 GFP_KERNEL);
878 if (!hdev->cq_wq) {
879 rc = -ENOMEM;
880 goto asid_fini;
881 }
882 }
883
884 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
885 snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
886 hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
887 if (hdev->cq_wq[i] == NULL) {
888 dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
889 rc = -ENOMEM;
890 goto free_cq_wq;
891 }
892 }
893
894 snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
895 hdev->eq_wq = create_singlethread_workqueue(workq_name);
896 if (hdev->eq_wq == NULL) {
897 dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
898 rc = -ENOMEM;
899 goto free_cq_wq;
900 }
901
902 snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
903 hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
904 if (!hdev->cs_cmplt_wq) {
905 dev_err(hdev->dev,
906 "Failed to allocate CS completions workqueue\n");
907 rc = -ENOMEM;
908 goto free_eq_wq;
909 }
910
911 snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
912 hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
913 if (!hdev->ts_free_obj_wq) {
914 dev_err(hdev->dev,
915 "Failed to allocate Timestamp registration free workqueue\n");
916 rc = -ENOMEM;
917 goto free_cs_cmplt_wq;
918 }
919
920 snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
921 hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
922 if (!hdev->prefetch_wq) {
923 dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
924 rc = -ENOMEM;
925 goto free_ts_free_wq;
926 }
927
928 hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
929 if (!hdev->hl_chip_info) {
930 rc = -ENOMEM;
931 goto free_prefetch_wq;
932 }
933
934 rc = hl_mmu_if_set_funcs(hdev);
935 if (rc)
936 goto free_chip_info;
937
938 hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
939
940 snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
941 hdev->reset_wq = create_singlethread_workqueue(workq_name);
942 if (!hdev->reset_wq) {
943 rc = -ENOMEM;
944 dev_err(hdev->dev, "Failed to create device reset WQ\n");
945 goto free_cb_mgr;
946 }
947
948 INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
949 hdev->device_reset_work.hdev = hdev;
950 hdev->device_fini_pending = 0;
951
952 INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
953 device_release_watchdog_func);
954 hdev->device_release_watchdog_work.hdev = hdev;
955
956 mutex_init(&hdev->send_cpu_message_lock);
957 mutex_init(&hdev->debug_lock);
958 INIT_LIST_HEAD(&hdev->cs_mirror_list);
959 spin_lock_init(&hdev->cs_mirror_lock);
960 spin_lock_init(&hdev->reset_info.lock);
961 INIT_LIST_HEAD(&hdev->fpriv_list);
962 INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
963 mutex_init(&hdev->fpriv_list_lock);
964 mutex_init(&hdev->fpriv_ctrl_list_lock);
965 mutex_init(&hdev->clk_throttling.lock);
966
967 return 0;
968
969 free_cb_mgr:
970 hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
971 hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
972 free_chip_info:
973 kfree(hdev->hl_chip_info);
974 free_prefetch_wq:
975 destroy_workqueue(hdev->prefetch_wq);
976 free_ts_free_wq:
977 destroy_workqueue(hdev->ts_free_obj_wq);
978 free_cs_cmplt_wq:
979 destroy_workqueue(hdev->cs_cmplt_wq);
980 free_eq_wq:
981 destroy_workqueue(hdev->eq_wq);
982 free_cq_wq:
983 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
984 if (hdev->cq_wq[i])
985 destroy_workqueue(hdev->cq_wq[i]);
986 kfree(hdev->cq_wq);
987 asid_fini:
988 hl_asid_fini(hdev);
989 early_fini:
990 if (hdev->asic_funcs->early_fini)
991 hdev->asic_funcs->early_fini(hdev);
992
993 return rc;
994 }
995
/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 */
static void device_early_fini(struct hl_device *hdev)
1003 {
1004 int i;
1005
1006 mutex_destroy(&hdev->debug_lock);
1007 mutex_destroy(&hdev->send_cpu_message_lock);
1008
1009 mutex_destroy(&hdev->fpriv_list_lock);
1010 mutex_destroy(&hdev->fpriv_ctrl_list_lock);
1011
1012 mutex_destroy(&hdev->clk_throttling.lock);
1013
1014 hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
1015 hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
1016
1017 kfree(hdev->hl_chip_info);
1018
1019 destroy_workqueue(hdev->prefetch_wq);
1020 destroy_workqueue(hdev->ts_free_obj_wq);
1021 destroy_workqueue(hdev->cs_cmplt_wq);
1022 destroy_workqueue(hdev->eq_wq);
1023 destroy_workqueue(hdev->reset_wq);
1024
1025 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1026 destroy_workqueue(hdev->cq_wq[i]);
1027 kfree(hdev->cq_wq);
1028
1029 hl_asid_fini(hdev);
1030
1031 if (hdev->asic_funcs->early_fini)
1032 hdev->asic_funcs->early_fini(hdev);
1033 }
1034
static bool is_pci_link_healthy(struct hl_device *hdev)
1036 {
1037 u16 vendor_id;
1038
1039 if (!hdev->pdev)
1040 return false;
1041
1042 pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
1043
1044 return (vendor_id == PCI_VENDOR_ID_HABANALABS);
1045 }
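
/*
 * A config-space read of the vendor ID is a cheap link-health probe: when the PCI link is
 * down, the read typically returns all-ones rather than PCI_VENDOR_ID_HABANALABS, so the
 * comparison above doubles as a link check.
 */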
1046
static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
1048 {
1049 struct asic_fixed_properties *prop = &hdev->asic_prop;
1050
1051 if (!prop->cpucp_info.eq_health_check_supported)
1052 return 0;
1053
1054 if (hdev->eq_heartbeat_received) {
1055 hdev->eq_heartbeat_received = false;
1056 } else {
1057 dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
1058 return -EIO;
1059 }
1060
1061 return 0;
1062 }
1063
static void hl_device_heartbeat(struct work_struct *work)
1065 {
1066 struct hl_device *hdev = container_of(work, struct hl_device,
1067 work_heartbeat.work);
1068 struct hl_info_fw_err_info info = {0};
1069 u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
1070
1071 /* Start heartbeat checks only after driver has enabled events from FW */
1072 if (!hl_device_operational(hdev, NULL) || !hdev->init_done)
1073 goto reschedule;
1074
	/*
	 * For the EQ health check we need to verify that the driver received the heartbeat
	 * EQ event, in order to validate that the EQ is working.
	 * Reschedule only if both the EQ is healthy and sending the next heartbeat succeeded.
	 */
1080 if ((!hl_device_eq_heartbeat_check(hdev)) && (!hdev->asic_funcs->send_heartbeat(hdev)))
1081 goto reschedule;
1082
1083 if (hl_device_operational(hdev, NULL))
1084 dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
1085 is_pci_link_healthy(hdev) ? "healthy" : "broken");
1086
1087 info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
1088 info.event_mask = &event_mask;
1089 hl_handle_fw_err(hdev, &info);
1090 hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
1091
1092 return;
1093
1094 reschedule:
1095 /*
1096 * prev_reset_trigger tracks consecutive fatal h/w errors until first
1097 * heartbeat immediately post reset.
1098 * If control reached here, then at least one heartbeat work has been
1099 * scheduled since last reset/init cycle.
1100 * So if the device is not already in reset cycle, reset the flag
1101 * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
1102 * status for at least one heartbeat. From this point driver restarts
1103 * tracking future consecutive fatal errors.
1104 */
1105 if (!hdev->reset_info.in_reset)
1106 hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1107
1108 schedule_delayed_work(&hdev->work_heartbeat,
1109 usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
1110 }
1111
/*
 * device_late_init - do late initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Do stuff that either needs the device H/W queues to be active or needs
 * to happen after all the rest of the initialization is finished
 */
static int device_late_init(struct hl_device *hdev)
1121 {
1122 int rc;
1123
1124 if (hdev->asic_funcs->late_init) {
1125 rc = hdev->asic_funcs->late_init(hdev);
1126 if (rc) {
1127 dev_err(hdev->dev,
1128 "failed late initialization for the H/W\n");
1129 return rc;
1130 }
1131 }
1132
1133 hdev->high_pll = hdev->asic_prop.high_pll;
1134
1135 if (hdev->heartbeat) {
		/*
		 * Before scheduling the heartbeat, the driver checks whether the EQ event has been
		 * received. For the first schedule we need to set the indication to true; for the
		 * following ones the indication will be true only if the EQ event was sent by the FW.
		 */
1141 hdev->eq_heartbeat_received = true;
1142
1143 INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
1144
1145 schedule_delayed_work(&hdev->work_heartbeat,
1146 usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
1147 }
1148
1149 hdev->late_init_done = true;
1150
1151 return 0;
1152 }
1153
/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 */
static void device_late_fini(struct hl_device *hdev)
1161 {
1162 if (!hdev->late_init_done)
1163 return;
1164
1165 if (hdev->heartbeat)
1166 cancel_delayed_work_sync(&hdev->work_heartbeat);
1167
1168 if (hdev->asic_funcs->late_fini)
1169 hdev->asic_funcs->late_fini(hdev);
1170
1171 hdev->late_init_done = false;
1172 }
1173
int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
1175 {
1176 u64 max_power, curr_power, dc_power, dividend, divisor;
1177 int rc;
1178
1179 max_power = hdev->max_power;
1180 dc_power = hdev->asic_prop.dc_power_default;
1181 divisor = max_power - dc_power;
1182 if (!divisor) {
1183 dev_warn(hdev->dev, "device utilization is not supported\n");
1184 return -EOPNOTSUPP;
1185 }
1186 rc = hl_fw_cpucp_power_get(hdev, &curr_power);
1187
1188 if (rc)
1189 return rc;
1190
1191 curr_power = clamp(curr_power, dc_power, max_power);
1192
1193 dividend = (curr_power - dc_power) * 100;
1194 *utilization = (u32) div_u64(dividend, divisor);
1195
1196 return 0;
1197 }
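
/*
 * The utilization reported above is the current power draw expressed as a percentage of
 * the dynamic range between idle (dc_power) and maximum power:
 *
 *	utilization = (curr_power - dc_power) * 100 / (max_power - dc_power)
 *
 * For example (hypothetical numbers), with dc_power = 100W, max_power = 300W and
 * curr_power = 200W, the reported utilization is (200 - 100) * 100 / (300 - 100) = 50.
 */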
1198
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
1200 {
1201 int rc = 0;
1202
1203 mutex_lock(&hdev->debug_lock);
1204
1205 if (!enable) {
1206 if (!hdev->in_debug) {
1207 dev_err(hdev->dev,
1208 "Failed to disable debug mode because device was not in debug mode\n");
1209 rc = -EFAULT;
1210 goto out;
1211 }
1212
1213 if (!hdev->reset_info.hard_reset_pending)
1214 hdev->asic_funcs->halt_coresight(hdev, ctx);
1215
1216 hdev->in_debug = 0;
1217
1218 goto out;
1219 }
1220
1221 if (hdev->in_debug) {
1222 dev_err(hdev->dev,
1223 "Failed to enable debug mode because device is already in debug mode\n");
1224 rc = -EFAULT;
1225 goto out;
1226 }
1227
1228 hdev->in_debug = 1;
1229
1230 out:
1231 mutex_unlock(&hdev->debug_lock);
1232
1233 return rc;
1234 }
1235
static void take_release_locks(struct hl_device *hdev)
{
	/* Flush anyone that is inside the critical section of enqueuing
	 * jobs to the H/W
	 */
1241 hdev->asic_funcs->hw_queues_lock(hdev);
1242 hdev->asic_funcs->hw_queues_unlock(hdev);
1243
1244 /* Flush processes that are sending message to CPU */
1245 mutex_lock(&hdev->send_cpu_message_lock);
1246 mutex_unlock(&hdev->send_cpu_message_lock);
1247
1248 /* Flush anyone that is inside device open */
1249 mutex_lock(&hdev->fpriv_list_lock);
1250 mutex_unlock(&hdev->fpriv_list_lock);
1251 mutex_lock(&hdev->fpriv_ctrl_list_lock);
1252 mutex_unlock(&hdev->fpriv_ctrl_list_lock);
1253 }
1254
static void hl_abort_waiting_for_completions(struct hl_device *hdev)
1256 {
1257 hl_abort_waiting_for_cs_completions(hdev);
1258
1259 /* Release all pending user interrupts, each pending user interrupt
1260 * holds a reference to a user context.
1261 */
1262 hl_release_pending_user_interrupts(hdev);
1263 }
1264
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
				bool skip_wq_flush)
1267 {
1268 if (hard_reset)
1269 device_late_fini(hdev);
1270
1271 /*
1272 * Halt the engines and disable interrupts so we won't get any more
1273 * completions from H/W and we won't have any accesses from the
1274 * H/W to the host machine
1275 */
1276 hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
1277
1278 /* Go over all the queues, release all CS and their jobs */
1279 hl_cs_rollback_all(hdev, skip_wq_flush);
1280
1281 /* flush the MMU prefetch workqueue */
1282 flush_workqueue(hdev->prefetch_wq);
1283
1284 hl_abort_waiting_for_completions(hdev);
1285 }
1286
/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
1297 {
1298 int rc;
1299
1300 pci_save_state(hdev->pdev);
1301
1302 /* Block future CS/VM/JOB completion operations */
1303 spin_lock(&hdev->reset_info.lock);
1304 if (hdev->reset_info.in_reset) {
1305 spin_unlock(&hdev->reset_info.lock);
1306 dev_err(hdev->dev, "Can't suspend while in reset\n");
1307 return -EIO;
1308 }
1309 hdev->reset_info.in_reset = 1;
1310 spin_unlock(&hdev->reset_info.lock);
1311
1312 /* This blocks all other stuff that is not blocked by in_reset */
1313 hdev->disabled = true;
1314
1315 take_release_locks(hdev);
1316
1317 rc = hdev->asic_funcs->suspend(hdev);
1318 if (rc)
1319 dev_err(hdev->dev,
1320 "Failed to disable PCI access of device CPU\n");
1321
1322 /* Shut down the device */
1323 pci_disable_device(hdev->pdev);
1324 pci_set_power_state(hdev->pdev, PCI_D3hot);
1325
1326 return 0;
1327 }
1328
/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
1339 {
1340 int rc;
1341
1342 pci_set_power_state(hdev->pdev, PCI_D0);
1343 pci_restore_state(hdev->pdev);
1344 rc = pci_enable_device_mem(hdev->pdev);
1345 if (rc) {
1346 dev_err(hdev->dev,
1347 "Failed to enable PCI device in resume\n");
1348 return rc;
1349 }
1350
1351 pci_set_master(hdev->pdev);
1352
1353 rc = hdev->asic_funcs->resume(hdev);
1354 if (rc) {
1355 dev_err(hdev->dev, "Failed to resume device after suspend\n");
1356 goto disable_device;
1357 }
1358
1359
1360 /* 'in_reset' was set to true during suspend, now we must clear it in order
1361 * for hard reset to be performed
1362 */
1363 spin_lock(&hdev->reset_info.lock);
1364 hdev->reset_info.in_reset = 0;
1365 spin_unlock(&hdev->reset_info.lock);
1366
1367 rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
1368 if (rc) {
1369 dev_err(hdev->dev, "Failed to reset device during resume\n");
1370 goto disable_device;
1371 }
1372
1373 return 0;
1374
1375 disable_device:
1376 pci_disable_device(hdev->pdev);
1377
1378 return rc;
1379 }
1380
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
1382 {
1383 struct task_struct *task = NULL;
1384 struct list_head *hpriv_list;
1385 struct hl_fpriv *hpriv;
1386 struct mutex *hpriv_lock;
1387 u32 pending_cnt;
1388
1389 hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1390 hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1391
1392 /* Giving time for user to close FD, and for processes that are inside
1393 * hl_device_open to finish
1394 */
1395 if (!list_empty(hpriv_list))
1396 ssleep(1);
1397
1398 if (timeout) {
1399 pending_cnt = timeout;
1400 } else {
1401 if (hdev->process_kill_trial_cnt) {
1402 /* Processes have been already killed */
1403 pending_cnt = 1;
1404 goto wait_for_processes;
1405 } else {
1406 /* Wait a small period after process kill */
1407 pending_cnt = HL_PENDING_RESET_PER_SEC;
1408 }
1409 }
1410
1411 mutex_lock(hpriv_lock);
1412
1413 /* This section must be protected because we are dereferencing
1414 * pointers that are freed if the process exits
1415 */
1416 list_for_each_entry(hpriv, hpriv_list, dev_node) {
1417 task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
1418 if (task) {
1419 dev_info(hdev->dev, "Killing user process pid=%d\n",
1420 task_pid_nr(task));
1421 send_sig(SIGKILL, task, 1);
1422 usleep_range(1000, 10000);
1423
1424 put_task_struct(task);
1425 } else {
1426 dev_dbg(hdev->dev,
1427 "Can't get task struct for user process %d, process was killed from outside the driver\n",
1428 pid_nr(hpriv->taskpid));
1429 }
1430 }
1431
1432 mutex_unlock(hpriv_lock);
1433
1434 /*
1435 * We killed the open users, but that doesn't mean they are closed.
1436 * It could be that they are running a long cleanup phase in the driver
1437 * e.g. MMU unmappings, or running other long teardown flow even before
1438 * our cleanup.
1439 * Therefore we need to wait again to make sure they are closed before
1440 * continuing with the reset.
1441 */
1442
1443 wait_for_processes:
1444 while ((!list_empty(hpriv_list)) && (pending_cnt)) {
1445 dev_dbg(hdev->dev,
1446 "Waiting for all unmap operations to finish before hard reset\n");
1447
1448 pending_cnt--;
1449
1450 ssleep(1);
1451 }
1452
1453 /* All processes exited successfully */
1454 if (list_empty(hpriv_list))
1455 return 0;
1456
1457 /* Give up waiting for processes to exit */
1458 if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
1459 return -ETIME;
1460
1461 hdev->process_kill_trial_cnt++;
1462
1463 return -EBUSY;
1464 }
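
/*
 * Return value summary for device_kill_open_processes(): 0 when all open processes have
 * exited, -EBUSY when some are still around and the caller should retry later (the hard
 * reset work reschedules itself in that case), and -ETIME once
 * HL_PENDING_RESET_MAX_TRIALS attempts have been exhausted.
 */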
1465
static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
1467 {
1468 struct list_head *hpriv_list;
1469 struct hl_fpriv *hpriv;
1470 struct mutex *hpriv_lock;
1471
1472 hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1473 hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1474
1475 mutex_lock(hpriv_lock);
1476 list_for_each_entry(hpriv, hpriv_list, dev_node)
1477 hpriv->hdev = NULL;
1478 mutex_unlock(hpriv_lock);
1479 }
1480
static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
1482 {
	/* If the reset is due to heartbeat, the device CPU is not responsive,
	 * in which case there is no point in sending it the PCI disable message.
	 */
1486 if ((flags & HL_DRV_RESET_HARD) &&
1487 !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
		/* Disable PCI access from device F/W so it won't send
		 * us additional interrupts. We disable MSI/MSI-X at
		 * the halt_engines function and we can't have the F/W
		 * sending us interrupts after that. We need to disable
		 * the access here because if the device is marked
		 * disabled, the message won't be sent. Also, in case
		 * of heartbeat, the device CPU is marked as disabled
		 * so this message won't be sent
		 */
1497 if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
1498 dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
1499 return;
1500 }
1501
1502 /* verify that last EQs are handled before disabled is set */
1503 if (hdev->cpu_queues_enable)
1504 synchronize_irq(pci_irq_vector(hdev->pdev,
1505 hdev->asic_prop.eq_interrupt_id));
1506 }
1507 }
1508
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
1510 {
1511 u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1512
1513 /* No consecutive mechanism when user context exists */
1514 if (hdev->is_compute_ctx_active)
1515 return;
1516
1517 /*
1518 * 'reset cause' is being updated here, because getting here
1519 * means that it's the 1st time and the last time we're here
1520 * ('in_reset' makes sure of it). This makes sure that
1521 * 'reset_cause' will continue holding its 1st recorded reason!
1522 */
1523 if (flags & HL_DRV_RESET_HEARTBEAT) {
1524 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
1525 cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
1526 } else if (flags & HL_DRV_RESET_TDR) {
1527 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
1528 cur_reset_trigger = HL_DRV_RESET_TDR;
1529 } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
1530 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1531 cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
1532 } else {
1533 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1534 }
1535
	/*
	 * If the reset cause is the same twice in a row, then reset_trigger_repeated
	 * is set, and if this reset is due to a fatal FW error the
	 * device is put in an unstable state.
	 */
1541 if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
1542 hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
1543 hdev->reset_info.reset_trigger_repeated = 0;
1544 } else {
1545 hdev->reset_info.reset_trigger_repeated = 1;
1546 }
1547 }
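
/*
 * The prev_reset_trigger/reset_trigger_repeated pair implements the consecutive-fatal-error
 * tracking: only when the same trigger is seen twice in a row is reset_trigger_repeated set,
 * and hl_device_reset() later refuses to continue a hard reset if the repeated trigger was
 * HL_DRV_RESET_FW_FATAL_ERR.
 */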
1548
/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @flags: reset flags.
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, u32 flags)
1566 {
1567 bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
1568 schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
1569 u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
1570 struct hl_ctx *ctx;
1571 int i, rc, hw_fini_rc;
1572
1573 if (!hdev->init_done) {
1574 dev_err(hdev->dev, "Can't reset before initialization is done\n");
1575 return 0;
1576 }
1577
1578 hard_reset = !!(flags & HL_DRV_RESET_HARD);
1579 from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
1580 fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
1581 from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
1582 delay_reset = !!(flags & HL_DRV_RESET_DELAY);
1583 from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
1584 reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
1585
1586 if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
1587 dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
1588 return 0;
1589 }
1590
1591 if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
1592 dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
1593 hard_reset = true;
1594 }
1595
1596 if (reset_upon_device_release) {
1597 if (hard_reset) {
1598 dev_crit(hdev->dev,
1599 "Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
1600 return -EINVAL;
1601 }
1602
1603 goto do_reset;
1604 }
1605
1606 if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
1607 dev_dbg(hdev->dev,
1608 "asic doesn't allow inference soft reset - do hard-reset instead\n");
1609 hard_reset = true;
1610 }
1611
1612 do_reset:
1613 /* Re-entry of reset thread */
1614 if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
1615 goto kill_processes;
1616
1617 /*
1618 * Prevent concurrency in this function - only one reset should be
1619 * done at any given time. We need to perform this only if we didn't
1620 * get here from a dedicated hard reset thread.
1621 */
1622 if (!from_hard_reset_thread) {
1623 /* Block future CS/VM/JOB completion operations */
1624 spin_lock(&hdev->reset_info.lock);
1625 if (hdev->reset_info.in_reset) {
1626 /* We allow scheduling of a hard reset only during a compute reset */
1627 if (hard_reset && hdev->reset_info.in_compute_reset)
1628 hdev->reset_info.hard_reset_schedule_flags = flags;
1629 spin_unlock(&hdev->reset_info.lock);
1630 return 0;
1631 }
1632
1633 /* This still allows the completion of some KDMA ops
1634 * Update this before in_reset because in_compute_reset implies we are in reset
1635 */
1636 hdev->reset_info.in_compute_reset = !hard_reset;
1637
1638 hdev->reset_info.in_reset = 1;
1639
1640 spin_unlock(&hdev->reset_info.lock);
1641
1642 /* Cancel the device release watchdog work if required.
1643 * In case of reset-upon-device-release while the release watchdog work is
1644 * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
1645 */
1646 if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
1647 struct hl_device_reset_work *watchdog_work =
1648 &hdev->device_release_watchdog_work;
1649
1650 hdev->reset_info.watchdog_active = 0;
1651 if (!from_watchdog_thread)
1652 cancel_delayed_work_sync(&watchdog_work->reset_work);
1653
1654 if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
1655 hdev->reset_info.in_compute_reset = 0;
1656 flags |= HL_DRV_RESET_HARD;
1657 flags &= ~HL_DRV_RESET_DEV_RELEASE;
1658 hard_reset = true;
1659 }
1660 }
1661
1662 if (delay_reset)
1663 usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
1664
1665 escalate_reset_flow:
1666 handle_reset_trigger(hdev, flags);
1667 send_disable_pci_access(hdev, flags);
1668
1669 /* This also blocks future CS/VM/JOB completion operations */
1670 hdev->disabled = true;
1671
1672 take_release_locks(hdev);
1673
1674 if (hard_reset)
1675 dev_info(hdev->dev, "Going to reset device\n");
1676 else if (reset_upon_device_release)
1677 dev_dbg(hdev->dev, "Going to reset device after release by user\n");
1678 else
1679 dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
1680 }
1681
1682 if ((hard_reset) && (!from_hard_reset_thread)) {
1683 hdev->reset_info.hard_reset_pending = true;
1684
1685 hdev->process_kill_trial_cnt = 0;
1686
1687 hdev->device_reset_work.flags = flags;
1688
1689 /*
1690 * Because the reset function can't run from heartbeat work,
1691 * we need to call the reset function from a dedicated work.
1692 */
1693 queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);
1694
1695 return 0;
1696 }
1697
1698 cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);
1699
1700 kill_processes:
1701 if (hard_reset) {
1702 /* Kill processes here after CS rollback. This is because the
1703 * process can't really exit until all its CSs are done, which
1704 * is what we do in cs rollback
1705 */
1706 rc = device_kill_open_processes(hdev, 0, false);
1707
1708 if (rc == -EBUSY) {
1709 if (hdev->device_fini_pending) {
1710 dev_crit(hdev->dev,
1711 "%s Failed to kill all open processes, stopping hard reset\n",
1712 dev_name(&(hdev)->pdev->dev));
1713 goto out_err;
1714 }
1715
1716 /* signal reset thread to reschedule */
1717 return rc;
1718 }
1719
1720 if (rc) {
1721 dev_crit(hdev->dev,
1722 "%s Failed to kill all open processes, stopping hard reset\n",
1723 dev_name(&(hdev)->pdev->dev));
1724 goto out_err;
1725 }
1726
1727 /* Flush the Event queue workers to make sure no other thread is
1728 * reading or writing to registers during the reset
1729 */
1730 flush_workqueue(hdev->eq_wq);
1731 }
1732
1733 /* Reset the H/W. It will be in idle state after this returns */
1734 hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
1735
1736 if (hard_reset) {
1737 hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
1738
1739 /* Release kernel context */
1740 if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
1741 hdev->kernel_ctx = NULL;
1742
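/* Tear down the memory manager and MMU S/W structures and reset the event
 * queue; the former two are re-initialized below once the H/W is back up.
 */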
1743 hl_vm_fini(hdev);
1744 hl_mmu_fini(hdev);
1745 hl_eq_reset(hdev, &hdev->event_queue);
1746 }
1747
1748 /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
1749 hl_hw_queue_reset(hdev, hard_reset);
1750 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1751 hl_cq_reset(hdev, &hdev->completion_queue[i]);
1752
1753 /* Make sure the context switch phase will run again */
1754 ctx = hl_get_compute_ctx(hdev);
1755 if (ctx) {
1756 atomic_set(&ctx->thread_ctx_switch_token, 1);
1757 ctx->thread_ctx_switch_wait_token = 0;
1758 hl_ctx_put(ctx);
1759 }
1760
1761 if (hw_fini_rc) {
1762 rc = hw_fini_rc;
1763 goto out_err;
1764 }
1765 /* Finished tear-down, starting to re-initialize */
1766
1767 if (hard_reset) {
1768 hdev->device_cpu_disabled = false;
1769 hdev->reset_info.hard_reset_pending = false;
1770
1771 if (hdev->reset_info.reset_trigger_repeated &&
1772 (hdev->reset_info.prev_reset_trigger ==
1773 HL_DRV_RESET_FW_FATAL_ERR)) {
1774 /* If there are 2 back-to-back resets from the F/W,
1775 * ensure the driver puts the device in an unusable state
1776 */
1777 dev_crit(hdev->dev,
1778 "%s Consecutive FW fatal errors received, stopping hard reset\n",
1779 dev_name(&(hdev)->pdev->dev));
1780 rc = -EIO;
1781 goto out_err;
1782 }
1783
1784 if (hdev->kernel_ctx) {
1785 dev_crit(hdev->dev,
1786 "%s kernel ctx was alive during hard reset, something is terribly wrong\n",
1787 dev_name(&(hdev)->pdev->dev));
1788 rc = -EBUSY;
1789 goto out_err;
1790 }
1791
1792 rc = hl_mmu_init(hdev);
1793 if (rc) {
1794 dev_err(hdev->dev,
1795 "Failed to initialize MMU S/W after hard reset\n");
1796 goto out_err;
1797 }
1798
1799 /* Allocate the kernel context */
1800 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
1801 GFP_KERNEL);
1802 if (!hdev->kernel_ctx) {
1803 rc = -ENOMEM;
1804 hl_mmu_fini(hdev);
1805 goto out_err;
1806 }
1807
1808 hdev->is_compute_ctx_active = false;
1809
1810 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
1811 if (rc) {
1812 dev_err(hdev->dev,
1813 "failed to init kernel ctx in hard reset\n");
1814 kfree(hdev->kernel_ctx);
1815 hdev->kernel_ctx = NULL;
1816 hl_mmu_fini(hdev);
1817 goto out_err;
1818 }
1819 }
1820
1821 /* The device must be enabled at this point, because the rest of the
1822 * initialization requires communication with the device firmware to get
1823 * information that is needed for the initialization itself
1824 */
1825 hdev->disabled = false;
1826
1827 /* F/W security enabled indication might be updated after hard-reset */
1828 if (hard_reset) {
1829 rc = hl_fw_read_preboot_status(hdev);
1830 if (rc)
1831 goto out_err;
1832 }
1833
1834 rc = hdev->asic_funcs->hw_init(hdev);
1835 if (rc) {
1836 dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
1837 goto out_err;
1838 }
1839
1840 /* If device is not idle fail the reset process */
1841 if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
1842 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
1843 print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
1844 rc = -EIO;
1845 goto out_err;
1846 }
1847
1848 /* Check that the communication with the device is working */
1849 rc = hdev->asic_funcs->test_queues(hdev);
1850 if (rc) {
1851 dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
1852 goto out_err;
1853 }
1854
1855 if (hard_reset) {
1856 rc = device_late_init(hdev);
1857 if (rc) {
1858 dev_err(hdev->dev, "Failed late init after hard reset\n");
1859 goto out_err;
1860 }
1861
1862 rc = hl_vm_init(hdev);
1863 if (rc) {
1864 dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
1865 goto out_err;
1866 }
1867
1868 if (!hdev->asic_prop.fw_security_enabled)
1869 hl_fw_set_max_power(hdev);
1870 } else {
1871 rc = hdev->asic_funcs->compute_reset_late_init(hdev);
1872 if (rc) {
1873 if (reset_upon_device_release)
1874 dev_err(hdev->dev,
1875 "Failed late init in reset after device release\n");
1876 else
1877 dev_err(hdev->dev, "Failed late init after compute reset\n");
1878 goto out_err;
1879 }
1880 }
1881
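/* Scrub the device memory so no data from before the reset is left behind */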
1882 rc = hdev->asic_funcs->scrub_device_mem(hdev);
1883 if (rc) {
1884 dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
1885 goto out_err;
1886 }
1887
1888 spin_lock(&hdev->reset_info.lock);
1889 hdev->reset_info.in_compute_reset = 0;
1890
1891 /* Schedule hard reset only if requested and if not already in hard reset.
1892 * We keep 'in_reset' enabled, so no other reset can go in during the hard
1893 * reset schedule
1894 */
1895 if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
1896 schedule_hard_reset = true;
1897 else
1898 hdev->reset_info.in_reset = 0;
1899
1900 spin_unlock(&hdev->reset_info.lock);
1901
1902 hdev->reset_info.needs_reset = false;
1903
1904 if (hard_reset)
1905 dev_info(hdev->dev,
1906 "Successfully finished resetting the %s device\n",
1907 dev_name(&(hdev)->pdev->dev));
1908 else
1909 dev_dbg(hdev->dev,
1910 "Successfully finished resetting the %s device\n",
1911 dev_name(&(hdev)->pdev->dev));
1912
1913 if (hard_reset) {
1914 hdev->reset_info.hard_reset_cnt++;
1915
1916 /* After reset is done, we are ready to receive events from
1917 * the F/W. We can't do it before because we will ignore events
1918 * and if those events are fatal, we won't know about it and
1919 * the device will be operational although it shouldn't be
1920 */
1921 hdev->asic_funcs->enable_events_from_fw(hdev);
1922 } else {
1923 if (!reset_upon_device_release)
1924 hdev->reset_info.compute_reset_cnt++;
1925
1926 if (schedule_hard_reset) {
1927 dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
1928 flags = hdev->reset_info.hard_reset_schedule_flags;
1929 hdev->reset_info.hard_reset_schedule_flags = 0;
1930 hard_reset = true;
1931 goto escalate_reset_flow;
1932 }
1933 }
1934
1935 return 0;
1936
1937 out_err:
1938 hdev->disabled = true;
1939
1940 spin_lock(&hdev->reset_info.lock);
1941 hdev->reset_info.in_compute_reset = 0;
1942
1943 if (hard_reset) {
1944 dev_err(hdev->dev,
1945 "%s Failed to reset! Device is NOT usable\n",
1946 dev_name(&(hdev)->pdev->dev));
1947 hdev->reset_info.hard_reset_cnt++;
1948 } else {
1949 if (reset_upon_device_release) {
1950 dev_err(hdev->dev, "Failed to reset device after user release\n");
1951 flags &= ~HL_DRV_RESET_DEV_RELEASE;
1952 } else {
1953 dev_err(hdev->dev, "Failed to do compute reset\n");
1954 hdev->reset_info.compute_reset_cnt++;
1955 }
1956
1957 spin_unlock(&hdev->reset_info.lock);
1958 flags |= HL_DRV_RESET_HARD;
1959 hard_reset = true;
1960 goto escalate_reset_flow;
1961 }
1962
1963 hdev->reset_info.in_reset = 0;
1964
1965 spin_unlock(&hdev->reset_info.lock);
1966
1967 return rc;
1968 }
1969
1970 /*
1971 * hl_device_cond_reset() - conditionally reset the device.
1972 * @hdev: pointer to habanalabs device structure.
1973 * @reset_flags: reset flags.
1974 * @event_mask: events to notify user about.
1975 *
1976 * Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
1977 * unless another reset precedes it.
1978 */
1979 int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
1980 {
1981 struct hl_ctx *ctx = NULL;
1982
1983 /* F/W reset cannot be postponed */
1984 if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
1985 goto device_reset;
1986
1987 /* Device release watchdog is relevant only if user exists and gets a reset notification */
1988 if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
1989 dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
1990 goto device_reset;
1991 }
1992
1993 ctx = hl_get_compute_ctx(hdev);
1994 if (!ctx)
1995 goto device_reset;
1996
1997 /*
1998 * There is no point in postponing the reset if user is not registered for events.
1999 * However if no eventfd_ctx exists but the device release watchdog is already scheduled, it
2000 * just implies that user has unregistered as part of handling a previous event. In this
2001 * case an immediate reset is not required.
2002 */
2003 if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)
2004 goto device_reset;
2005
2006 /* Schedule the device release watchdog work unless reset is already in progress or if the
2007 * work is already scheduled.
2008 */
2009 spin_lock(&hdev->reset_info.lock);
2010 if (hdev->reset_info.in_reset) {
2011 spin_unlock(&hdev->reset_info.lock);
2012 goto device_reset;
2013 }
2014
2015 if (hdev->reset_info.watchdog_active) {
2016 hdev->device_release_watchdog_work.flags |= flags;
2017 goto out;
2018 }
2019
2020 hdev->device_release_watchdog_work.flags = flags;
2021 dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
2022 hdev->device_release_watchdog_timeout_sec);
2023 schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
2024 msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
2025 hdev->reset_info.watchdog_active = 1;
2026 out:
2027 spin_unlock(&hdev->reset_info.lock);
2028
2029 hl_notifier_event_send_all(hdev, event_mask);
2030
2031 hl_ctx_put(ctx);
2032
2033 hl_abort_waiting_for_completions(hdev);
2034
2035 return 0;
2036
2037 device_reset:
2038 if (event_mask)
2039 hl_notifier_event_send_all(hdev, event_mask);
2040 if (ctx)
2041 hl_ctx_put(ctx);
2042
2043 return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD);
2044 }
2045
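/* Record the event(s) in the process' pending events mask and signal its
 * eventfd, if the user registered one.
 */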
2046 static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
2047 {
2048 mutex_lock(&notifier_event->lock);
2049 notifier_event->events_mask |= event_mask;
2050
2051 if (notifier_event->eventfd)
2052 eventfd_signal(notifier_event->eventfd);
2053
2054 mutex_unlock(&notifier_event->lock);
2055 }
2056
2057 /*
2058 * hl_notifier_event_send_all - notify all user processes via eventfd
2059 *
2060 * @hdev: pointer to habanalabs device structure
2061 * @event_mask: the occurred event/s
2063 */
2064 void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
2065 {
2066 struct hl_fpriv *hpriv;
2067
2068 if (!event_mask) {
2069 dev_warn(hdev->dev, "Skip sending zero event");
2070 return;
2071 }
2072
2073 mutex_lock(&hdev->fpriv_list_lock);
2074
2075 list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
2076 hl_notifier_event_send(&hpriv->notifier_event, event_mask);
2077
2078 mutex_unlock(&hdev->fpriv_list_lock);
2079 }
2080
2081 /*
2082 * hl_device_init - main initialization function for habanalabs device
2083 *
2084 * @hdev: pointer to habanalabs device structure
2085 *
2086 * Allocate an id for the device, do early initialization and then call the
2087 * ASIC specific initialization functions. Finally, create the cdev and the
2088 * Linux device to expose it to the user
2089 */
2090 int hl_device_init(struct hl_device *hdev)
2091 {
2092 int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
2093 struct hl_ts_free_jobs *free_jobs_data;
2094 bool expose_interfaces_on_err = false;
2095 void *p;
2096
2097 /* Initialize ASIC function pointers and perform early init */
2098 rc = device_early_init(hdev);
2099 if (rc)
2100 goto out_disabled;
2101
2102 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2103 hdev->asic_prop.user_interrupt_count;
2104
2105 if (user_interrupt_cnt) {
2106 hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
2107 GFP_KERNEL);
2108 if (!hdev->user_interrupt) {
2109 rc = -ENOMEM;
2110 goto early_fini;
2111 }
2112
2113 /* Timestamp records are supported only if the device supports CQs */
2114 if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2115 for (i = 0 ; i < user_interrupt_cnt ; i++) {
2116 p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2117 sizeof(struct timestamp_reg_free_node));
2118 if (!p) {
2119 rc = -ENOMEM;
2120 goto free_usr_intr_mem;
2121 }
2122 free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;
2123 free_jobs_data->free_nodes_pool = p;
2124 free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2125 free_jobs_data->next_avail_free_node_idx = 0;
2126 }
2127 }
2128 }
2129
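/* Allocate the timestamp free-nodes pool for the common user CQ interrupt as well */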
2130 free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;
2131 p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2132 sizeof(struct timestamp_reg_free_node));
2133 if (!p) {
2134 rc = -ENOMEM;
2135 goto free_usr_intr_mem;
2136 }
2137
2138 free_jobs_data->free_nodes_pool = p;
2139 free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2140 free_jobs_data->next_avail_free_node_idx = 0;
2141
2142 /*
2143 * Start calling ASIC initialization. First S/W then H/W and finally
2144 * late init
2145 */
2146 rc = hdev->asic_funcs->sw_init(hdev);
2147 if (rc)
2148 goto free_common_usr_intr_mem;
2149
2150
2151 /* initialize completion structure for multi CS wait */
2152 hl_multi_cs_completion_init(hdev);
2153
2154 /*
2155 * Initialize the H/W queues. Must be done before hw_init, because
2156 * there the addresses of the kernel queue are being written to the
2157 * registers of the device
2158 */
2159 rc = hl_hw_queues_create(hdev);
2160 if (rc) {
2161 dev_err(hdev->dev, "failed to initialize kernel queues\n");
2162 goto sw_fini;
2163 }
2164
2165 cq_cnt = hdev->asic_prop.completion_queues_count;
2166
2167 /*
2168 * Initialize the completion queues. Must be done before hw_init,
2169 * because there the addresses of the completion queues are being
2170 * passed as arguments to request_irq
2171 */
2172 if (cq_cnt) {
2173 hdev->completion_queue = kcalloc(cq_cnt,
2174 sizeof(*hdev->completion_queue),
2175 GFP_KERNEL);
2176
2177 if (!hdev->completion_queue) {
2178 dev_err(hdev->dev,
2179 "failed to allocate completion queues\n");
2180 rc = -ENOMEM;
2181 goto hw_queues_destroy;
2182 }
2183 }
2184
2185 for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
2186 rc = hl_cq_init(hdev, &hdev->completion_queue[i],
2187 hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
2188 if (rc) {
2189 dev_err(hdev->dev,
2190 "failed to initialize completion queue\n");
2191 goto cq_fini;
2192 }
2193 hdev->completion_queue[i].cq_idx = i;
2194 }
2195
2196 hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
2197 sizeof(struct hl_cs *), GFP_KERNEL);
2198 if (!hdev->shadow_cs_queue) {
2199 rc = -ENOMEM;
2200 goto cq_fini;
2201 }
2202
2203 /*
2204 * Initialize the event queue. Must be done before hw_init,
2205 * because there the address of the event queue is being
2206 * passed as argument to request_irq
2207 */
2208 rc = hl_eq_init(hdev, &hdev->event_queue);
2209 if (rc) {
2210 dev_err(hdev->dev, "failed to initialize event queue\n");
2211 goto free_shadow_cs_queue;
2212 }
2213
2214 /* MMU S/W must be initialized before kernel context is created */
2215 rc = hl_mmu_init(hdev);
2216 if (rc) {
2217 dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
2218 goto eq_fini;
2219 }
2220
2221 /* Allocate the kernel context */
2222 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
2223 if (!hdev->kernel_ctx) {
2224 rc = -ENOMEM;
2225 goto mmu_fini;
2226 }
2227
2228 hdev->is_compute_ctx_active = false;
2229
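/* Initialize the ASIC-specific state-dump support */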
2230 hdev->asic_funcs->state_dump_init(hdev);
2231
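/* Set defaults for the device-release watchdog timeout and the memory scrub value */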
2232 hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
2233
2234 hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
2235
2236 rc = hl_debugfs_device_init(hdev);
2237 if (rc) {
2238 dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");
2239 kfree(hdev->kernel_ctx);
2240 goto mmu_fini;
2241 }
2242
2243 /* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after
2244 * hl_debugfs_device_init().
2245 */
2246 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
2247 if (rc) {
2248 dev_err(hdev->dev, "failed to initialize kernel context\n");
2249 kfree(hdev->kernel_ctx);
2250 goto debugfs_device_fini;
2251 }
2252
2253 rc = hl_cb_pool_init(hdev);
2254 if (rc) {
2255 dev_err(hdev->dev, "failed to initialize CB pool\n");
2256 goto release_ctx;
2257 }
2258
2259 rc = hl_dec_init(hdev);
2260 if (rc) {
2261 dev_err(hdev->dev, "Failed to initialize the decoder module\n");
2262 goto cb_pool_fini;
2263 }
2264
2265 /*
2266 * From this point, override rc (=0) in case of an error to allow debugging
2267 * (by adding char devices and creating sysfs/debugfs files as part of the error flow).
2268 */
2269 expose_interfaces_on_err = true;
2270
2271 /* The device must be enabled at this point, because the rest of the
2272 * initialization requires communication with the device firmware to get
2273 * information that is needed for the initialization itself
2274 */
2275 hdev->disabled = false;
2276
2277 rc = hdev->asic_funcs->hw_init(hdev);
2278 if (rc) {
2279 dev_err(hdev->dev, "failed to initialize the H/W\n");
2280 rc = 0;
2281 goto out_disabled;
2282 }
2283
2284 /* Check that the communication with the device is working */
2285 rc = hdev->asic_funcs->test_queues(hdev);
2286 if (rc) {
2287 dev_err(hdev->dev, "Failed to detect if device is alive\n");
2288 rc = 0;
2289 goto out_disabled;
2290 }
2291
2292 rc = device_late_init(hdev);
2293 if (rc) {
2294 dev_err(hdev->dev, "Failed late initialization\n");
2295 rc = 0;
2296 goto out_disabled;
2297 }
2298
2299 dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
2300 hdev->asic_name,
2301 hdev->asic_prop.dram_size / SZ_1G);
2302
2303 rc = hl_vm_init(hdev);
2304 if (rc) {
2305 dev_err(hdev->dev, "Failed to initialize memory module\n");
2306 rc = 0;
2307 goto out_disabled;
2308 }
2309
2310 /*
2311 * Expose devices and sysfs/debugfs files to user.
2312 * From here there is no need to expose them in case of an error.
2313 */
2314 expose_interfaces_on_err = false;
2315
2316 rc = drm_dev_register(&hdev->drm, 0);
2317 if (rc) {
2318 dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);
2319 rc = 0;
2320 goto out_disabled;
2321 }
2322
2323 rc = cdev_sysfs_debugfs_add(hdev);
2324 if (rc) {
2325 dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
2326 rc = 0;
2327 goto out_disabled;
2328 }
2329
2330 /* Need to call this again because the max power might change,
2331 * depending on card type for certain ASICs
2332 */
2333 if (hdev->asic_prop.set_max_power_on_device_init &&
2334 !hdev->asic_prop.fw_security_enabled)
2335 hl_fw_set_max_power(hdev);
2336
2337 /*
2338 * hl_hwmon_init() must be called after device_late_init(), because only
2339 * there we get the information from the device about which
2340 * hwmon-related sensors the device supports.
2341 * Furthermore, it must be done after adding the device to the system.
2342 */
2343 rc = hl_hwmon_init(hdev);
2344 if (rc) {
2345 dev_err(hdev->dev, "Failed to initialize hwmon\n");
2346 rc = 0;
2347 goto out_disabled;
2348 }
2349
2350 dev_notice(hdev->dev,
2351 "Successfully added device %s to habanalabs driver\n",
2352 dev_name(&(hdev)->pdev->dev));
2353
2354 /* After initialization is done, we are ready to receive events from
2355 * the F/W. We can't do it before because we will ignore events and if
2356 * those events are fatal, we won't know about it and the device will
2357 * be operational although it shouldn't be
2358 */
2359 hdev->asic_funcs->enable_events_from_fw(hdev);
2360
2361 hdev->init_done = true;
2362
2363 return 0;
2364
2365 cb_pool_fini:
2366 hl_cb_pool_fini(hdev);
2367 release_ctx:
2368 if (hl_ctx_put(hdev->kernel_ctx) != 1)
2369 dev_err(hdev->dev,
2370 "kernel ctx is still alive on initialization failure\n");
2371 debugfs_device_fini:
2372 hl_debugfs_device_fini(hdev);
2373 mmu_fini:
2374 hl_mmu_fini(hdev);
2375 eq_fini:
2376 hl_eq_fini(hdev, &hdev->event_queue);
2377 free_shadow_cs_queue:
2378 kfree(hdev->shadow_cs_queue);
2379 cq_fini:
2380 for (i = 0 ; i < cq_ready_cnt ; i++)
2381 hl_cq_fini(hdev, &hdev->completion_queue[i]);
2382 kfree(hdev->completion_queue);
2383 hw_queues_destroy:
2384 hl_hw_queues_destroy(hdev);
2385 sw_fini:
2386 hdev->asic_funcs->sw_fini(hdev);
2387 free_common_usr_intr_mem:
2388 vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2389 free_usr_intr_mem:
2390 if (user_interrupt_cnt) {
2391 for (i = 0 ; i < user_interrupt_cnt ; i++) {
2392 if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)
2393 break;
2394 vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2395 }
2396 kfree(hdev->user_interrupt);
2397 }
2398 early_fini:
2399 device_early_fini(hdev);
2400 out_disabled:
2401 hdev->disabled = true;
2402 if (expose_interfaces_on_err) {
2403 drm_dev_register(&hdev->drm, 0);
2404 cdev_sysfs_debugfs_add(hdev);
2405 }
2406
2407 pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",
2408 hdev->cdev_idx, dev_name(&hdev->pdev->dev));
2409
2410 return rc;
2411 }
2412
2413 /*
2414 * hl_device_fini - main tear-down function for habanalabs device
2415 *
2416 * @hdev: pointer to habanalabs device structure
2417 *
2418 * Destroy the device, call ASIC fini functions and release the id
2419 */
2420 void hl_device_fini(struct hl_device *hdev)
2421 {
2422 u32 user_interrupt_cnt;
2423 bool device_in_reset;
2424 ktime_t timeout;
2425 u64 reset_sec;
2426 int i, rc;
2427
2428 dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));
2429
2430 hdev->device_fini_pending = 1;
2431 flush_delayed_work(&hdev->device_reset_work.reset_work);
2432
2433 if (hdev->pldm)
2434 reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
2435 else
2436 reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
2437
2438 /*
2439 * This function is competing with the reset function, so try to
2440 * take the reset atomic and if we are already in the middle of a reset,
2441 * wait until the reset function is finished. The reset function is designed
2442 * to always finish. However, in Gaudi, because of all the network
2443 * ports, the hard reset could take between 10-30 seconds
2444 */
2445
2446 timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
2447
2448 spin_lock(&hdev->reset_info.lock);
2449 device_in_reset = !!hdev->reset_info.in_reset;
2450 if (!device_in_reset)
2451 hdev->reset_info.in_reset = 1;
2452 spin_unlock(&hdev->reset_info.lock);
2453
2454 while (device_in_reset) {
2455 usleep_range(50, 200);
2456
2457 spin_lock(&hdev->reset_info.lock);
2458 device_in_reset = !!hdev->reset_info.in_reset;
2459 if (!device_in_reset)
2460 hdev->reset_info.in_reset = 1;
2461 spin_unlock(&hdev->reset_info.lock);
2462
2463 if (ktime_compare(ktime_get(), timeout) > 0) {
2464 dev_crit(hdev->dev,
2465 "%s Failed to remove device because reset function did not finish\n",
2466 dev_name(&(hdev)->pdev->dev));
2467 return;
2468 }
2469 }
2470
2471 cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
2472
2473 /* Disable PCI access from device F/W so it won't send us additional
2474 * interrupts. We disable MSI/MSI-X at the halt_engines function and we
2475 * can't have the F/W sending us interrupts after that. We need to
2476 * disable the access here because if the device is marked disabled, the
2477 * message won't be sent. Also, in case of heartbeat, the device CPU is
2478 * marked as disabled so this message won't be sent
2479 */
2480 hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2481
2482 /* Mark device as disabled */
2483 hdev->disabled = true;
2484
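/* Synchronize with any thread that is in the middle of opening the device or
 * submitting work, before tearing everything down
 */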
2485 take_release_locks(hdev);
2486
2487 hdev->reset_info.hard_reset_pending = true;
2488
2489 hl_hwmon_fini(hdev);
2490
2491 cleanup_resources(hdev, true, false, false);
2492
2493 /* Kill processes here after CS rollback. This is because the process
2494 * can't really exit until all its CSs are done, which is what we
2495 * do in cs rollback
2496 */
2497 dev_info(hdev->dev,
2498 "Waiting for all processes to exit (timeout of %u seconds)",
2499 HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
2500
2501 hdev->process_kill_trial_cnt = 0;
2502 rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
2503 if (rc) {
2504 dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);
2505 device_disable_open_processes(hdev, false);
2506 }
2507
2508 hdev->process_kill_trial_cnt = 0;
2509 rc = device_kill_open_processes(hdev, 0, true);
2510 if (rc) {
2511 dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);
2512 device_disable_open_processes(hdev, true);
2513 }
2514
2515 hl_cb_pool_fini(hdev);
2516
2517 /* Reset the H/W. It will be in idle state after this returns */
2518 rc = hdev->asic_funcs->hw_fini(hdev, true, false);
2519 if (rc)
2520 dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
2521
2522 hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
2523
2524 /* Release kernel context */
2525 if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
2526 dev_err(hdev->dev, "kernel ctx is still alive\n");
2527
2528 hl_dec_fini(hdev);
2529
2530 hl_vm_fini(hdev);
2531
2532 hl_mmu_fini(hdev);
2533
2534 vfree(hdev->captured_err_info.page_fault_info.user_mappings);
2535
2536 hl_eq_fini(hdev, &hdev->event_queue);
2537
2538 kfree(hdev->shadow_cs_queue);
2539
2540 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2541 hl_cq_fini(hdev, &hdev->completion_queue[i]);
2542 kfree(hdev->completion_queue);
2543
2544 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2545 hdev->asic_prop.user_interrupt_count;
2546
2547 if (user_interrupt_cnt) {
2548 if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2549 for (i = 0 ; i < user_interrupt_cnt ; i++)
2550 vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2551 }
2552
2553 kfree(hdev->user_interrupt);
2554 }
2555
2556 vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2557
2558 hl_hw_queues_destroy(hdev);
2559
2560 /* Call ASIC S/W finalize function */
2561 hdev->asic_funcs->sw_fini(hdev);
2562
2563 device_early_fini(hdev);
2564
2565 /* Hide devices and sysfs/debugfs files from user */
2566 cdev_sysfs_debugfs_remove(hdev);
2567 drm_dev_unregister(&hdev->drm);
2568
2569 hl_debugfs_device_fini(hdev);
2570
2571 pr_info("removed device successfully\n");
2572 }
2573
2574 /*
2575 * MMIO register access helper functions.
2576 */
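/* These helpers are normally reached through the driver's RREG32()/WREG32()
 * macros rather than being called directly.
 */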
2577
2578 /*
2579 * hl_rreg - Read an MMIO register
2580 *
2581 * @hdev: pointer to habanalabs device structure
2582 * @reg: MMIO register offset (in bytes)
2583 *
2584 * Returns the value of the MMIO register we are asked to read
2585 *
2586 */
2587 inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
2588 {
2589 u32 val = readl(hdev->rmmio + reg);
2590
2591 if (unlikely(trace_habanalabs_rreg32_enabled()))
2592 trace_habanalabs_rreg32(hdev->dev, reg, val);
2593
2594 return val;
2595 }
2596
2597 /*
2598 * hl_wreg - Write to an MMIO register
2599 *
2600 * @hdev: pointer to habanalabs device structure
2601 * @reg: MMIO register offset (in bytes)
2602 * @val: 32-bit value
2603 *
2604 * Writes the 32-bit value into the MMIO register
2605 *
2606 */
2607 inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
2608 {
2609 if (unlikely(trace_habanalabs_wreg32_enabled()))
2610 trace_habanalabs_wreg32(hdev->dev, reg, val);
2611
2612 writel(val, hdev->rmmio + reg);
2613 }
2614
2615 void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2616 u8 flags)
2617 {
2618 struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
2619
2620 if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
2621 dev_err(hdev->dev,
2622 "Number of possible razwi initiators (%u) exceeded limit (%u)\n",
2623 num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
2624 return;
2625 }
2626
2627 /* In case it's the first razwi since the device was opened, capture its parameters */
2628 if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
2629 return;
2630
2631 razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
2632 razwi_info->razwi.addr = addr;
2633 razwi_info->razwi.num_of_possible_engines = num_of_engines;
2634 memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
2635 num_of_engines * sizeof(u16));
2636 razwi_info->razwi.flags = flags;
2637
2638 razwi_info->razwi_info_available = true;
2639 }
2640
2641 void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2642 u8 flags, u64 *event_mask)
2643 {
2644 hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
2645
2646 if (event_mask)
2647 *event_mask |= HL_NOTIFIER_EVENT_RAZWI;
2648 }
2649
2650 static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
2651 {
2652 struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2653 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
2654 struct hl_vm_hash_node *hnode;
2655 struct hl_userptr *userptr;
2656 enum vm_type *vm_type;
2657 struct hl_ctx *ctx;
2658 u32 map_idx = 0;
2659 int i;
2660
2661 /* Reset previous session count */
2662 pgf_info->num_of_user_mappings = 0;
2663
2664 ctx = hl_get_compute_ctx(hdev);
2665 if (!ctx) {
2666 dev_err(hdev->dev, "Can't get user context for user mappings\n");
2667 return;
2668 }
2669
2670 mutex_lock(&ctx->mem_hash_lock);
2671 hash_for_each(ctx->mem_hash, i, hnode, node) {
2672 vm_type = hnode->ptr;
2673 if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
2674 ((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
2675 pgf_info->num_of_user_mappings++;
2676
2677 }
2678
2679 if (!pgf_info->num_of_user_mappings)
2680 goto finish;
2681
2682 /* In case we already allocated a buffer in a previous session, release it
2683 * before allocating a new one.
2684 */
2685 vfree(pgf_info->user_mappings);
2686 pgf_info->user_mappings =
2687 vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
2688 if (!pgf_info->user_mappings) {
2689 pgf_info->num_of_user_mappings = 0;
2690 goto finish;
2691 }
2692
2693 hash_for_each(ctx->mem_hash, i, hnode, node) {
2694 vm_type = hnode->ptr;
2695 if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
2696 userptr = hnode->ptr;
2697 pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2698 pgf_info->user_mappings[map_idx].size = userptr->size;
2699 map_idx++;
2700 } else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
2701 phys_pg_pack = hnode->ptr;
2702 pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2703 pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
2704 map_idx++;
2705 }
2706 }
2707 finish:
2708 mutex_unlock(&ctx->mem_hash_lock);
2709 hl_ctx_put(ctx);
2710 }
2711
2712 void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
2713 {
2714 struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2715
2716 /* Capture only the first page fault */
2717 if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
2718 return;
2719
2720 pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
2721 pgf_info->page_fault.addr = addr;
2722 pgf_info->page_fault.engine_id = eng_id;
2723 hl_capture_user_mappings(hdev, is_pmmu);
2724
2725 pgf_info->page_fault_info_available = true;
2726 }
2727
2728 void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
2729 u64 *event_mask)
2730 {
2731 hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
2732
2733 if (event_mask)
2734 *event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
2735 }
2736
2737 static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
2738 {
2739 struct hw_err_info *info = &hdev->captured_err_info.hw_err;
2740
2741 /* Capture only the first HW err */
2742 if (atomic_cmpxchg(&info->event_detected, 0, 1))
2743 return;
2744
2745 info->event.timestamp = ktime_to_ns(ktime_get());
2746 info->event.event_id = event_id;
2747
2748 info->event_info_available = true;
2749 }
2750
2751 void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
2752 {
2753 hl_capture_hw_err(hdev, event_id);
2754
2755 if (event_mask)
2756 *event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
2757 }
2758
2759 static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
2760 {
2761 struct fw_err_info *info = &hdev->captured_err_info.fw_err;
2762
2763 /* Capture only the first FW error */
2764 if (atomic_cmpxchg(&info->event_detected, 0, 1))
2765 return;
2766
2767 info->event.timestamp = ktime_to_ns(ktime_get());
2768 info->event.err_type = fw_info->err_type;
2769 if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
2770 info->event.event_id = fw_info->event_id;
2771
2772 info->event_info_available = true;
2773 }
2774
2775 void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
2776 {
2777 hl_capture_fw_err(hdev, info);
2778
2779 if (info->event_mask)
2780 *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
2781 }
2782
2783 void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)
2784 {
2785 struct engine_err_info *info = &hdev->captured_err_info.engine_err;
2786
2787 /* Capture only the first engine error */
2788 if (atomic_cmpxchg(&info->event_detected, 0, 1))
2789 return;
2790
2791 info->event.timestamp = ktime_to_ns(ktime_get());
2792 info->event.engine_id = engine_id;
2793 info->event.error_count = error_count;
2794 info->event_info_available = true;
2795 }
2796
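/* Clear all previously captured error information and re-arm capturing, so the
 * first error of each type can be recorded again.
 */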
2797 void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
2798 {
2799 vfree(captured_err_info->page_fault_info.user_mappings);
2800 memset(captured_err_info, 0, sizeof(struct hl_error_info));
2801 atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
2802 captured_err_info->undef_opcode.write_enable = true;
2803 }
2804