1  // SPDX-License-Identifier: GPL-2.0
2  
3  /*
4   * Copyright 2016-2022 HabanaLabs, Ltd.
5   * All Rights Reserved.
6   */
7  
8  #define pr_fmt(fmt)			"habanalabs: " fmt
9  
10  #include <uapi/drm/habanalabs_accel.h>
11  #include "habanalabs.h"
12  
13  #include <linux/pci.h>
14  #include <linux/hwmon.h>
15  #include <linux/vmalloc.h>
16  
17  #include <drm/drm_accel.h>
18  #include <drm/drm_drv.h>
19  
20  #include <trace/events/habanalabs.h>
21  
22  #define HL_RESET_DELAY_USEC			10000	/* 10ms */
23  
24  #define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC	30
25  
26  enum dma_alloc_type {
27  	DMA_ALLOC_COHERENT,
28  	DMA_ALLOC_POOL,
29  };
30  
31  #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
32  
33  static void hl_device_heartbeat(struct work_struct *work);
34  
35  /*
36   * hl_set_dram_bar - sets the BAR to allow later access to an address
37   *
38   * @hdev: pointer to habanalabs device structure.
39   * @addr: the address the caller wants to access.
40   * @region: the PCI region.
41   * @new_bar_region_base: the new BAR region base address.
42   *
43   * @return: the old BAR base address on success, U64_MAX for failure.
44   *	    The caller should set it back to the old address after use.
45   *
46   * In case the BAR space does not cover the whole address space,
47   * the BAR base address should be set to allow access to a given address.
48   * This function can also be called if the BAR doesn't need to be set;
49   * in that case it simply won't change the base.
50   */
51  static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
52  				u64 *new_bar_region_base)
53  {
54  	struct asic_fixed_properties *prop = &hdev->asic_prop;
55  	u64 bar_base_addr, old_base;
56  
57  	if (is_power_of_2(prop->dram_pci_bar_size))
58  		bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
59  	else
60  		bar_base_addr = region->region_base +
61  				div64_u64((addr - region->region_base), prop->dram_pci_bar_size) *
62  				prop->dram_pci_bar_size;
63  
64  	old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
65  
66  	/* in case of success we need to update the new BAR base */
67  	if ((old_base != U64_MAX) && new_bar_region_base)
68  		*new_bar_region_base = bar_base_addr;
69  
70  	return old_base;
71  }
72  
73  int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
74  	enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
75  {
76  	struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
77  	u64 old_base = 0, rc, bar_region_base = region->region_base;
78  	void __iomem *acc_addr;
79  
80  	if (set_dram_bar) {
81  		old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
82  		if (old_base == U64_MAX)
83  			return -EIO;
84  	}
85  
86  	acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
87  			(addr - bar_region_base);
88  
89  	switch (acc_type) {
90  	case DEBUGFS_READ8:
91  		*val = readb(acc_addr);
92  		break;
93  	case DEBUGFS_WRITE8:
94  		writeb(*val, acc_addr);
95  		break;
96  	case DEBUGFS_READ32:
97  		*val = readl(acc_addr);
98  		break;
99  	case DEBUGFS_WRITE32:
100  		writel(*val, acc_addr);
101  		break;
102  	case DEBUGFS_READ64:
103  		*val = readq(acc_addr);
104  		break;
105  	case DEBUGFS_WRITE64:
106  		writeq(*val, acc_addr);
107  		break;
108  	}
109  
110  	if (set_dram_bar) {
111  		rc = hl_set_dram_bar(hdev, old_base, region, NULL);
112  		if (rc == U64_MAX)
113  			return -EIO;
114  	}
115  
116  	return 0;
117  }
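
/*
 * Example (illustrative, assumes a valid 'dram_addr' that falls inside the
 * PCI_REGION_DRAM region): a 32-bit debugfs-style read through the helper
 * above, letting it set the DRAM BAR as needed.
 *
 *	u64 val = 0;
 *	int rc;
 *
 *	rc = hl_access_sram_dram_region(hdev, dram_addr, &val, DEBUGFS_READ32,
 *					PCI_REGION_DRAM, true);
 *	if (!rc)
 *		dev_dbg(hdev->dev, "DRAM[%#llx] = %#llx\n", dram_addr, val);
 */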
118  
119  static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
120  					gfp_t flag, enum dma_alloc_type alloc_type,
121  					const char *caller)
122  {
123  	void *ptr = NULL;
124  
125  	switch (alloc_type) {
126  	case DMA_ALLOC_COHERENT:
127  		ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
128  		break;
129  	case DMA_ALLOC_POOL:
130  		ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
131  		break;
132  	}
133  
134  	if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
135  		trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,
136  						size, caller);
137  
138  	return ptr;
139  }
140  
141  static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
142  					dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
143  					const char *caller)
144  {
145  	/* this is needed to avoid a warning about using a freed pointer */
146  	u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;
147  
148  	switch (alloc_type) {
149  	case DMA_ALLOC_COHERENT:
150  		hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
151  		break;
152  	case DMA_ALLOC_POOL:
153  		hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
154  		break;
155  	}
156  
157  	trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);
158  }
159  
160  void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
161  					gfp_t flag, const char *caller)
162  {
163  	return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
164  }
165  
166  void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
167  					dma_addr_t dma_handle, const char *caller)
168  {
169  	hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
170  }
171  
172  void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
173  					dma_addr_t *dma_handle, const char *caller)
174  {
175  	return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
176  }
177  
178  void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
179  					const char *caller)
180  {
181  	hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
182  }
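
/*
 * Example (illustrative): pairing a coherent DMA allocation with its release.
 * The *_caller variants are typically reached through wrapper macros that pass
 * __func__; here the caller string is written out explicitly.
 *
 *	dma_addr_t dma_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = hl_asic_dma_alloc_coherent_caller(hdev, SZ_4K, &dma_addr,
 *							GFP_KERNEL, __func__);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 *	(use cpu_addr on the host and dma_addr on the device)
 *
 *	hl_asic_dma_free_coherent_caller(hdev, SZ_4K, cpu_addr, dma_addr, __func__);
 */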
183  
184  void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
185  {
186  	return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
187  }
188  
189  void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
190  {
191  	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
192  }
193  
194  int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
195  				enum dma_data_direction dir, const char *caller)
196  {
197  	struct asic_fixed_properties *prop = &hdev->asic_prop;
198  	struct scatterlist *sg;
199  	int rc, i;
200  
201  	rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
202  	if (rc)
203  		return rc;
204  
205  	if (!trace_habanalabs_dma_map_page_enabled())
206  		return 0;
207  
208  	for_each_sgtable_dma_sg(sgt, sg, i)
209  		trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,
210  					page_to_phys(sg_page(sg)),
211  					sg->dma_address - prop->device_dma_offset_for_host_access,
212  #ifdef CONFIG_NEED_SG_DMA_LENGTH
213  					sg->dma_length,
214  #else
215  					sg->length,
216  #endif
217  					dir, caller);
218  
219  	return 0;
220  }
221  
222  int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
223  				enum dma_data_direction dir)
224  {
225  	struct asic_fixed_properties *prop = &hdev->asic_prop;
226  	struct scatterlist *sg;
227  	int rc, i;
228  
229  	rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
230  	if (rc)
231  		return rc;
232  
233  	/* Shift to the device's base physical address of host memory if necessary */
234  	if (prop->device_dma_offset_for_host_access)
235  		for_each_sgtable_dma_sg(sgt, sg, i)
236  			sg->dma_address += prop->device_dma_offset_for_host_access;
237  
238  	return 0;
239  }
240  
241  void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
242  					enum dma_data_direction dir, const char *caller)
243  {
244  	struct asic_fixed_properties *prop = &hdev->asic_prop;
245  	struct scatterlist *sg;
246  	int i;
247  
248  	hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);
249  
250  	if (trace_habanalabs_dma_unmap_page_enabled()) {
251  		for_each_sgtable_dma_sg(sgt, sg, i)
252  			trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,
253  					page_to_phys(sg_page(sg)),
254  					sg->dma_address - prop->device_dma_offset_for_host_access,
255  #ifdef CONFIG_NEED_SG_DMA_LENGTH
256  					sg->dma_length,
257  #else
258  					sg->length,
259  #endif
260  					dir, caller);
261  	}
262  }
263  
264  void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
265  				enum dma_data_direction dir)
266  {
267  	struct asic_fixed_properties *prop = &hdev->asic_prop;
268  	struct scatterlist *sg;
269  	int i;
270  
271  	/* Cancel the device's base physical address of host memory if necessary */
272  	if (prop->device_dma_offset_for_host_access)
273  		for_each_sgtable_dma_sg(sgt, sg, i)
274  			sg->dma_address -= prop->device_dma_offset_for_host_access;
275  
276  	dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
277  }
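
/*
 * Example (illustrative, assumes 'sgt' is an already-built sg_table of host
 * pages): mapping for device access and unmapping again. The map helper shifts
 * each DMA address by device_dma_offset_for_host_access, and the unmap helper
 * cancels that shift before calling dma_unmap_sgtable().
 *
 *	int rc;
 *
 *	rc = hl_asic_dma_map_sgtable(hdev, sgt, DMA_BIDIRECTIONAL);
 *	if (rc)
 *		return rc;
 *
 *	(program the device with the DMA addresses from 'sgt')
 *
 *	hl_asic_dma_unmap_sgtable(hdev, sgt, DMA_BIDIRECTIONAL);
 */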
278  
279  /*
280   * hl_access_cfg_region - access the config region
281   *
282   * @hdev: pointer to habanalabs device structure
283   * @addr: the address to access
284   * @val: the value to write, or where to store the value that was read
285   * @acc_type: the type of access (read/write 64/32)
286   */
287  int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
288  	enum debugfs_access_type acc_type)
289  {
290  	struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
291  	u32 val_h, val_l;
292  
293  	if (!IS_ALIGNED(addr, sizeof(u32))) {
294  		dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
295  		return -EINVAL;
296  	}
297  
298  	switch (acc_type) {
299  	case DEBUGFS_READ32:
300  		*val = RREG32(addr - cfg_region->region_base);
301  		break;
302  	case DEBUGFS_WRITE32:
303  		WREG32(addr - cfg_region->region_base, *val);
304  		break;
305  	case DEBUGFS_READ64:
306  		val_l = RREG32(addr - cfg_region->region_base);
307  		val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
308  
309  		*val = (((u64) val_h) << 32) | val_l;
310  		break;
311  	case DEBUGFS_WRITE64:
312  		WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
313  		WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
314  		break;
315  	default:
316  		dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
317  		return -EOPNOTSUPP;
318  	}
319  
320  	return 0;
321  }
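
/*
 * Example (illustrative, 'cfg_addr' is a hypothetical absolute address inside
 * PCI_REGION_CFG): a 64-bit write through the helper above, which subtracts
 * the region base and splits the access into two 32-bit register writes.
 *
 *	u64 val = 0x1122334455667788ull;
 *	int rc;
 *
 *	rc = hl_access_cfg_region(hdev, cfg_addr, &val, DEBUGFS_WRITE64);
 */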
322  
323  /*
324   * hl_access_dev_mem - access device memory
325   *
326   * @hdev: pointer to habanalabs device structure
327   * @region_type: the type of the region the address belongs to
328   * @addr: the address to access
329   * @val: the value to write, or where to store the value that was read
330   * @acc_type: the type of access (r/w, 32/64)
331   */
332  int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
333  			u64 addr, u64 *val, enum debugfs_access_type acc_type)
334  {
335  	switch (region_type) {
336  	case PCI_REGION_CFG:
337  		return hl_access_cfg_region(hdev, addr, val, acc_type);
338  	case PCI_REGION_SRAM:
339  	case PCI_REGION_DRAM:
340  		return hl_access_sram_dram_region(hdev, addr, val, acc_type,
341  				region_type, (region_type == PCI_REGION_DRAM));
342  	default:
343  		return -EFAULT;
344  	}
345  
346  	return 0;
347  }
348  
349  void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
350  {
351  	va_list args;
352  	int str_size;
353  
354  	va_start(args, fmt);
355  	/* Calculate the formatted string length. The string is NUL-terminated, hence
356  	 * increment the result by 1.
357  	 */
358  	str_size = vsnprintf(NULL, 0, fmt, args) + 1;
359  	va_end(args);
360  
361  	if ((e->actual_size + str_size) < e->allocated_buf_size) {
362  		va_start(args, fmt);
363  		vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
364  		va_end(args);
365  	}
366  
367  	/* Update the size even when not writing to the destination buffer, so that the exact
368  	 * total size of all the input strings is obtained.
369  	 */
370  	e->actual_size += str_size;
371  }
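
/*
 * Example (illustrative, 'buf'/'buf_size'/'engine_id' are hypothetical):
 * callers append formatted lines and afterwards compare actual_size against
 * allocated_buf_size; if the buffer was too small, actual_size still reflects
 * the total size that would have been required.
 *
 *	struct engines_data e = {
 *		.buf = buf,
 *		.allocated_buf_size = buf_size,
 *	};
 *
 *	hl_engine_data_sprintf(&e, "engine %u is busy\n", engine_id);
 *	if (e.actual_size >= e.allocated_buf_size)
 *		dev_dbg(hdev->dev, "engines data was truncated\n");
 */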
372  
373  enum hl_device_status hl_device_status(struct hl_device *hdev)
374  {
375  	enum hl_device_status status;
376  
377  	if (hdev->device_fini_pending) {
378  		status = HL_DEVICE_STATUS_MALFUNCTION;
379  	} else if (hdev->reset_info.in_reset) {
380  		if (hdev->reset_info.in_compute_reset)
381  			status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
382  		else
383  			status = HL_DEVICE_STATUS_IN_RESET;
384  	} else if (hdev->reset_info.needs_reset) {
385  		status = HL_DEVICE_STATUS_NEEDS_RESET;
386  	} else if (hdev->disabled) {
387  		status = HL_DEVICE_STATUS_MALFUNCTION;
388  	} else if (!hdev->init_done) {
389  		status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
390  	} else {
391  		status = HL_DEVICE_STATUS_OPERATIONAL;
392  	}
393  
394  	return status;
395  }
396  
397  bool hl_device_operational(struct hl_device *hdev,
398  		enum hl_device_status *status)
399  {
400  	enum hl_device_status current_status;
401  
402  	current_status = hl_device_status(hdev);
403  	if (status)
404  		*status = current_status;
405  
406  	switch (current_status) {
407  	case HL_DEVICE_STATUS_MALFUNCTION:
408  	case HL_DEVICE_STATUS_IN_RESET:
409  	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
410  	case HL_DEVICE_STATUS_NEEDS_RESET:
411  		return false;
412  	case HL_DEVICE_STATUS_OPERATIONAL:
413  	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
414  	default:
415  		return true;
416  	}
417  }
418  
419  bool hl_ctrl_device_operational(struct hl_device *hdev,
420  		enum hl_device_status *status)
421  {
422  	enum hl_device_status current_status;
423  
424  	current_status = hl_device_status(hdev);
425  	if (status)
426  		*status = current_status;
427  
428  	switch (current_status) {
429  	case HL_DEVICE_STATUS_MALFUNCTION:
430  		return false;
431  	case HL_DEVICE_STATUS_IN_RESET:
432  	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
433  	case HL_DEVICE_STATUS_NEEDS_RESET:
434  	case HL_DEVICE_STATUS_OPERATIONAL:
435  	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
436  	default:
437  		return true;
438  	}
439  }
440  
441  static void print_idle_status_mask(struct hl_device *hdev, const char *message,
442  					u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
443  {
444  	if (idle_mask[3])
445  		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",
446  			dev_name(&hdev->pdev->dev), message,
447  			idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
448  	else if (idle_mask[2])
449  		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",
450  			dev_name(&hdev->pdev->dev), message,
451  			idle_mask[2], idle_mask[1], idle_mask[0]);
452  	else if (idle_mask[1])
453  		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",
454  			dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);
455  	else
456  		dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,
457  			idle_mask[0]);
458  }
459  
460  static void hpriv_release(struct kref *ref)
461  {
462  	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
463  	bool reset_device, device_is_idle = true;
464  	struct hl_fpriv *hpriv;
465  	struct hl_device *hdev;
466  
467  	hpriv = container_of(ref, struct hl_fpriv, refcount);
468  
469  	hdev = hpriv->hdev;
470  
471  	hdev->asic_funcs->send_device_activity(hdev, false);
472  
473  	hl_debugfs_remove_file(hpriv);
474  
475  	mutex_destroy(&hpriv->ctx_lock);
476  	mutex_destroy(&hpriv->restore_phase_mutex);
477  
478  	/* There should be no memory buffers at this point and handles IDR can be destroyed */
479  	hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
480  
481  	/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
482  	 * reset that waits for device release.
483  	 */
484  	reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
485  
486  	/* Check the device idle status and reset if not idle.
487  	 * Skip it if already in reset, or if device is going to be reset in any case.
488  	 */
489  	if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)
490  		device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
491  							HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
492  	if (!device_is_idle) {
493  		print_idle_status_mask(hdev, "device is not idle after user context is closed",
494  					idle_mask);
495  		reset_device = true;
496  	}
497  
498  	/* We need to remove the user from the list to make sure the reset process won't
499  	 * try to kill the user process. If we got here, it means there are no more
500  	 * driver/device resources that the user process is occupying, so there is no
501  	 * need to kill it.
502  	 *
503  	 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
504  	 * a race between the release and opening the device again. We don't want to let
505  	 * a user open the device while a reset is about to happen.
506  	 */
507  	mutex_lock(&hdev->fpriv_list_lock);
508  	list_del(&hpriv->dev_node);
509  	mutex_unlock(&hdev->fpriv_list_lock);
510  
511  	put_pid(hpriv->taskpid);
512  
513  	if (reset_device) {
514  		hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
515  	} else {
516  		/* Scrubbing is handled within hl_device_reset(), so here we need to do it directly */
517  		int rc = hdev->asic_funcs->scrub_device_mem(hdev);
518  
519  		if (rc) {
520  			dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
521  			hl_device_reset(hdev, HL_DRV_RESET_HARD);
522  		}
523  	}
524  
525  	/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
526  	 * thread, we don't care because in_reset is marked, so if a user tries to open
527  	 * the device it will fail on that, even if compute_ctx is false.
528  	 */
529  	mutex_lock(&hdev->fpriv_list_lock);
530  	hdev->is_compute_ctx_active = false;
531  	mutex_unlock(&hdev->fpriv_list_lock);
532  
533  	hdev->compute_ctx_in_release = 0;
534  
535  	/* release the eventfd */
536  	if (hpriv->notifier_event.eventfd)
537  		eventfd_ctx_put(hpriv->notifier_event.eventfd);
538  
539  	mutex_destroy(&hpriv->notifier_event.lock);
540  
541  	kfree(hpriv);
542  }
543  
544  void hl_hpriv_get(struct hl_fpriv *hpriv)
545  {
546  	kref_get(&hpriv->refcount);
547  }
548  
549  int hl_hpriv_put(struct hl_fpriv *hpriv)
550  {
551  	return kref_put(&hpriv->refcount, hpriv_release);
552  }
553  
554  static void print_device_in_use_info(struct hl_device *hdev,
555  		struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)
556  {
557  	u32 active_cs_num, dmabuf_export_cnt;
558  	bool unknown_reason = true;
559  	char buf[128];
560  	size_t size;
561  	int offset;
562  
563  	size = sizeof(buf);
564  	offset = 0;
565  
566  	active_cs_num = hl_get_active_cs_num(hdev);
567  	if (active_cs_num) {
568  		unknown_reason = false;
569  		offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
570  	}
571  
572  	dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
573  	if (dmabuf_export_cnt) {
574  		unknown_reason = false;
575  		offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
576  					dmabuf_export_cnt);
577  	}
578  
579  	if (mm_fini_stats->n_busy_cb) {
580  		unknown_reason = false;
581  		offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",
582  				mm_fini_stats->n_busy_cb);
583  	}
584  
585  	if (unknown_reason)
586  		scnprintf(buf + offset, size - offset, " [unknown reason]");
587  
588  	dev_notice(hdev->dev, "%s%s\n", message, buf);
589  }
590  
591  /*
592   * hl_device_release() - release function for habanalabs device.
593   * @ddev: pointer to DRM device structure.
594   * @file: pointer to DRM file private data structure.
595   *
596   * Called when a process closes a habanalabs device
597   */
598  void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
599  {
600  	struct hl_fpriv *hpriv = file_priv->driver_priv;
601  	struct hl_device *hdev = to_hl_device(ddev);
602  	struct hl_mem_mgr_fini_stats mm_fini_stats;
603  
604  	if (!hdev) {
605  		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
606  		put_pid(hpriv->taskpid);
607  	}
608  
609  	hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
610  
611  	/* Memory buffers might be still in use at this point and thus the handles IDR destruction
612  	 * is postponed to hpriv_release().
613  	 */
614  	hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);
615  
616  	hdev->compute_ctx_in_release = 1;
617  
618  	if (!hl_hpriv_put(hpriv)) {
619  		print_device_in_use_info(hdev, &mm_fini_stats,
620  				"User process closed FD but device still in use");
621  		hl_device_reset(hdev, HL_DRV_RESET_HARD);
622  	}
623  
624  	hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
625  }
626  
627  static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
628  {
629  	struct hl_fpriv *hpriv = filp->private_data;
630  	struct hl_device *hdev = hpriv->hdev;
631  
632  	filp->private_data = NULL;
633  
634  	if (!hdev) {
635  		pr_err("Closing FD after device was removed\n");
636  		goto out;
637  	}
638  
639  	mutex_lock(&hdev->fpriv_ctrl_list_lock);
640  	list_del(&hpriv->dev_node);
641  	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
642  out:
643  	put_pid(hpriv->taskpid);
644  
645  	kfree(hpriv);
646  
647  	return 0;
648  }
649  
650  static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
651  {
652  	struct hl_device *hdev = hpriv->hdev;
653  	unsigned long vm_pgoff;
654  
655  	if (!hdev) {
656  		pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
657  		return -ENODEV;
658  	}
659  
660  	vm_pgoff = vma->vm_pgoff;
661  
662  	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
663  	case HL_MMAP_TYPE_BLOCK:
664  		vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
665  		return hl_hw_block_mmap(hpriv, vma);
666  
667  	case HL_MMAP_TYPE_CB:
668  	case HL_MMAP_TYPE_TS_BUFF:
669  		return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
670  	}
671  	return -EINVAL;
672  }
673  
674  /*
675   * hl_mmap - mmap function for habanalabs device
676   *
677   * @*filp: pointer to file structure
678   * @*vma: pointer to vm_area_struct of the process
679   *
680   * Called when a process does an mmap on a habanalabs device. Call the relevant mmap
681   * function at the end of the common code.
682   */
683  int hl_mmap(struct file *filp, struct vm_area_struct *vma)
684  {
685  	struct drm_file *file_priv = filp->private_data;
686  	struct hl_fpriv *hpriv = file_priv->driver_priv;
687  
688  	return __hl_mmap(hpriv, vma);
689  }
690  
691  static const struct file_operations hl_ctrl_ops = {
692  	.owner = THIS_MODULE,
693  	.open = hl_device_open_ctrl,
694  	.release = hl_device_release_ctrl,
695  	.unlocked_ioctl = hl_ioctl_control,
696  	.compat_ioctl = hl_ioctl_control
697  };
698  
699  static void device_release_func(struct device *dev)
700  {
701  	kfree(dev);
702  }
703  
704  /*
705   * device_init_cdev - Initialize cdev and device for habanalabs device
706   *
707   * @hdev: pointer to habanalabs device structure
708   * @class: pointer to the class object of the device
709   * @minor: minor number of the specific device
710   * @fops: file operations to install for this device
711   * @name: name of the device as it will appear in the filesystem
712   * @cdev: pointer to the char device object that will be initialized
713   * @dev: pointer to the device object that will be initialized
714   *
715   * Initialize a cdev and a Linux device for a habanalabs device.
716   */
717  static int device_init_cdev(struct hl_device *hdev, const struct class *class,
718  				int minor, const struct file_operations *fops,
719  				char *name, struct cdev *cdev,
720  				struct device **dev)
721  {
722  	cdev_init(cdev, fops);
723  	cdev->owner = THIS_MODULE;
724  
725  	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
726  	if (!*dev)
727  		return -ENOMEM;
728  
729  	device_initialize(*dev);
730  	(*dev)->devt = MKDEV(hdev->major, minor);
731  	(*dev)->class = class;
732  	(*dev)->release = device_release_func;
733  	dev_set_drvdata(*dev, hdev);
734  	dev_set_name(*dev, "%s", name);
735  
736  	return 0;
737  }
738  
739  static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
740  {
741  	const struct class *accel_class = hdev->drm.accel->kdev->class;
742  	char name[32];
743  	int rc;
744  
745  	hdev->cdev_idx = hdev->drm.accel->index;
746  
747  	/* Initialize cdev and device structures for the control device */
748  	snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);
749  	rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,
750  				&hdev->cdev_ctrl, &hdev->dev_ctrl);
751  	if (rc)
752  		return rc;
753  
754  	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
755  	if (rc) {
756  		dev_err(hdev->dev_ctrl,
757  			"failed to add an accel control char device to the system\n");
758  		goto free_ctrl_device;
759  	}
760  
761  	rc = hl_sysfs_init(hdev);
762  	if (rc) {
763  		dev_err(hdev->dev, "failed to initialize sysfs\n");
764  		goto delete_ctrl_cdev_device;
765  	}
766  
767  	hl_debugfs_add_device(hdev);
768  
769  	hdev->cdev_sysfs_debugfs_created = true;
770  
771  	return 0;
772  
773  delete_ctrl_cdev_device:
774  	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
775  free_ctrl_device:
776  	put_device(hdev->dev_ctrl);
777  	return rc;
778  }
779  
780  static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
781  {
782  	if (!hdev->cdev_sysfs_debugfs_created)
783  		return;
784  
785  	hl_sysfs_fini(hdev);
786  
787  	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
788  	put_device(hdev->dev_ctrl);
789  }
790  
791  static void device_hard_reset_pending(struct work_struct *work)
792  {
793  	struct hl_device_reset_work *device_reset_work =
794  		container_of(work, struct hl_device_reset_work, reset_work.work);
795  	struct hl_device *hdev = device_reset_work->hdev;
796  	u32 flags;
797  	int rc;
798  
799  	flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
800  
801  	rc = hl_device_reset(hdev, flags);
802  
803  	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
804  		struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
805  
806  		if (ctx) {
807  			/* The refcount value that was read should be decremented by one, because
808  			 * the read itself is protected with hl_get_compute_ctx().
809  			 */
810  			dev_info(hdev->dev,
811  				"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
812  				kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
813  			hl_ctx_put(ctx);
814  		} else {
815  			dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
816  				HL_PENDING_RESET_PER_SEC);
817  		}
818  
819  		queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
820  					msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
821  	}
822  }
823  
824  static void device_release_watchdog_func(struct work_struct *work)
825  {
826  	struct hl_device_reset_work *watchdog_work =
827  			container_of(work, struct hl_device_reset_work, reset_work.work);
828  	struct hl_device *hdev = watchdog_work->hdev;
829  	u32 flags;
830  
831  	dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
832  
833  	flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
834  
835  	hl_device_reset(hdev, flags);
836  }
837  
838  /*
839   * device_early_init - do some early initialization for the habanalabs device
840   *
841   * @hdev: pointer to habanalabs device structure
842   *
843   * Install the relevant function pointers and call the early_init function,
844   * if such a function exists
845   */
846  static int device_early_init(struct hl_device *hdev)
847  {
848  	int i, rc;
849  	char workq_name[32];
850  
851  	switch (hdev->asic_type) {
852  	case ASIC_GOYA:
853  		goya_set_asic_funcs(hdev);
854  		strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
855  		break;
856  	case ASIC_GAUDI:
857  		gaudi_set_asic_funcs(hdev);
858  		strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
859  		break;
860  	case ASIC_GAUDI_SEC:
861  		gaudi_set_asic_funcs(hdev);
862  		strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
863  		break;
864  	case ASIC_GAUDI2:
865  		gaudi2_set_asic_funcs(hdev);
866  		strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
867  		break;
868  	case ASIC_GAUDI2B:
869  		gaudi2_set_asic_funcs(hdev);
870  		strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
871  		break;
872  	case ASIC_GAUDI2C:
873  		gaudi2_set_asic_funcs(hdev);
874  		strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
875  		break;
876  	case ASIC_GAUDI2D:
877  		gaudi2_set_asic_funcs(hdev);
878  		strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));
879  		break;
880  	default:
881  		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
882  			hdev->asic_type);
883  		return -EINVAL;
884  	}
885  
886  	rc = hdev->asic_funcs->early_init(hdev);
887  	if (rc)
888  		return rc;
889  
890  	rc = hl_asid_init(hdev);
891  	if (rc)
892  		goto early_fini;
893  
894  	if (hdev->asic_prop.completion_queues_count) {
895  		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
896  				sizeof(struct workqueue_struct *),
897  				GFP_KERNEL);
898  		if (!hdev->cq_wq) {
899  			rc = -ENOMEM;
900  			goto asid_fini;
901  		}
902  	}
903  
904  	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
905  		snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
906  		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
907  		if (hdev->cq_wq[i] == NULL) {
908  			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
909  			rc = -ENOMEM;
910  			goto free_cq_wq;
911  		}
912  	}
913  
914  	snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
915  	hdev->eq_wq = create_singlethread_workqueue(workq_name);
916  	if (hdev->eq_wq == NULL) {
917  		dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
918  		rc = -ENOMEM;
919  		goto free_cq_wq;
920  	}
921  
922  	snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
923  	hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
924  	if (!hdev->cs_cmplt_wq) {
925  		dev_err(hdev->dev,
926  			"Failed to allocate CS completions workqueue\n");
927  		rc = -ENOMEM;
928  		goto free_eq_wq;
929  	}
930  
931  	snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
932  	hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
933  	if (!hdev->ts_free_obj_wq) {
934  		dev_err(hdev->dev,
935  			"Failed to allocate Timestamp registration free workqueue\n");
936  		rc = -ENOMEM;
937  		goto free_cs_cmplt_wq;
938  	}
939  
940  	snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
941  	hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
942  	if (!hdev->prefetch_wq) {
943  		dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
944  		rc = -ENOMEM;
945  		goto free_ts_free_wq;
946  	}
947  
948  	hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
949  	if (!hdev->hl_chip_info) {
950  		rc = -ENOMEM;
951  		goto free_prefetch_wq;
952  	}
953  
954  	rc = hl_mmu_if_set_funcs(hdev);
955  	if (rc)
956  		goto free_chip_info;
957  
958  	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
959  
960  	snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
961  	hdev->reset_wq = create_singlethread_workqueue(workq_name);
962  	if (!hdev->reset_wq) {
963  		rc = -ENOMEM;
964  		dev_err(hdev->dev, "Failed to create device reset WQ\n");
965  		goto free_cb_mgr;
966  	}
967  
968  	INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
969  
970  	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
971  	hdev->device_reset_work.hdev = hdev;
972  	hdev->device_fini_pending = 0;
973  
974  	INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
975  				device_release_watchdog_func);
976  	hdev->device_release_watchdog_work.hdev = hdev;
977  
978  	mutex_init(&hdev->send_cpu_message_lock);
979  	mutex_init(&hdev->debug_lock);
980  	INIT_LIST_HEAD(&hdev->cs_mirror_list);
981  	spin_lock_init(&hdev->cs_mirror_lock);
982  	spin_lock_init(&hdev->reset_info.lock);
983  	INIT_LIST_HEAD(&hdev->fpriv_list);
984  	INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
985  	mutex_init(&hdev->fpriv_list_lock);
986  	mutex_init(&hdev->fpriv_ctrl_list_lock);
987  	mutex_init(&hdev->clk_throttling.lock);
988  
989  	return 0;
990  
991  free_cb_mgr:
992  	hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
993  	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
994  free_chip_info:
995  	kfree(hdev->hl_chip_info);
996  free_prefetch_wq:
997  	destroy_workqueue(hdev->prefetch_wq);
998  free_ts_free_wq:
999  	destroy_workqueue(hdev->ts_free_obj_wq);
1000  free_cs_cmplt_wq:
1001  	destroy_workqueue(hdev->cs_cmplt_wq);
1002  free_eq_wq:
1003  	destroy_workqueue(hdev->eq_wq);
1004  free_cq_wq:
1005  	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1006  		if (hdev->cq_wq[i])
1007  			destroy_workqueue(hdev->cq_wq[i]);
1008  	kfree(hdev->cq_wq);
1009  asid_fini:
1010  	hl_asid_fini(hdev);
1011  early_fini:
1012  	if (hdev->asic_funcs->early_fini)
1013  		hdev->asic_funcs->early_fini(hdev);
1014  
1015  	return rc;
1016  }
1017  
1018  /*
1019   * device_early_fini - finalize all that was done in device_early_init
1020   *
1021   * @hdev: pointer to habanalabs device structure
1022   *
1023   */
1024  static void device_early_fini(struct hl_device *hdev)
1025  {
1026  	int i;
1027  
1028  	mutex_destroy(&hdev->debug_lock);
1029  	mutex_destroy(&hdev->send_cpu_message_lock);
1030  
1031  	mutex_destroy(&hdev->fpriv_list_lock);
1032  	mutex_destroy(&hdev->fpriv_ctrl_list_lock);
1033  
1034  	mutex_destroy(&hdev->clk_throttling.lock);
1035  
1036  	hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
1037  	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
1038  
1039  	kfree(hdev->hl_chip_info);
1040  
1041  	destroy_workqueue(hdev->prefetch_wq);
1042  	destroy_workqueue(hdev->ts_free_obj_wq);
1043  	destroy_workqueue(hdev->cs_cmplt_wq);
1044  	destroy_workqueue(hdev->eq_wq);
1045  	destroy_workqueue(hdev->reset_wq);
1046  
1047  	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1048  		destroy_workqueue(hdev->cq_wq[i]);
1049  	kfree(hdev->cq_wq);
1050  
1051  	hl_asid_fini(hdev);
1052  
1053  	if (hdev->asic_funcs->early_fini)
1054  		hdev->asic_funcs->early_fini(hdev);
1055  }
1056  
1057  static bool is_pci_link_healthy(struct hl_device *hdev)
1058  {
1059  	u16 device_id;
1060  
1061  	if (!hdev->pdev)
1062  		return false;
1063  
1064  	pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id);
1065  
1066  	return (device_id == hdev->pdev->device);
1067  }
1068  
1069  static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
1070  						bool is_pq_hb)
1071  {
1072  	time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts
1073  					: hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
1074  	struct tm tm;
1075  
1076  	if (!seconds)
1077  		return;
1078  
1079  	time64_to_tm(seconds, 0, &tm);
1080  
1081  	snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
1082  		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
1083  }
1084  
1085  static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
1086  {
1087  	struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
1088  	u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
1089  	struct asic_fixed_properties *prop = &hdev->asic_prop;
1090  	char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
1091  
1092  	if (!prop->cpucp_info.eq_health_check_supported)
1093  		return true;
1094  
1095  	if (!hdev->eq_heartbeat_received) {
1096  		dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
1097  
1098  		stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
1099  		stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
1100  		dev_err(hdev->dev,
1101  			"EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
1102  			hdev->event_queue.ci,
1103  			heartbeat_debug_info->heartbeat_event_counter,
1104  			eq_time_str,
1105  			hdev->kernel_queues[cpu_q_id].pi,
1106  			atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
1107  			atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
1108  			pq_time_str);
1109  
1110  		hl_eq_dump(hdev, &hdev->event_queue);
1111  
1112  		return false;
1113  	}
1114  
1115  	hdev->eq_heartbeat_received = false;
1116  
1117  	return true;
1118  }
1119  
1120  static void hl_device_heartbeat(struct work_struct *work)
1121  {
1122  	struct hl_device *hdev = container_of(work, struct hl_device,
1123  						work_heartbeat.work);
1124  	struct hl_info_fw_err_info info = {0};
1125  	u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
1126  
1127  	/* Start heartbeat checks only after driver has enabled events from FW */
1128  	if (!hl_device_operational(hdev, NULL) || !hdev->init_done)
1129  		goto reschedule;
1130  
1131  	/*
1132  	 * For the EQ health check, we need to check whether the driver received the heartbeat
1133  	 * EQ event, in order to validate that the EQ is working.
1134  	 * Reschedule only if both the EQ is healthy and we managed to send the next heartbeat.
1135  	 */
1136  	if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))
1137  		goto reschedule;
1138  
1139  	if (hl_device_operational(hdev, NULL))
1140  		dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
1141  			is_pci_link_healthy(hdev) ? "healthy" : "broken");
1142  
1143  	info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
1144  	info.event_mask = &event_mask;
1145  	hl_handle_fw_err(hdev, &info);
1146  	hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
1147  
1148  	return;
1149  
1150  reschedule:
1151  	/*
1152  	 * prev_reset_trigger tracks consecutive fatal h/w errors until first
1153  	 * heartbeat immediately post reset.
1154  	 * If control reached here, then at least one heartbeat work has been
1155  	 * scheduled since last reset/init cycle.
1156  	 * So if the device is not already in reset cycle, reset the flag
1157  	 * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
1158  	 * status for at least one heartbeat. From this point driver restarts
1159  	 * tracking future consecutive fatal errors.
1160  	 */
1161  	if (!hdev->reset_info.in_reset)
1162  		hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1163  
1164  	schedule_delayed_work(&hdev->work_heartbeat,
1165  			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
1166  }
1167  
1168  /*
1169   * device_late_init - do late initialization for the habanalabs device
1170   *
1171   * @hdev: pointer to habanalabs device structure
1172   *
1173   * Do stuff that either needs the device H/W queues to be active or needs
1174   * to happen after all the rest of the initialization is finished
1175   */
1176  static int device_late_init(struct hl_device *hdev)
1177  {
1178  	int rc;
1179  
1180  	if (hdev->asic_funcs->late_init) {
1181  		rc = hdev->asic_funcs->late_init(hdev);
1182  		if (rc) {
1183  			dev_err(hdev->dev,
1184  				"failed late initialization for the H/W\n");
1185  			return rc;
1186  		}
1187  	}
1188  
1189  	hdev->high_pll = hdev->asic_prop.high_pll;
1190  	hdev->late_init_done = true;
1191  
1192  	return 0;
1193  }
1194  
1195  /*
1196   * device_late_fini - finalize all that was done in device_late_init
1197   *
1198   * @hdev: pointer to habanalabs device structure
1199   *
1200   */
1201  static void device_late_fini(struct hl_device *hdev)
1202  {
1203  	if (!hdev->late_init_done)
1204  		return;
1205  
1206  	if (hdev->asic_funcs->late_fini)
1207  		hdev->asic_funcs->late_fini(hdev);
1208  
1209  	hdev->late_init_done = false;
1210  }
1211  
1212  int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
1213  {
1214  	u64 max_power, curr_power, dc_power, dividend, divisor;
1215  	int rc;
1216  
1217  	max_power = hdev->max_power;
1218  	dc_power = hdev->asic_prop.dc_power_default;
1219  	divisor = max_power - dc_power;
1220  	if (!divisor) {
1221  		dev_warn(hdev->dev, "device utilization is not supported\n");
1222  		return -EOPNOTSUPP;
1223  	}
1224  	rc = hl_fw_cpucp_power_get(hdev, &curr_power);
1225  
1226  	if (rc)
1227  		return rc;
1228  
1229  	curr_power = clamp(curr_power, dc_power, max_power);
1230  
1231  	dividend = (curr_power - dc_power) * 100;
1232  	*utilization = (u32) div_u64(dividend, divisor);
1233  
1234  	return 0;
1235  }
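
/*
 * Worked example (illustrative numbers, same units for all three values):
 * with dc_power_default = 100, max_power = 300 and a current power reading
 * of 250, the reported utilization is (250 - 100) * 100 / (300 - 100) = 75.
 */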
1236  
1237  int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
1238  {
1239  	int rc = 0;
1240  
1241  	mutex_lock(&hdev->debug_lock);
1242  
1243  	if (!enable) {
1244  		if (!hdev->in_debug) {
1245  			dev_err(hdev->dev,
1246  				"Failed to disable debug mode because device was not in debug mode\n");
1247  			rc = -EFAULT;
1248  			goto out;
1249  		}
1250  
1251  		if (!hdev->reset_info.hard_reset_pending)
1252  			hdev->asic_funcs->halt_coresight(hdev, ctx);
1253  
1254  		hdev->in_debug = 0;
1255  
1256  		goto out;
1257  	}
1258  
1259  	if (hdev->in_debug) {
1260  		dev_err(hdev->dev,
1261  			"Failed to enable debug mode because device is already in debug mode\n");
1262  		rc = -EFAULT;
1263  		goto out;
1264  	}
1265  
1266  	hdev->in_debug = 1;
1267  
1268  out:
1269  	mutex_unlock(&hdev->debug_lock);
1270  
1271  	return rc;
1272  }
1273  
1274  static void take_release_locks(struct hl_device *hdev)
1275  {
1276  	/* Flush anyone that is inside the critical section of enqueue
1277  	 * jobs to the H/W
1278  	 */
1279  	hdev->asic_funcs->hw_queues_lock(hdev);
1280  	hdev->asic_funcs->hw_queues_unlock(hdev);
1281  
1282  	/* Flush processes that are sending message to CPU */
1283  	mutex_lock(&hdev->send_cpu_message_lock);
1284  	mutex_unlock(&hdev->send_cpu_message_lock);
1285  
1286  	/* Flush anyone that is inside device open */
1287  	mutex_lock(&hdev->fpriv_list_lock);
1288  	mutex_unlock(&hdev->fpriv_list_lock);
1289  	mutex_lock(&hdev->fpriv_ctrl_list_lock);
1290  	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
1291  }
1292  
1293  static void hl_abort_waiting_for_completions(struct hl_device *hdev)
1294  {
1295  	hl_abort_waiting_for_cs_completions(hdev);
1296  
1297  	/* Release all pending user interrupts, each pending user interrupt
1298  	 * holds a reference to a user context.
1299  	 */
1300  	hl_release_pending_user_interrupts(hdev);
1301  }
1302  
1303  static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
1304  				bool skip_wq_flush)
1305  {
1306  	if (hard_reset) {
1307  		if (hdev->heartbeat)
1308  			cancel_delayed_work_sync(&hdev->work_heartbeat);
1309  
1310  		device_late_fini(hdev);
1311  	}
1312  
1313  	/*
1314  	 * Halt the engines and disable interrupts so we won't get any more
1315  	 * completions from H/W and we won't have any accesses from the
1316  	 * H/W to the host machine
1317  	 */
1318  	hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
1319  
1320  	/* Go over all the queues, release all CS and their jobs */
1321  	hl_cs_rollback_all(hdev, skip_wq_flush);
1322  
1323  	/* flush the MMU prefetch workqueue */
1324  	flush_workqueue(hdev->prefetch_wq);
1325  
1326  	hl_abort_waiting_for_completions(hdev);
1327  }
1328  
1329  /*
1330   * hl_device_suspend - initiate device suspend
1331   *
1332   * @hdev: pointer to habanalabs device structure
1333   *
1334   * Puts the hw in the suspend state (all asics).
1335   * Returns 0 for success or an error on failure.
1336   * Called at driver suspend.
1337   */
1338  int hl_device_suspend(struct hl_device *hdev)
1339  {
1340  	int rc;
1341  
1342  	pci_save_state(hdev->pdev);
1343  
1344  	/* Block future CS/VM/JOB completion operations */
1345  	spin_lock(&hdev->reset_info.lock);
1346  	if (hdev->reset_info.in_reset) {
1347  		spin_unlock(&hdev->reset_info.lock);
1348  		dev_err(hdev->dev, "Can't suspend while in reset\n");
1349  		return -EIO;
1350  	}
1351  	hdev->reset_info.in_reset = 1;
1352  	spin_unlock(&hdev->reset_info.lock);
1353  
1354  	/* This blocks all other operations that are not blocked by in_reset */
1355  	hdev->disabled = true;
1356  
1357  	take_release_locks(hdev);
1358  
1359  	rc = hdev->asic_funcs->suspend(hdev);
1360  	if (rc)
1361  		dev_err(hdev->dev,
1362  			"Failed to disable PCI access of device CPU\n");
1363  
1364  	/* Shut down the device */
1365  	pci_disable_device(hdev->pdev);
1366  	pci_set_power_state(hdev->pdev, PCI_D3hot);
1367  
1368  	return 0;
1369  }
1370  
1371  /*
1372   * hl_device_resume - initiate device resume
1373   *
1374   * @hdev: pointer to habanalabs device structure
1375   *
1376   * Bring the hw back to operating state (all asics).
1377   * Returns 0 for success or an error on failure.
1378   * Called at driver resume.
1379   */
1380  int hl_device_resume(struct hl_device *hdev)
1381  {
1382  	int rc;
1383  
1384  	pci_set_power_state(hdev->pdev, PCI_D0);
1385  	pci_restore_state(hdev->pdev);
1386  	rc = pci_enable_device_mem(hdev->pdev);
1387  	if (rc) {
1388  		dev_err(hdev->dev,
1389  			"Failed to enable PCI device in resume\n");
1390  		return rc;
1391  	}
1392  
1393  	pci_set_master(hdev->pdev);
1394  
1395  	rc = hdev->asic_funcs->resume(hdev);
1396  	if (rc) {
1397  		dev_err(hdev->dev, "Failed to resume device after suspend\n");
1398  		goto disable_device;
1399  	}
1400  
1401  
1402  	/* 'in_reset' was set to true during suspend, now we must clear it in order
1403  	 * for hard reset to be performed
1404  	 */
1405  	spin_lock(&hdev->reset_info.lock);
1406  	hdev->reset_info.in_reset = 0;
1407  	spin_unlock(&hdev->reset_info.lock);
1408  
1409  	rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
1410  	if (rc) {
1411  		dev_err(hdev->dev, "Failed to reset device during resume\n");
1412  		goto disable_device;
1413  	}
1414  
1415  	return 0;
1416  
1417  disable_device:
1418  	pci_disable_device(hdev->pdev);
1419  
1420  	return rc;
1421  }
1422  
1423  static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
1424  {
1425  	struct task_struct *task = NULL;
1426  	struct list_head *hpriv_list;
1427  	struct hl_fpriv *hpriv;
1428  	struct mutex *hpriv_lock;
1429  	u32 pending_cnt;
1430  
1431  	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1432  	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1433  
1434  	/* Give the user time to close the FD, and give processes that are inside
1435  	 * hl_device_open time to finish
1436  	 */
1437  	if (!list_empty(hpriv_list))
1438  		ssleep(1);
1439  
1440  	if (timeout) {
1441  		pending_cnt = timeout;
1442  	} else {
1443  		if (hdev->process_kill_trial_cnt) {
1444  			/* Processes have been already killed */
1445  			pending_cnt = 1;
1446  			goto wait_for_processes;
1447  		} else {
1448  			/* Wait a small period after process kill */
1449  			pending_cnt = HL_PENDING_RESET_PER_SEC;
1450  		}
1451  	}
1452  
1453  	mutex_lock(hpriv_lock);
1454  
1455  	/* This section must be protected because we are dereferencing
1456  	 * pointers that are freed if the process exits
1457  	 */
1458  	list_for_each_entry(hpriv, hpriv_list, dev_node) {
1459  		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
1460  		if (task) {
1461  			dev_info(hdev->dev, "Killing user process pid=%d\n",
1462  				task_pid_nr(task));
1463  			send_sig(SIGKILL, task, 1);
1464  			usleep_range(1000, 10000);
1465  
1466  			put_task_struct(task);
1467  		} else {
1468  			dev_dbg(hdev->dev,
1469  				"Can't get task struct for user process %d, process was killed from outside the driver\n",
1470  				pid_nr(hpriv->taskpid));
1471  		}
1472  	}
1473  
1474  	mutex_unlock(hpriv_lock);
1475  
1476  	/*
1477  	 * We killed the open user processes, but that doesn't mean they have closed.
1478  	 * They could be running a long cleanup phase in the driver,
1479  	 * e.g. MMU unmappings, or another long teardown flow, even before
1480  	 * our cleanup.
1481  	 * Therefore we need to wait again to make sure they are closed before
1482  	 * continuing with the reset.
1483  	 */
1484  
1485  wait_for_processes:
1486  	while ((!list_empty(hpriv_list)) && (pending_cnt)) {
1487  		dev_dbg(hdev->dev,
1488  			"Waiting for all unmap operations to finish before hard reset\n");
1489  
1490  		pending_cnt--;
1491  
1492  		ssleep(1);
1493  	}
1494  
1495  	/* All processes exited successfully */
1496  	if (list_empty(hpriv_list))
1497  		return 0;
1498  
1499  	/* Give up waiting for processes to exit */
1500  	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
1501  		return -ETIME;
1502  
1503  	hdev->process_kill_trial_cnt++;
1504  
1505  	return -EBUSY;
1506  }
1507  
1508  static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
1509  {
1510  	struct list_head *hpriv_list;
1511  	struct hl_fpriv *hpriv;
1512  	struct mutex *hpriv_lock;
1513  
1514  	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1515  	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1516  
1517  	mutex_lock(hpriv_lock);
1518  	list_for_each_entry(hpriv, hpriv_list, dev_node)
1519  		hpriv->hdev = NULL;
1520  	mutex_unlock(hpriv_lock);
1521  }
1522  
1523  static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
1524  {
1525  	/* If the reset is due to a heartbeat failure, the device CPU is not responsive,
1526  	 * in which case there is no point in sending it the PCI-disable message.
1527  	 */
1528  	if ((flags & HL_DRV_RESET_HARD) &&
1529  			!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
1530  		/* Disable PCI access from the device F/W so it won't send
1531  		 * us additional interrupts. We disable MSI/MSI-X in
1532  		 * the halt_engines function and we can't have the F/W
1533  		 * sending us interrupts after that. We need to disable
1534  		 * the access here because if the device is marked
1535  		 * disabled, the message won't be sent. Also, in case
1536  		 * of heartbeat, the device CPU is marked as disabled,
1537  		 * so this message won't be sent
1538  		 */
1539  		if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
1540  			return;
1541  
1542  		/* disable_irq also synchronizes the IRQ; this verifies that the last EQ entries are
1543  		 * handled before 'disabled' is set. The IRQ will be enabled again by the request_irq call.
1544  		 */
1545  		if (hdev->cpu_queues_enable)
1546  			disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));
1547  	}
1548  }
1549  
1550  static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
1551  {
1552  	u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1553  
1554  	/* No consecutive mechanism when user context exists */
1555  	if (hdev->is_compute_ctx_active)
1556  		return;
1557  
1558  	/*
1559  	 * 'reset cause' is being updated here, because getting here
1560  	 * means that it's the 1st time and the last time we're here
1561  	 * ('in_reset' makes sure of it). This makes sure that
1562  	 * 'reset_cause' will continue holding its 1st recorded reason!
1563  	 */
1564  	if (flags & HL_DRV_RESET_HEARTBEAT) {
1565  		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
1566  		cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
1567  	} else if (flags & HL_DRV_RESET_TDR) {
1568  		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
1569  		cur_reset_trigger = HL_DRV_RESET_TDR;
1570  	} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
1571  		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1572  		cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
1573  	} else {
1574  		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1575  	}
1576  
1577  	/*
1578  	 * If reset cause is same twice, then reset_trigger_repeated
1579  	 * is set and if this reset is due to a fatal FW error
1580  	 * device is set to an unstable state.
1581  	 */
1582  	if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
1583  		hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
1584  		hdev->reset_info.reset_trigger_repeated = 0;
1585  	} else {
1586  		hdev->reset_info.reset_trigger_repeated = 1;
1587  	}
1588  }
1589  
1590  static void reset_heartbeat_debug_info(struct hl_device *hdev)
1591  {
1592  	hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;
1593  	hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;
1594  	hdev->heartbeat_debug_info.heartbeat_event_counter = 0;
1595  }
1596  
1597  static inline void device_heartbeat_schedule(struct hl_device *hdev)
1598  {
1599  	if (!hdev->heartbeat)
1600  		return;
1601  
1602  	reset_heartbeat_debug_info(hdev);
1603  
1604  	/*
1605  	 * Before scheduling the heartbeat, the driver checks whether an EQ event has
1606  	 * been received. For the first schedule we set the indication to true; from
1607  	 * then on, it will be true only if an EQ event was actually sent by the FW.
1608  	 */
1609  	hdev->eq_heartbeat_received = true;
1610  
1611  	schedule_delayed_work(&hdev->work_heartbeat,
1612  			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
1613  }
1614  
1615  /*
1616   * hl_device_reset - reset the device
1617   *
1618   * @hdev: pointer to habanalabs device structure
1619   * @flags: reset flags.
1620   *
1621   * Block future CS and wait for pending CS to be enqueued
1622   * Call ASIC H/W fini
1623   * Flush all completions
1624   * Re-initialize all internal data structures
1625   * Call ASIC H/W init, late_init
1626   * Test queues
1627   * Enable device
1628   *
1629   * Returns 0 for success or an error on failure.
1630   */
1631  int hl_device_reset(struct hl_device *hdev, u32 flags)
1632  {
1633  	bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
1634  		schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
1635  	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
1636  	struct hl_ctx *ctx;
1637  	int i, rc, hw_fini_rc;
1638  
1639  	if (!hdev->init_done) {
1640  		dev_err(hdev->dev, "Can't reset before initialization is done\n");
1641  		return 0;
1642  	}
1643  
1644  	hard_reset = !!(flags & HL_DRV_RESET_HARD);
1645  	from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
1646  	fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
1647  	from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
1648  	delay_reset = !!(flags & HL_DRV_RESET_DELAY);
1649  	from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
1650  	reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
1651  
1652  	if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
1653  		dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
1654  		return 0;
1655  	}
1656  
1657  	if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
1658  		dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
1659  		hard_reset = true;
1660  	}
1661  
1662  	if (reset_upon_device_release) {
1663  		if (hard_reset) {
1664  			dev_crit(hdev->dev,
1665  				"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
1666  			return -EINVAL;
1667  		}
1668  
1669  		goto do_reset;
1670  	}
1671  
1672  	if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
1673  		dev_dbg(hdev->dev,
1674  			"asic doesn't allow inference soft reset - do hard-reset instead\n");
1675  		hard_reset = true;
1676  	}
1677  
1678  do_reset:
1679  	/* Re-entry of reset thread */
1680  	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
1681  		goto kill_processes;
1682  
1683  	/*
1684  	 * Prevent concurrency in this function - only one reset should be
1685  	 * done at any given time. We need to perform this only if we didn't
1686  	 * get here from a dedicated hard reset thread.
1687  	 */
1688  	if (!from_hard_reset_thread) {
1689  		/* Block future CS/VM/JOB completion operations */
1690  		spin_lock(&hdev->reset_info.lock);
1691  		if (hdev->reset_info.in_reset) {
1692  			/* We allow scheduling of a hard reset only during a compute reset */
1693  			if (hard_reset && hdev->reset_info.in_compute_reset)
1694  				hdev->reset_info.hard_reset_schedule_flags = flags;
1695  			spin_unlock(&hdev->reset_info.lock);
1696  			return 0;
1697  		}
1698  
1699  		/* This still allows the completion of some KDMA ops.
1700  		 * Update this before in_reset because in_compute_reset implies we are in reset.
1701  		 */
1702  		hdev->reset_info.in_compute_reset = !hard_reset;
1703  
1704  		hdev->reset_info.in_reset = 1;
1705  
1706  		spin_unlock(&hdev->reset_info.lock);
1707  
1708  		/* Cancel the device release watchdog work if required.
1709  		 * In case of reset-upon-device-release while the release watchdog work is
1710  		 * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
1711  		 */
1712  		if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
1713  			struct hl_device_reset_work *watchdog_work =
1714  					&hdev->device_release_watchdog_work;
1715  
1716  			hdev->reset_info.watchdog_active = 0;
1717  			if (!from_watchdog_thread)
1718  				cancel_delayed_work_sync(&watchdog_work->reset_work);
1719  
1720  			if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
1721  				hdev->reset_info.in_compute_reset = 0;
1722  				flags |= HL_DRV_RESET_HARD;
1723  				flags &= ~HL_DRV_RESET_DEV_RELEASE;
1724  				hard_reset = true;
1725  			}
1726  		}
1727  
1728  		if (delay_reset)
1729  			usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
1730  
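	/* Compute-reset failures (see out_err below) and a hard reset that was
	 * scheduled during a compute reset jump back to this label with
	 * HL_DRV_RESET_HARD set in 'flags', escalating the flow to a full
	 * hard reset.
	 */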
1731  escalate_reset_flow:
1732  		handle_reset_trigger(hdev, flags);
1733  		send_disable_pci_access(hdev, flags);
1734  
1735  		/* This also blocks future CS/VM/JOB completion operations */
1736  		hdev->disabled = true;
1737  
1738  		take_release_locks(hdev);
1739  
1740  		if (hard_reset)
1741  			dev_info(hdev->dev, "Going to reset device\n");
1742  		else if (reset_upon_device_release)
1743  			dev_dbg(hdev->dev, "Going to reset device after release by user\n");
1744  		else
1745  			dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
1746  	}
1747  
1748  	if ((hard_reset) && (!from_hard_reset_thread)) {
1749  		hdev->reset_info.hard_reset_pending = true;
1750  
1751  		hdev->process_kill_trial_cnt = 0;
1752  
1753  		hdev->device_reset_work.flags = flags;
1754  
1755  		/*
1756  		 * Because the reset function can't run from heartbeat work,
1757  		 * we need to call the reset function from a dedicated work.
1758  		 */
1759  		queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);
1760  
1761  		return 0;
1762  	}
1763  
1764  	cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);
1765  
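	/* On a hard reset, device_kill_open_processes() may return -EBUSY while
	 * user processes are still draining their CSs. In that case the reset
	 * thread is rescheduled and re-enters here (see the re-entry check at the
	 * top of this function) until HL_PENDING_RESET_MAX_TRIALS is exhausted.
	 */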
1766  kill_processes:
1767  	if (hard_reset) {
1768  		/* Kill processes here after CS rollback. This is because the
1769  		 * process can't really exit until all its CSs are done, which
1770  		 * is what we do in cs rollback
1771  		 */
1772  		rc = device_kill_open_processes(hdev, 0, false);
1773  
1774  		if (rc == -EBUSY) {
1775  			if (hdev->device_fini_pending) {
1776  				dev_crit(hdev->dev,
1777  					"%s Failed to kill all open processes, stopping hard reset\n",
1778  					dev_name(&(hdev)->pdev->dev));
1779  				goto out_err;
1780  			}
1781  
1782  			/* signal reset thread to reschedule */
1783  			return rc;
1784  		}
1785  
1786  		if (rc) {
1787  			dev_crit(hdev->dev,
1788  				"%s Failed to kill all open processes, stopping hard reset\n",
1789  				dev_name(&(hdev)->pdev->dev));
1790  			goto out_err;
1791  		}
1792  
1793  		/* Flush the Event queue workers to make sure no other thread is
1794  		 * reading or writing to registers during the reset
1795  		 */
1796  		flush_workqueue(hdev->eq_wq);
1797  	}
1798  
1799  	/* Reset the H/W. It will be in idle state after this returns */
1800  	hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
1801  
1802  	if (hard_reset) {
1803  		hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
1804  
1805  		/* Release kernel context */
1806  		if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
1807  			hdev->kernel_ctx = NULL;
1808  
1809  		hl_vm_fini(hdev);
1810  		hl_mmu_fini(hdev);
1811  		hl_eq_reset(hdev, &hdev->event_queue);
1812  	}
1813  
1814  	/* Re-initialize PI, CI to 0 in all queues (hw queue, cq) */
1815  	hl_hw_queue_reset(hdev, hard_reset);
1816  	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1817  		hl_cq_reset(hdev, &hdev->completion_queue[i]);
1818  
1819  	/* Make sure the context switch phase will run again */
1820  	ctx = hl_get_compute_ctx(hdev);
1821  	if (ctx) {
1822  		atomic_set(&ctx->thread_ctx_switch_token, 1);
1823  		ctx->thread_ctx_switch_wait_token = 0;
1824  		hl_ctx_put(ctx);
1825  	}
1826  
1827  	if (hw_fini_rc) {
1828  		rc = hw_fini_rc;
1829  		goto out_err;
1830  	}
1831  	/* Finished tear-down, starting to re-initialize */
1832  
1833  	if (hard_reset) {
1834  		hdev->device_cpu_disabled = false;
1835  		hdev->reset_info.hard_reset_pending = false;
1836  
1837  		/*
1838  		 * Put the device in an unusable state if there are two back-to-back
1839  		 * resets due to fatal errors.
1840  		 */
1841  		if (hdev->reset_info.reset_trigger_repeated &&
1842  				(hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR ||
1843  						hdev->reset_info.prev_reset_trigger ==
1844  								HL_DRV_RESET_HEARTBEAT)) {
1845  			dev_crit(hdev->dev,
1846  				"%s Consecutive fatal errors, stopping hard reset\n",
1847  				dev_name(&(hdev)->pdev->dev));
1848  			rc = -EIO;
1849  			goto out_err;
1850  		}
1851  
1852  		if (hdev->kernel_ctx) {
1853  			dev_crit(hdev->dev,
1854  				"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
1855  				dev_name(&(hdev)->pdev->dev));
1856  			rc = -EBUSY;
1857  			goto out_err;
1858  		}
1859  
1860  		rc = hl_mmu_init(hdev);
1861  		if (rc) {
1862  			dev_err(hdev->dev,
1863  				"Failed to initialize MMU S/W after hard reset\n");
1864  			goto out_err;
1865  		}
1866  
1867  		/* Allocate the kernel context */
1868  		hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
1869  						GFP_KERNEL);
1870  		if (!hdev->kernel_ctx) {
1871  			rc = -ENOMEM;
1872  			hl_mmu_fini(hdev);
1873  			goto out_err;
1874  		}
1875  
1876  		hdev->is_compute_ctx_active = false;
1877  
1878  		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
1879  		if (rc) {
1880  			dev_err(hdev->dev,
1881  				"failed to init kernel ctx in hard reset\n");
1882  			kfree(hdev->kernel_ctx);
1883  			hdev->kernel_ctx = NULL;
1884  			hl_mmu_fini(hdev);
1885  			goto out_err;
1886  		}
1887  	}
1888  
1889  	/* The device is enabled now because part of the initialization
1890  	 * requires communication with the device firmware to get
1891  	 * information that is needed for the initialization itself.
1892  	 */
1893  	hdev->disabled = false;
1894  
1895  	/* F/W security enabled indication might be updated after hard-reset */
1896  	if (hard_reset) {
1897  		rc = hl_fw_read_preboot_status(hdev);
1898  		if (rc)
1899  			goto out_err;
1900  	}
1901  
1902  	rc = hdev->asic_funcs->hw_init(hdev);
1903  	if (rc) {
1904  		dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
1905  		goto out_err;
1906  	}
1907  
1908  	/* If device is not idle fail the reset process */
1909  	if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
1910  						HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
1911  		print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
1912  		rc = -EIO;
1913  		goto out_err;
1914  	}
1915  
1916  	/* Check that the communication with the device is working */
1917  	rc = hdev->asic_funcs->test_queues(hdev);
1918  	if (rc) {
1919  		dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
1920  		goto out_err;
1921  	}
1922  
1923  	if (hard_reset) {
1924  		rc = device_late_init(hdev);
1925  		if (rc) {
1926  			dev_err(hdev->dev, "Failed late init after hard reset\n");
1927  			goto out_err;
1928  		}
1929  
1930  		rc = hl_vm_init(hdev);
1931  		if (rc) {
1932  			dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
1933  			goto out_err;
1934  		}
1935  
1936  		if (!hdev->asic_prop.fw_security_enabled)
1937  			hl_fw_set_max_power(hdev);
1938  	} else {
1939  		rc = hdev->asic_funcs->compute_reset_late_init(hdev);
1940  		if (rc) {
1941  			if (reset_upon_device_release)
1942  				dev_err(hdev->dev,
1943  					"Failed late init in reset after device release\n");
1944  			else
1945  				dev_err(hdev->dev, "Failed late init after compute reset\n");
1946  			goto out_err;
1947  		}
1948  	}
1949  
1950  	rc = hdev->asic_funcs->scrub_device_mem(hdev);
1951  	if (rc) {
1952  		dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
1953  		goto out_err;
1954  	}
1955  
1956  	spin_lock(&hdev->reset_info.lock);
1957  	hdev->reset_info.in_compute_reset = 0;
1958  
1959  	/* Schedule hard reset only if requested and if not already in hard reset.
1960  	 * We keep 'in_reset' enabled, so no other reset can go in during the hard
1961  	 * reset schedule
1962  	 */
1963  	if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
1964  		schedule_hard_reset = true;
1965  	else
1966  		hdev->reset_info.in_reset = 0;
1967  
1968  	spin_unlock(&hdev->reset_info.lock);
1969  
1970  	hdev->reset_info.needs_reset = false;
1971  
1972  	if (hard_reset)
1973  		dev_info(hdev->dev,
1974  			 "Successfully finished resetting the %s device\n",
1975  			 dev_name(&(hdev)->pdev->dev));
1976  	else
1977  		dev_dbg(hdev->dev,
1978  			"Successfully finished resetting the %s device\n",
1979  			dev_name(&(hdev)->pdev->dev));
1980  
1981  	if (hard_reset) {
1982  		hdev->reset_info.hard_reset_cnt++;
1983  
1984  		device_heartbeat_schedule(hdev);
1985  
1986  		/* After reset is done, we are ready to receive events from
1987  		 * the F/W. We can't do it before because we will ignore events
1988  		 * and if those events are fatal, we won't know about it and
1989  		 * the device will be operational although it shouldn't be
1990  		 */
1991  		hdev->asic_funcs->enable_events_from_fw(hdev);
1992  	} else {
1993  		if (!reset_upon_device_release)
1994  			hdev->reset_info.compute_reset_cnt++;
1995  
1996  		if (schedule_hard_reset) {
1997  			dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
1998  			flags = hdev->reset_info.hard_reset_schedule_flags;
1999  			hdev->reset_info.hard_reset_schedule_flags = 0;
2000  			hard_reset = true;
2001  			goto escalate_reset_flow;
2002  		}
2003  	}
2004  
2005  	return 0;
2006  
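	/* Error path: a failed compute reset is escalated to a hard reset by
	 * OR-ing HL_DRV_RESET_HARD into 'flags' and jumping back to
	 * escalate_reset_flow. A failed hard reset leaves the device disabled
	 * and unusable.
	 */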
2007  out_err:
2008  	hdev->disabled = true;
2009  
2010  	spin_lock(&hdev->reset_info.lock);
2011  	hdev->reset_info.in_compute_reset = 0;
2012  
2013  	if (hard_reset) {
2014  		dev_err(hdev->dev,
2015  			"%s Failed to reset! Device is NOT usable\n",
2016  			dev_name(&(hdev)->pdev->dev));
2017  		hdev->reset_info.hard_reset_cnt++;
2018  	} else {
2019  		if (reset_upon_device_release) {
2020  			dev_err(hdev->dev, "Failed to reset device after user release\n");
2021  			flags &= ~HL_DRV_RESET_DEV_RELEASE;
2022  		} else {
2023  			dev_err(hdev->dev, "Failed to do compute reset\n");
2024  			hdev->reset_info.compute_reset_cnt++;
2025  		}
2026  
2027  		spin_unlock(&hdev->reset_info.lock);
2028  		flags |= HL_DRV_RESET_HARD;
2029  		hard_reset = true;
2030  		goto escalate_reset_flow;
2031  	}
2032  
2033  	hdev->reset_info.in_reset = 0;
2034  
2035  	spin_unlock(&hdev->reset_info.lock);
2036  
2037  	return rc;
2038  }
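
/*
 * Usage sketch (illustrative only): callers that need a full hard reset pass
 * HL_DRV_RESET_HARD in 'flags', e.g.
 *
 *	hl_device_reset(hdev, HL_DRV_RESET_HARD);
 *
 * while passing 0 requests a compute reset, which this function escalates to
 * a hard reset on its own if the ASIC does not support compute reset.
 */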
2039  
2040  /*
2041   * hl_device_cond_reset() - conditionally reset the device.
2042   * @hdev: pointer to habanalabs device structure.
2043   * @reset_flags: reset flags.
2044   * @event_mask: events to notify user about.
2045   *
2046   * Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
2047   * unless another reset precedes it.
2048   */
2049  int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
2050  {
2051  	struct hl_ctx *ctx = NULL;
2052  
2053  	/* F/W reset cannot be postponed */
2054  	if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
2055  		goto device_reset;
2056  
2057  	/* Device release watchdog is relevant only if user exists and gets a reset notification */
2058  	if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
2059  		dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
2060  		goto device_reset;
2061  	}
2062  
2063  	ctx = hl_get_compute_ctx(hdev);
2064  	if (!ctx)
2065  		goto device_reset;
2066  
2067  	/*
2068  	 * There is no point in postponing the reset if user is not registered for events.
2069  	 * However if no eventfd_ctx exists but the device release watchdog is already scheduled, it
2070  	 * just implies that user has unregistered as part of handling a previous event. In this
2071  	 * case an immediate reset is not required.
2072  	 */
2073  	if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)
2074  		goto device_reset;
2075  
2076  	/* Schedule the device release watchdog work unless reset is already in progress or if the
2077  	 * work is already scheduled.
2078  	 */
2079  	spin_lock(&hdev->reset_info.lock);
2080  	if (hdev->reset_info.in_reset) {
2081  		spin_unlock(&hdev->reset_info.lock);
2082  		goto device_reset;
2083  	}
2084  
2085  	if (hdev->reset_info.watchdog_active) {
2086  		hdev->device_release_watchdog_work.flags |= flags;
2087  		goto out;
2088  	}
2089  
2090  	hdev->device_release_watchdog_work.flags = flags;
2091  	dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
2092  		hdev->device_release_watchdog_timeout_sec);
2093  	schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
2094  				msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
2095  	hdev->reset_info.watchdog_active = 1;
2096  out:
2097  	spin_unlock(&hdev->reset_info.lock);
2098  
2099  	hl_notifier_event_send_all(hdev, event_mask);
2100  
2101  	hl_ctx_put(ctx);
2102  
2103  	hl_abort_waiting_for_completions(hdev);
2104  
2105  	return 0;
2106  
2107  device_reset:
2108  	if (event_mask)
2109  		hl_notifier_event_send_all(hdev, event_mask);
2110  	if (ctx)
2111  		hl_ctx_put(ctx);
2112  
2113  	return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD);
2114  }
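
/*
 * Usage sketch (illustrative only): an ASIC event handler that wants the user
 * to get a chance to release the device before a hard reset would typically do
 * something like
 *
 *	event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
 *	hl_device_cond_reset(hdev, HL_DRV_RESET_HARD, event_mask);
 *
 * so the reset is postponed by the device release watchdog unless another
 * reset precedes it.
 */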
2115  
2116  static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
2117  {
2118  	mutex_lock(&notifier_event->lock);
2119  	notifier_event->events_mask |= event_mask;
2120  
2121  	if (notifier_event->eventfd)
2122  		eventfd_signal(notifier_event->eventfd);
2123  
2124  	mutex_unlock(&notifier_event->lock);
2125  }
2126  
2127  /*
2128   * hl_notifier_event_send_all - notify all user processes via eventfd
2129   *
2130   * @hdev: pointer to habanalabs device structure
2131   * @event_mask: the occurred event/s
2133   */
2134  void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
2135  {
2136  	struct hl_fpriv	*hpriv;
2137  
2138  	if (!event_mask) {
2139  		dev_warn(hdev->dev, "Skip sending zero event");
2140  		return;
2141  	}
2142  
2143  	mutex_lock(&hdev->fpriv_list_lock);
2144  
2145  	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
2146  		hl_notifier_event_send(&hpriv->notifier_event, event_mask);
2147  
2148  	mutex_unlock(&hdev->fpriv_list_lock);
2149  }
2150  
2151  /*
2152   * hl_device_init - main initialization function for habanalabs device
2153   *
2154   * @hdev: pointer to habanalabs device structure
2155   *
2156   * Allocate an id for the device, do early initialization and then call the
2157   * ASIC specific initialization functions. Finally, create the cdev and the
2158   * Linux device to expose it to the user
2159   */
2160  int hl_device_init(struct hl_device *hdev)
2161  {
2162  	int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
2163  	struct hl_ts_free_jobs *free_jobs_data;
2164  	bool expose_interfaces_on_err = false;
2165  	void *p;
2166  
2167  	/* Initialize ASIC function pointers and perform early init */
2168  	rc = device_early_init(hdev);
2169  	if (rc)
2170  		goto out_disabled;
2171  
2172  	user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2173  				hdev->asic_prop.user_interrupt_count;
2174  
2175  	if (user_interrupt_cnt) {
2176  		hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
2177  						GFP_KERNEL);
2178  		if (!hdev->user_interrupt) {
2179  			rc = -ENOMEM;
2180  			goto early_fini;
2181  		}
2182  
2183  		/* Timestamp records supported only if CQ supported in device */
2184  		if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2185  			for (i = 0 ; i < user_interrupt_cnt ; i++) {
2186  				p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2187  						sizeof(struct timestamp_reg_free_node));
2188  				if (!p) {
2189  					rc = -ENOMEM;
2190  					goto free_usr_intr_mem;
2191  				}
2192  				free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;
2193  				free_jobs_data->free_nodes_pool = p;
2194  				free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2195  				free_jobs_data->next_avail_free_node_idx = 0;
2196  			}
2197  		}
2198  	}
2199  
2200  	free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;
2201  	p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2202  				sizeof(struct timestamp_reg_free_node));
2203  	if (!p) {
2204  		rc = -ENOMEM;
2205  		goto free_usr_intr_mem;
2206  	}
2207  
2208  	free_jobs_data->free_nodes_pool = p;
2209  	free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2210  	free_jobs_data->next_avail_free_node_idx = 0;
2211  
2212  	/*
2213  	 * Start calling ASIC initialization. First S/W then H/W and finally
2214  	 * late init
2215  	 */
2216  	rc = hdev->asic_funcs->sw_init(hdev);
2217  	if (rc)
2218  		goto free_common_usr_intr_mem;
2219  
2220  
2221  	/* initialize completion structure for multi CS wait */
2222  	hl_multi_cs_completion_init(hdev);
2223  
2224  	/*
2225  	 * Initialize the H/W queues. Must be done before hw_init, because
2226  	 * there the addresses of the kernel queue are being written to the
2227  	 * registers of the device
2228  	 */
2229  	rc = hl_hw_queues_create(hdev);
2230  	if (rc) {
2231  		dev_err(hdev->dev, "failed to initialize kernel queues\n");
2232  		goto sw_fini;
2233  	}
2234  
2235  	cq_cnt = hdev->asic_prop.completion_queues_count;
2236  
2237  	/*
2238  	 * Initialize the completion queues. Must be done before hw_init,
2239  	 * because there the addresses of the completion queues are being
2240  	 * passed as arguments to request_irq
2241  	 */
2242  	if (cq_cnt) {
2243  		hdev->completion_queue = kcalloc(cq_cnt,
2244  				sizeof(*hdev->completion_queue),
2245  				GFP_KERNEL);
2246  
2247  		if (!hdev->completion_queue) {
2248  			dev_err(hdev->dev,
2249  				"failed to allocate completion queues\n");
2250  			rc = -ENOMEM;
2251  			goto hw_queues_destroy;
2252  		}
2253  	}
2254  
2255  	for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
2256  		rc = hl_cq_init(hdev, &hdev->completion_queue[i],
2257  				hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
2258  		if (rc) {
2259  			dev_err(hdev->dev,
2260  				"failed to initialize completion queue\n");
2261  			goto cq_fini;
2262  		}
2263  		hdev->completion_queue[i].cq_idx = i;
2264  	}
2265  
2266  	hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
2267  					sizeof(struct hl_cs *), GFP_KERNEL);
2268  	if (!hdev->shadow_cs_queue) {
2269  		rc = -ENOMEM;
2270  		goto cq_fini;
2271  	}
2272  
2273  	/*
2274  	 * Initialize the event queue. Must be done before hw_init,
2275  	 * because there the address of the event queue is being
2276  	 * passed as argument to request_irq
2277  	 */
2278  	rc = hl_eq_init(hdev, &hdev->event_queue);
2279  	if (rc) {
2280  		dev_err(hdev->dev, "failed to initialize event queue\n");
2281  		goto free_shadow_cs_queue;
2282  	}
2283  
2284  	/* MMU S/W must be initialized before kernel context is created */
2285  	rc = hl_mmu_init(hdev);
2286  	if (rc) {
2287  		dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
2288  		goto eq_fini;
2289  	}
2290  
2291  	/* Allocate the kernel context */
2292  	hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
2293  	if (!hdev->kernel_ctx) {
2294  		rc = -ENOMEM;
2295  		goto mmu_fini;
2296  	}
2297  
2298  	hdev->is_compute_ctx_active = false;
2299  
2300  	hdev->asic_funcs->state_dump_init(hdev);
2301  
2302  	hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
2303  
2304  	hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
2305  
2306  	rc = hl_debugfs_device_init(hdev);
2307  	if (rc) {
2308  		dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");
2309  		kfree(hdev->kernel_ctx);
2310  		goto mmu_fini;
2311  	}
2312  
2313  	/* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after
2314  	 * hl_debugfs_device_init().
2315  	 */
2316  	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
2317  	if (rc) {
2318  		dev_err(hdev->dev, "failed to initialize kernel context\n");
2319  		kfree(hdev->kernel_ctx);
2320  		goto debugfs_device_fini;
2321  	}
2322  
2323  	rc = hl_cb_pool_init(hdev);
2324  	if (rc) {
2325  		dev_err(hdev->dev, "failed to initialize CB pool\n");
2326  		goto release_ctx;
2327  	}
2328  
2329  	rc = hl_dec_init(hdev);
2330  	if (rc) {
2331  		dev_err(hdev->dev, "Failed to initialize the decoder module\n");
2332  		goto cb_pool_fini;
2333  	}
2334  
2335  	/*
2336  	 * From this point, override rc (=0) in case of an error to allow debugging
2337  	 * (by adding char devices and creating sysfs/debugfs files as part of the error flow).
2338  	 */
2339  	expose_interfaces_on_err = true;
2340  
2341  	/* The device is enabled now because part of the initialization
2342  	 * requires communication with the device firmware to get
2343  	 * information that is needed for the initialization itself.
2344  	 */
2345  	hdev->disabled = false;
2346  
2347  	rc = hdev->asic_funcs->hw_init(hdev);
2348  	if (rc) {
2349  		dev_err(hdev->dev, "failed to initialize the H/W\n");
2350  		rc = 0;
2351  		goto out_disabled;
2352  	}
2353  
2354  	/* Check that the communication with the device is working */
2355  	rc = hdev->asic_funcs->test_queues(hdev);
2356  	if (rc) {
2357  		dev_err(hdev->dev, "Failed to detect if device is alive\n");
2358  		rc = 0;
2359  		goto out_disabled;
2360  	}
2361  
2362  	rc = device_late_init(hdev);
2363  	if (rc) {
2364  		dev_err(hdev->dev, "Failed late initialization\n");
2365  		rc = 0;
2366  		goto out_disabled;
2367  	}
2368  
2369  	dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
2370  		hdev->asic_name,
2371  		hdev->asic_prop.dram_size / SZ_1G);
2372  
2373  	rc = hl_vm_init(hdev);
2374  	if (rc) {
2375  		dev_err(hdev->dev, "Failed to initialize memory module\n");
2376  		rc = 0;
2377  		goto out_disabled;
2378  	}
2379  
2380  	/*
2381  	 * Expose devices and sysfs/debugfs files to user.
2382  	 * From here there is no need to expose them in case of an error.
2383  	 */
2384  	expose_interfaces_on_err = false;
2385  
2386  	rc = drm_dev_register(&hdev->drm, 0);
2387  	if (rc) {
2388  		dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);
2389  		rc = 0;
2390  		goto out_disabled;
2391  	}
2392  
2393  	rc = cdev_sysfs_debugfs_add(hdev);
2394  	if (rc) {
2395  		dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
2396  		rc = 0;
2397  		goto out_disabled;
2398  	}
2399  
2400  	/* Need to call this again because the max power might change,
2401  	 * depending on card type for certain ASICs
2402  	 */
2403  	if (hdev->asic_prop.set_max_power_on_device_init &&
2404  			!hdev->asic_prop.fw_security_enabled)
2405  		hl_fw_set_max_power(hdev);
2406  
2407  	/*
2408  	 * hl_hwmon_init() must be called after device_late_init(), because only
2409  	 * there we get the information from the device about which
2410  	 * hwmon-related sensors the device supports.
2411  	 * Furthermore, it must be done after adding the device to the system.
2412  	 */
2413  	rc = hl_hwmon_init(hdev);
2414  	if (rc) {
2415  		dev_err(hdev->dev, "Failed to initialize hwmon\n");
2416  		rc = 0;
2417  		goto out_disabled;
2418  	}
2419  
2420  	/* Scheduling the EQ heartbeat thread must come after driver is done with all
2421  	 * initializations, as we want to make sure the FW gets enough time to be prepared
2422  	 * to respond to heartbeat packets.
2423  	 */
2424  	device_heartbeat_schedule(hdev);
2425  
2426  	dev_notice(hdev->dev,
2427  		"Successfully added device %s to habanalabs driver\n",
2428  		dev_name(&(hdev)->pdev->dev));
2429  
2430  	/* After initialization is done, we are ready to receive events from
2431  	 * the F/W. We can't do it before because we will ignore events and if
2432  	 * those events are fatal, we won't know about it and the device will
2433  	 * be operational although it shouldn't be
2434  	 */
2435  	hdev->asic_funcs->enable_events_from_fw(hdev);
2436  
2437  	hdev->init_done = true;
2438  
2439  	return 0;
2440  
2441  cb_pool_fini:
2442  	hl_cb_pool_fini(hdev);
2443  release_ctx:
2444  	if (hl_ctx_put(hdev->kernel_ctx) != 1)
2445  		dev_err(hdev->dev,
2446  			"kernel ctx is still alive on initialization failure\n");
2447  debugfs_device_fini:
2448  	hl_debugfs_device_fini(hdev);
2449  mmu_fini:
2450  	hl_mmu_fini(hdev);
2451  eq_fini:
2452  	hl_eq_fini(hdev, &hdev->event_queue);
2453  free_shadow_cs_queue:
2454  	kfree(hdev->shadow_cs_queue);
2455  cq_fini:
2456  	for (i = 0 ; i < cq_ready_cnt ; i++)
2457  		hl_cq_fini(hdev, &hdev->completion_queue[i]);
2458  	kfree(hdev->completion_queue);
2459  hw_queues_destroy:
2460  	hl_hw_queues_destroy(hdev);
2461  sw_fini:
2462  	hdev->asic_funcs->sw_fini(hdev);
2463  free_common_usr_intr_mem:
2464  	vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2465  free_usr_intr_mem:
2466  	if (user_interrupt_cnt) {
2467  		for (i = 0 ; i < user_interrupt_cnt ; i++) {
2468  			if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)
2469  				break;
2470  			vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2471  		}
2472  		kfree(hdev->user_interrupt);
2473  	}
2474  early_fini:
2475  	device_early_fini(hdev);
2476  out_disabled:
2477  	hdev->disabled = true;
2478  	if (expose_interfaces_on_err) {
2479  		drm_dev_register(&hdev->drm, 0);
2480  		cdev_sysfs_debugfs_add(hdev);
2481  	}
2482  
2483  	pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",
2484  		hdev->cdev_idx, dev_name(&hdev->pdev->dev));
2485  
2486  	return rc;
2487  }
2488  
2489  /*
2490   * hl_device_fini - main tear-down function for habanalabs device
2491   *
2492   * @hdev: pointer to habanalabs device structure
2493   *
2494   * Destroy the device, call ASIC fini functions and release the id
2495   */
2496  void hl_device_fini(struct hl_device *hdev)
2497  {
2498  	u32 user_interrupt_cnt;
2499  	bool device_in_reset;
2500  	ktime_t timeout;
2501  	u64 reset_sec;
2502  	int i, rc;
2503  
2504  	dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));
2505  
2506  	hdev->device_fini_pending = 1;
2507  	flush_delayed_work(&hdev->device_reset_work.reset_work);
2508  
2509  	if (hdev->pldm)
2510  		reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
2511  	else
2512  		reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
2513  
2514  	/*
2515  	 * This function is competing with the reset function, so try to
2516  	 * take the reset atomic, and if we are already in the middle of a reset,
2517  	 * wait until the reset function is finished. The reset function is designed
2518  	 * to always finish. However, in Gaudi, because of all the network
2519  	 * ports, the hard reset could take between 10 and 30 seconds.
2520  	 */
2521  
2522  	timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
2523  
2524  	spin_lock(&hdev->reset_info.lock);
2525  	device_in_reset = !!hdev->reset_info.in_reset;
2526  	if (!device_in_reset)
2527  		hdev->reset_info.in_reset = 1;
2528  	spin_unlock(&hdev->reset_info.lock);
2529  
2530  	while (device_in_reset) {
2531  		usleep_range(50, 200);
2532  
2533  		spin_lock(&hdev->reset_info.lock);
2534  		device_in_reset = !!hdev->reset_info.in_reset;
2535  		if (!device_in_reset)
2536  			hdev->reset_info.in_reset = 1;
2537  		spin_unlock(&hdev->reset_info.lock);
2538  
2539  		if (ktime_compare(ktime_get(), timeout) > 0) {
2540  			dev_crit(hdev->dev,
2541  				"%s Failed to remove device because reset function did not finish\n",
2542  				dev_name(&(hdev)->pdev->dev));
2543  			return;
2544  		}
2545  	}
2546  
2547  	cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
2548  
2549  	/* Disable PCI access from the device F/W so it won't send us additional
2550  	 * interrupts. We disable MSI/MSI-X in the halt_engines function and we
2551  	 * can't have the F/W sending us interrupts after that. We need to
2552  	 * disable the access here because if the device is marked as disabled, the
2553  	 * message won't be sent. Also, in case of heartbeat, the device CPU is
2554  	 * marked as disabled, so this message won't be sent.
2555  	 */
2556  	hl_fw_send_pci_access_msg(hdev,	CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2557  
2558  	/* Mark device as disabled */
2559  	hdev->disabled = true;
2560  
2561  	take_release_locks(hdev);
2562  
2563  	hdev->reset_info.hard_reset_pending = true;
2564  
2565  	hl_hwmon_fini(hdev);
2566  
2567  	cleanup_resources(hdev, true, false, false);
2568  
2569  	/* Kill processes here after CS rollback. This is because the process
2570  	 * can't really exit until all its CSs are done, which is what we
2571  	 * do in cs rollback
2572  	 */
2573  	dev_info(hdev->dev,
2574  		"Waiting for all processes to exit (timeout of %u seconds)",
2575  		HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
2576  
2577  	hdev->process_kill_trial_cnt = 0;
2578  	rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
2579  	if (rc) {
2580  		dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);
2581  		device_disable_open_processes(hdev, false);
2582  	}
2583  
2584  	hdev->process_kill_trial_cnt = 0;
2585  	rc = device_kill_open_processes(hdev, 0, true);
2586  	if (rc) {
2587  		dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);
2588  		device_disable_open_processes(hdev, true);
2589  	}
2590  
2591  	hl_cb_pool_fini(hdev);
2592  
2593  	/* Reset the H/W. It will be in idle state after this returns */
2594  	rc = hdev->asic_funcs->hw_fini(hdev, true, false);
2595  	if (rc)
2596  		dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
2597  
2598  	hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
2599  
2600  	/* Release kernel context */
2601  	if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
2602  		dev_err(hdev->dev, "kernel ctx is still alive\n");
2603  
2604  	hl_dec_fini(hdev);
2605  
2606  	hl_vm_fini(hdev);
2607  
2608  	hl_mmu_fini(hdev);
2609  
2610  	vfree(hdev->captured_err_info.page_fault_info.user_mappings);
2611  
2612  	hl_eq_fini(hdev, &hdev->event_queue);
2613  
2614  	kfree(hdev->shadow_cs_queue);
2615  
2616  	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2617  		hl_cq_fini(hdev, &hdev->completion_queue[i]);
2618  	kfree(hdev->completion_queue);
2619  
2620  	user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2621  					hdev->asic_prop.user_interrupt_count;
2622  
2623  	if (user_interrupt_cnt) {
2624  		if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2625  			for (i = 0 ; i < user_interrupt_cnt ; i++)
2626  				vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2627  		}
2628  
2629  		kfree(hdev->user_interrupt);
2630  	}
2631  
2632  	vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2633  
2634  	hl_hw_queues_destroy(hdev);
2635  
2636  	/* Call ASIC S/W finalize function */
2637  	hdev->asic_funcs->sw_fini(hdev);
2638  
2639  	device_early_fini(hdev);
2640  
2641  	/* Hide devices and sysfs/debugfs files from user */
2642  	cdev_sysfs_debugfs_remove(hdev);
2643  	drm_dev_unregister(&hdev->drm);
2644  
2645  	hl_debugfs_device_fini(hdev);
2646  
2647  	pr_info("removed device successfully\n");
2648  }
2649  
2650  /*
2651   * MMIO register access helper functions.
2652   */
2653  
2654  /*
2655   * hl_rreg - Read an MMIO register
2656   *
2657   * @hdev: pointer to habanalabs device structure
2658   * @reg: MMIO register offset (in bytes)
2659   *
2660   * Returns the value of the MMIO register we are asked to read
2661   *
2662   */
2663  inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
2664  {
2665  	u32 val = readl(hdev->rmmio + reg);
2666  
2667  	if (unlikely(trace_habanalabs_rreg32_enabled()))
2668  		trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val);
2669  
2670  	return val;
2671  }
2672  
2673  /*
2674   * hl_wreg - Write to an MMIO register
2675   *
2676   * @hdev: pointer to habanalabs device structure
2677   * @reg: MMIO register offset (in bytes)
2678   * @val: 32-bit value
2679   *
2680   * Writes the 32-bit value into the MMIO register
2681   *
2682   */
2683  inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
2684  {
2685  	if (unlikely(trace_habanalabs_wreg32_enabled()))
2686  		trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val);
2687  
2688  	writel(val, hdev->rmmio + reg);
2689  }
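
/*
 * Sketch (illustrative only, guarded out): a register polling loop built on
 * top of hl_rreg(). example_poll_reg() and its timing granularity are made up
 * for the example and are not part of the driver; real ASIC code uses its own
 * polling helpers.
 */
#if 0
static int example_poll_reg(struct hl_device *hdev, u32 reg, u32 expected_val,
				u32 timeout_us)
{
	u32 elapsed_us = 0;

	while (elapsed_us < timeout_us) {
		/* Read through the traced MMIO helper */
		if (hl_rreg(hdev, reg) == expected_val)
			return 0;

		usleep_range(10, 20);
		elapsed_us += 10;
	}

	return -ETIMEDOUT;
}
#endif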
2690  
2691  void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2692  			u8 flags)
2693  {
2694  	struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
2695  
2696  	if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
2697  		dev_err(hdev->dev,
2698  				"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
2699  				num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
2700  		return;
2701  	}
2702  
2703  	/* In case it's the first razwi since the device was opened, capture its parameters */
2704  	if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
2705  		return;
2706  
2707  	razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
2708  	razwi_info->razwi.addr = addr;
2709  	razwi_info->razwi.num_of_possible_engines = num_of_engines;
2710  	memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
2711  			num_of_engines * sizeof(u16));
2712  	razwi_info->razwi.flags = flags;
2713  
2714  	razwi_info->razwi_info_available = true;
2715  }
2716  
2717  void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2718  			u8 flags, u64 *event_mask)
2719  {
2720  	hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
2721  
2722  	if (event_mask)
2723  		*event_mask |= HL_NOTIFIER_EVENT_RAZWI;
2724  }
2725  
2726  static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
2727  {
2728  	struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2729  	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
2730  	struct hl_vm_hash_node *hnode;
2731  	struct hl_userptr *userptr;
2732  	enum vm_type *vm_type;
2733  	struct hl_ctx *ctx;
2734  	u32 map_idx = 0;
2735  	int i;
2736  
2737  	/* Reset previous session count */
2738  	pgf_info->num_of_user_mappings = 0;
2739  
2740  	ctx = hl_get_compute_ctx(hdev);
2741  	if (!ctx) {
2742  		dev_err(hdev->dev, "Can't get user context for user mappings\n");
2743  		return;
2744  	}
2745  
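	/* Two passes under mem_hash_lock: first count the mappings relevant to
	 * the faulting MMU (VM_TYPE_USERPTR when is_pmmu, VM_TYPE_PHYS_PACK
	 * otherwise), then allocate the array and fill it.
	 */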
2746  	mutex_lock(&ctx->mem_hash_lock);
2747  	hash_for_each(ctx->mem_hash, i, hnode, node) {
2748  		vm_type = hnode->ptr;
2749  		if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
2750  				((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
2751  			pgf_info->num_of_user_mappings++;
2752  
2753  	}
2754  
2755  	if (!pgf_info->num_of_user_mappings)
2756  		goto finish;
2757  
2758  	/* In case we already allocated in previous session, need to release it before
2759  	 * allocating new buffer.
2760  	 */
2761  	vfree(pgf_info->user_mappings);
2762  	pgf_info->user_mappings =
2763  			vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
2764  	if (!pgf_info->user_mappings) {
2765  		pgf_info->num_of_user_mappings = 0;
2766  		goto finish;
2767  	}
2768  
2769  	hash_for_each(ctx->mem_hash, i, hnode, node) {
2770  		vm_type = hnode->ptr;
2771  		if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
2772  			userptr = hnode->ptr;
2773  			pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2774  			pgf_info->user_mappings[map_idx].size = userptr->size;
2775  			map_idx++;
2776  		} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
2777  			phys_pg_pack = hnode->ptr;
2778  			pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2779  			pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
2780  			map_idx++;
2781  		}
2782  	}
2783  finish:
2784  	mutex_unlock(&ctx->mem_hash_lock);
2785  	hl_ctx_put(ctx);
2786  }
2787  
2788  void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
2789  {
2790  	struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2791  
2792  	/* Capture only the first page fault */
2793  	if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
2794  		return;
2795  
2796  	pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
2797  	pgf_info->page_fault.addr = addr;
2798  	pgf_info->page_fault.engine_id = eng_id;
2799  	hl_capture_user_mappings(hdev, is_pmmu);
2800  
2801  	pgf_info->page_fault_info_available = true;
2802  }
2803  
2804  void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
2805  				u64 *event_mask)
2806  {
2807  	hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
2808  
2809  	if (event_mask)
2810  		*event_mask |=  HL_NOTIFIER_EVENT_PAGE_FAULT;
2811  }
2812  
2813  static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
2814  {
2815  	struct hw_err_info *info = &hdev->captured_err_info.hw_err;
2816  
2817  	/* Capture only the first HW err */
2818  	if (atomic_cmpxchg(&info->event_detected, 0, 1))
2819  		return;
2820  
2821  	info->event.timestamp = ktime_to_ns(ktime_get());
2822  	info->event.event_id = event_id;
2823  
2824  	info->event_info_available = true;
2825  }
2826  
2827  void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
2828  {
2829  	hl_capture_hw_err(hdev, event_id);
2830  
2831  	if (event_mask)
2832  		*event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
2833  }
2834  
2835  static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
2836  {
2837  	struct fw_err_info *info = &hdev->captured_err_info.fw_err;
2838  
2839  	/* Capture only the first FW error */
2840  	if (atomic_cmpxchg(&info->event_detected, 0, 1))
2841  		return;
2842  
2843  	info->event.timestamp = ktime_to_ns(ktime_get());
2844  	info->event.err_type = fw_info->err_type;
2845  	if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
2846  		info->event.event_id = fw_info->event_id;
2847  
2848  	info->event_info_available = true;
2849  }
2850  
2851  void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
2852  {
2853  	hl_capture_fw_err(hdev, info);
2854  
2855  	if (info->event_mask)
2856  		*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
2857  }
2858  
2859  void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)
2860  {
2861  	struct engine_err_info *info = &hdev->captured_err_info.engine_err;
2862  
2863  	/* Capture only the first engine error */
2864  	if (atomic_cmpxchg(&info->event_detected, 0, 1))
2865  		return;
2866  
2867  	info->event.timestamp = ktime_to_ns(ktime_get());
2868  	info->event.engine_id = engine_id;
2869  	info->event.error_count = error_count;
2870  	info->event_info_available = true;
2871  }
2872  
2873  void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
2874  {
2875  	vfree(captured_err_info->page_fault_info.user_mappings);
2876  	memset(captured_err_info, 0, sizeof(struct hl_error_info));
2877  	atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
2878  	captured_err_info->undef_opcode.write_enable = true;
2879  }
2880  
2881  void hl_init_cpu_for_irq(struct hl_device *hdev)
2882  {
2883  #ifdef CONFIG_NUMA
2884  	struct cpumask *available_mask = &hdev->irq_affinity_mask;
2885  	int numa_node = hdev->pdev->dev.numa_node, i;
2886  	static struct cpumask cpu_mask;
2887  
2888  	if (numa_node < 0)
2889  		return;
2890  
2891  	if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) {
2892  		dev_err(hdev->dev, "No available affinities in current numa node\n");
2893  		return;
2894  	}
2895  
2896  	/* Remove HT siblings */
2897  	for_each_cpu(i, &cpu_mask)
2898  		cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask);
2899  #endif
2900  }
2901  
2902  void hl_set_irq_affinity(struct hl_device *hdev, int irq)
2903  {
2904  	if (cpumask_empty(&hdev->irq_affinity_mask)) {
2905  		dev_dbg(hdev->dev, "affinity mask is empty\n");
2906  		return;
2907  	}
2908  
2909  	if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))
2910  		dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);
2911  }
2912  
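/* Called when an EQ heartbeat event arrives from the FW: records the
 * timestamp and marks that an EQ heartbeat was received, which the heartbeat
 * work checks before rescheduling itself (see device_heartbeat_schedule()).
 */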
2913  void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
2914  {
2915  	hdev->heartbeat_debug_info.heartbeat_event_counter++;
2916  	hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();
2917  	hdev->eq_heartbeat_received = true;
2918  }
2919  
2920  void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask)
2921  {
2922  	struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling;
2923  	ktime_t zero_time = ktime_set(0, 0);
2924  
2925  	mutex_lock(&clk_throttle->lock);
2926  
2927  	switch (event_type) {
2928  	case EQ_EVENT_POWER_EVT_START:
2929  		clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER;
2930  		clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER;
2931  		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
2932  		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
2933  		dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
2934  		break;
2935  
2936  	case EQ_EVENT_POWER_EVT_END:
2937  		clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER;
2938  		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
2939  		dev_dbg_ratelimited(hdev->dev, "Power envelope is safe, back to optimal clock\n");
2940  		break;
2941  
2942  	case EQ_EVENT_THERMAL_EVT_START:
2943  		clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL;
2944  		clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
2945  		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
2946  		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
2947  		*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
2948  		dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
2949  		break;
2950  
2951  	case EQ_EVENT_THERMAL_EVT_END:
2952  		clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL;
2953  		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
2954  		*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
2955  		dev_info_ratelimited(hdev->dev, "Thermal envelope is safe, back to optimal clock\n");
2956  		break;
2957  
2958  	default:
2959  		dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);
2960  		break;
2961  	}
2962  
2963  	mutex_unlock(&clk_throttle->lock);
2964  }
2965