Lines Matching +full:rc +full:- +full:map +full:- +full:name
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2019 HabanaLabs, Ltd.
23 * hl_pci_bars_map() - Map PCI BARs.
25 * @name: Array of BAR names.
26 * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
28 * Request PCI regions and map them to kernel virtual addresses.
30 * Return: 0 on success, non-zero for failure.
32 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
35 struct pci_dev *pdev = hdev->pdev;
36 int rc, i, bar;
38 rc = pci_request_regions(pdev, HL_NAME);
39 if (rc) {
40 dev_err(hdev->dev, "Cannot obtain PCI resources\n");
41 return rc;
45 bar = i * 2; /* 64-bit BARs */
46 hdev->pcie_bar[bar] = is_wc[i] ?
49 if (!hdev->pcie_bar[bar]) {
50 dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
51 is_wc[i] ? "_wc" : "", name[i]);
52 rc = -ENODEV;
60 for (i = 2 ; i >= 0 ; i--) {
61 bar = i * 2; /* 64-bit BARs */
62 if (hdev->pcie_bar[bar])
63 iounmap(hdev->pcie_bar[bar]);
68 return rc;
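
For context, a minimal sketch of how an ASIC-specific pci_bars_map callback might wrap this helper. The callback name, the BAR names and the write-combine flags are illustrative placeholders, not values taken from this listing:

	/* Hypothetical ASIC callback; names and WC flags are placeholders */
	static int my_asic_pci_bars_map(struct hl_device *hdev)
	{
		static const char * const name[] = {"CFG_SRAM", "MSIX", "DDR"};
		bool is_wc[3] = {false, false, true}; /* e.g. map only the data BAR write-combined */

		return hl_pci_bars_map(hdev, name, is_wc);
	}
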
72 * hl_pci_bars_unmap() - Unmap PCI BARs.
79 struct pci_dev *pdev = hdev->pdev;
82 for (i = 2 ; i >= 0 ; i--) {
83 bar = i * 2; /* 64-bit BARs */
84 iounmap(hdev->pcie_bar[bar]);
92 struct pci_dev *pdev = hdev->pdev;
97 if (hdev->pldm)
126 trace_habanalabs_elbi_read(&hdev->pdev->dev, (u32) addr, val);
132 dev_err(hdev->dev, "Error reading from ELBI\n");
133 return -EIO;
137 dev_err(hdev->dev, "ELBI read didn't finish in time\n");
138 return -EIO;
141 dev_err(hdev->dev, "ELBI read has undefined bits in status\n");
142 return -EIO;
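
Only the error branches of hl_pci_elbi_read() survive in this listing; the body is a poll loop over ELBI registers exposed in PCI config space, and the check at line 97 suggests the poll budget is simply stretched when running on the PLDM simulation platform. A minimal sketch of the assumed handshake; the mmPCI_CONFIG_ELBI_* register names, the DONE encoding and the 10 ms budget are assumptions, not taken from this listing:

	/* Sketch of the assumed ELBI read handshake; register names are assumptions */
	static int elbi_read_sketch(struct pci_dev *pdev, u64 addr, u32 *data)
	{
		ktime_t timeout = ktime_add_ms(ktime_get(), 10);
		u32 sts = 0;

		/* program the target address and kick off a read transaction */
		pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
		pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0);

		/* poll the status register until the controller reports completion */
		do {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &sts);
			if (sts & PCI_CONFIG_ELBI_STS_MASK)
				break;
			usleep_range(300, 500);
		} while (ktime_compare(ktime_get(), timeout) < 0);

		if ((sts & PCI_CONFIG_ELBI_STS_MASK) != PCI_CONFIG_ELBI_STS_DONE)
			return -EIO;

		/* transaction completed: fetch the returned value */
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
		return 0;
	}
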
146 * hl_pci_elbi_write() - Write through the ELBI interface.
155 struct pci_dev *pdev = hdev->pdev;
160 if (hdev->pldm)
189 trace_habanalabs_elbi_write(&hdev->pdev->dev, (u32) addr, val);
194 return -EIO;
197 dev_err(hdev->dev, "ELBI write didn't finish in time\n");
198 return -EIO;
201 dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
202 return -EIO;
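
The write path presumably mirrors the read handshake, writing the data and address up front and then setting a write bit in the control register before polling for completion; the register and flag names below are again assumptions:

	/* Assumed kick-off sequence for an ELBI write (names are assumptions) */
	static void elbi_write_kick_sketch(struct pci_dev *pdev, u64 addr, u32 data)
	{
		pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
		pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
		pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				       PCI_CONFIG_ELBI_CTRL_WRITE);
		/* the caller then polls mmPCI_CONFIG_ELBI_STS for DONE, as in the read sketch */
	}
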
206 * hl_pci_iatu_write() - iATU write routine.
215 struct asic_fixed_properties *prop = &hdev->asic_prop;
217 int rc;
224 hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
226 rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
229 if (rc)
230 return -EIO;
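
A short sketch of how the two helpers above combine, following the pattern visible in the region-setup code below: hl_pci_iatu_write() unlocks the DBI space through the aux register (the 0x00300000 write at line 224) and writes one iATU register via ELBI, and the caller writes 0 back to the same aux register once a whole region is programmed (lines 290 and 339). The 0x300 offset and region_base value are placeholders:

	/* Placeholder offset/value; only the call pattern is the point here */
	static int iatu_single_write_sketch(struct hl_device *hdev, u64 region_base)
	{
		struct asic_fixed_properties *prop = &hdev->asic_prop;
		int rc;

		rc = hl_pci_iatu_write(hdev, 0x300, lower_32_bits(region_base));

		/* restore the aux DBI register after programming, as lines 290/339 do */
		rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

		return rc ? -EIO : 0;
	}
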
236 * hl_pci_set_inbound_region() - Configure inbound region
248 struct asic_fixed_properties *prop = &hdev->asic_prop;
251 int rc = 0;
256 if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
257 bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
258 region_base = bar_phys_base + pci_region->offset_in_bar;
259 region_end_address = region_base + pci_region->size - 1;
261 rc |= hl_pci_iatu_write(hdev, offset + 0x8,
263 rc |= hl_pci_iatu_write(hdev, offset + 0xC,
265 rc |= hl_pci_iatu_write(hdev, offset + 0x10,
270 rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr));
271 rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr));
274 rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
278 ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, pci_region->mode);
281 if (pci_region->mode == PCI_BAR_MATCH_MODE)
282 ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, pci_region->bar);
284 rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
290 hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
292 if (rc)
293 dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
294 pci_region->bar, pci_region->addr);
296 return rc;
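
A hypothetical caller covering the two match modes handled above, assuming a region-descriptor struct with the fields referenced in this function (mode, bar, offset_in_bar, addr, size) and an (hdev, region index, descriptor) signature; the struct name, BAR indices, addresses and sizes are placeholders:

	static int init_iatu_sketch(struct hl_device *hdev)
	{
		struct hl_inbound_pci_region inbound_region;
		int rc;

		/* BAR-match mode: route all accesses through a BAR to a fixed device address */
		inbound_region.mode = PCI_BAR_MATCH_MODE;
		inbound_region.bar = 0;                /* placeholder BAR index */
		inbound_region.addr = 0x7ff0000000ull; /* placeholder device-side target */
		rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
		if (rc)
			return rc;

		/* address-match mode additionally needs the window inside the BAR */
		inbound_region.mode = PCI_ADDRESS_MATCH_MODE;
		inbound_region.bar = 4;                /* placeholder BAR index */
		inbound_region.offset_in_bar = 0;
		inbound_region.size = 0x10000000ull;   /* placeholder window size */
		inbound_region.addr = 0x8000000000ull; /* placeholder device-side target */
		return hl_pci_set_inbound_region(hdev, 1, &inbound_region);
	}
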
300 * hl_pci_set_outbound_region() - Configure outbound region 0
311 struct asic_fixed_properties *prop = &hdev->asic_prop;
313 int rc = 0;
317 pci_region->addr + pci_region->size - 1;
318 rc |= hl_pci_iatu_write(hdev, 0x008,
319 lower_32_bits(pci_region->addr));
320 rc |= hl_pci_iatu_write(hdev, 0x00C,
321 upper_32_bits(pci_region->addr));
322 rc |= hl_pci_iatu_write(hdev, 0x010,
324 rc |= hl_pci_iatu_write(hdev, 0x014, 0);
326 rc |= hl_pci_iatu_write(hdev, 0x018, 0);
328 rc |= hl_pci_iatu_write(hdev, 0x020,
331 rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
333 rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
339 hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
341 return rc;
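
The outbound direction only needs a host window base and size, since this function always programs outbound region 0 (per its kernel-doc) through the fixed register offsets 0x000-0x020 above. A hypothetical caller, with the struct name and window values as placeholders:

	static int set_host_window_sketch(struct hl_device *hdev)
	{
		struct hl_outbound_pci_region outbound_region;

		outbound_region.addr = 0x8000000000ull; /* placeholder host window base */
		outbound_region.size = 0x1000000000ull; /* placeholder host window size */

		return hl_pci_set_outbound_region(hdev, &outbound_region);
	}
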
345 * hl_get_pci_memory_region() - get PCI region for given address
357 struct pci_mem_region *region = &hdev->pci_mem_region[i];
359 if (!region->used)
362 if ((addr >= region->region_base) &&
363 (addr < region->region_base + region->region_size))
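
A hypothetical caller, assuming the function returns the index of the matching region and a sentinel when the address falls outside every used region; the enum and sentinel names below are assumptions:

	static bool addr_is_pci_backed_sketch(struct hl_device *hdev, u64 addr)
	{
		enum pci_region rtype = hl_get_pci_memory_region(hdev, addr);

		/* PCI_REGION_NUMBER is an assumed "no match" sentinel */
		return rtype != PCI_REGION_NUMBER;
	}
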
371 * hl_pci_init() - PCI initialization code.
374 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
376 * Return: 0 on success, non-zero for failure.
380 struct asic_fixed_properties *prop = &hdev->asic_prop;
381 struct pci_dev *pdev = hdev->pdev;
382 int rc;
384 rc = pci_enable_device_mem(pdev);
385 if (rc) {
386 dev_err(hdev->dev, "can't enable PCI device\n");
387 return rc;
392 rc = hdev->asic_funcs->pci_bars_map(hdev);
393 if (rc) {
394 dev_err(hdev->dev, "Failed to map PCI BAR addresses\n");
398 rc = hdev->asic_funcs->init_iatu(hdev);
399 if (rc) {
400 dev_err(hdev->dev, "PCI controller was not initialized successfully\n");
405 if (hdev->asic_prop.iatu_done_by_fw)
408 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
409 if (rc) {
410 dev_err(hdev->dev,
412 prop->dma_mask, rc);
416 dma_set_max_seg_size(&pdev->dev, U32_MAX);
425 return rc;
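
A hypothetical probe/remove pairing of the two entry points. On an internal failure hl_pci_init() presumably unwinds (unmapping BARs, disabling the device) before returning rc at line 425, so a caller only needs a single check:

	static int my_probe_sketch(struct hl_device *hdev)
	{
		int rc;

		rc = hl_pci_init(hdev);
		if (rc)
			return rc; /* hl_pci_init() is assumed to have cleaned up after itself */

		/* ... bring up the rest of the device ... */

		return 0;
	}

	static void my_remove_sketch(struct hl_device *hdev)
	{
		/* ... tear down the rest of the device ... */

		hl_pci_fini(hdev); /* presumably unmaps the BARs and disables the PCI device */
	}
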
429 * hl_pci_fini() - PCI finalization code.
438 pci_disable_device(hdev->pdev);