Lines matching "sha" in the Marvell mvsas SAS driver initialization code (non-contiguous excerpt; omitted lines are marked "/* ... */").
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ...
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 * ...
 */
/* per-chip parameter table (mvs_chips[], indexed by the PCI table's driver_data) */
[chip_6320] = { 1, 2, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
[chip_6440] = { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
[chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
	/* ... */
[chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
	/* ... */
[chip_1300] = { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
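/*
 * A minimal sketch of the chip-info structure these initializers are assumed
 * to fill; field names follow how the values are used elsewhere in this
 * excerpt (e.g. slot_width in mvs_pci_alloc()), and the exact layout is an
 * assumption, not taken from this listing:
 *
 *	struct mvs_chip_info {
 *		u32 n_host;	// controller cores behind one PCI function
 *		u32 n_phy;	// phys per core
 *		u32 fis_offs;	// offset of the received-FIS area
 *		u32 fis_count;
 *		u32 srs_sz;
 *		u32 sg_width;	// log2 of the scatter/gather table size
 *		u32 slot_width;	// log2 of command slots (see 1L << slot_width below)
 *		const struct mvs_dispatch *dispatch;	// 64xx vs 94xx register ops
 *	};
 */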
/* fields from the driver's struct scsi_host_template (the real queue depth is set later in mvs_post_sas_ha_init()) */
	.can_queue = 1,
	/* ... */
	.track_queue_depth = 1,
/* mvs_phy_init() */
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	/* ... */
	phy->mvi = mvi;
	phy->port = NULL;
	timer_setup(&phy->timer, NULL, 0);
	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	/* ... */
	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
	sas_phy->lldd_phy = phy;
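	/*
	 * Note: each asd_sas_phy gets a back-pointer to the driver's mvs_phy
	 * via lldd_phy, shares the controller-wide SAS address buffer
	 * (mvi->sas_addr), and uses its own per-phy frame_rcvd buffer; libsas
	 * relies on these fields during port formation and discovery.
	 */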
/* mvs_free() */
	if (mvi->flags & MVF_FLAG_SOC)
	/* ... */
	dma_pool_destroy(mvi->dma_pool);
	/* ... */
	if (mvi->tx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->slot) * slot_nr,
				  mvi->slot, mvi->slot_dma);
	/* ... */
	if (mvi->bulk_buffer)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer, mvi->bulk_buffer_dma);
	if (mvi->bulk_buffer1)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
	/* ... */
	MVS_CHIP_DISP->chip_iounmap(mvi);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	list_for_each_entry(mwq, &mvi->wq_list, entry)
		cancel_delayed_work(&mwq->work_q);
	kfree(mvi->rsvd_tags);
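	/*
	 * Teardown mirrors mvs_alloc() below: the TX ring, RX FIS area, RX
	 * ring, slot array and the two TRASH_BUCKET_SIZE bounce buffers are
	 * all dma_alloc_coherent() allocations, so each is returned with a
	 * matching dma_free_coherent() using the same size and DMA handle.
	 */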
/* mvs_tasklet() */
	struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
	/* ... */
	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
	/* ... */
		BUG_ON(1);
	/* ... */
	stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
	/* ... */
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
	/* ... */
	MVS_CHIP_DISP->interrupt_enable(mvi);
/* mvs_interrupt() */
	struct sas_ha_struct *sha = opaque;
	/* ... */
	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	/* ... */
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
	/* ... */
	MVS_CHIP_DISP->interrupt_disable(mvi);
	/* ... */
	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
	/* ... */
		MVS_CHIP_DISP->interrupt_enable(mvi);
	/* ... */
	tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
	/* ... */
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, irq, stat);
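	/*
	 * The hard IRQ handler and mvs_tasklet() above split the work: when
	 * the driver is built with its tasklet option (CONFIG_SCSI_MVSAS_TASKLET
	 * in current kernels), the handler masks the controller interrupt,
	 * samples the status and defers per-core ISR processing to the
	 * tasklet, which re-enables interrupts when it is done; otherwise the
	 * handler walks every core's MVS_CHIP_DISP->isr() inline.
	 */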
/* mvs_alloc() */
	if (mvi->flags & MVF_FLAG_SOC)
	/* ... */
	spin_lock_init(&mvi->lock);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* ... */
		mvi->port[i].wide_port_phymap = 0;
		mvi->port[i].port_attached = 0;
		INIT_LIST_HEAD(&mvi->port[i].list);
	/* ... */
		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
		mvi->devices[i].dev_type = SAS_PHY_UNUSED;
		mvi->devices[i].device_id = i;
		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
	/* ... */
	mvi->tx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
	/* ... */
	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
	/* ... */
	mvi->rx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
	/* ... */
	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;
	/* ... */
	mvi->slot = dma_alloc_coherent(mvi->dev,
				       sizeof(*mvi->slot) * slot_nr,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
	/* ... */
	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
					      /* ... */
					      &mvi->bulk_buffer_dma, GFP_KERNEL);
	if (!mvi->bulk_buffer)
	/* ... */
	mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
					       /* ... */
					       &mvi->bulk_buffer_dma1, GFP_KERNEL);
	if (!mvi->bulk_buffer1)
	/* ... */
	sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
	mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
					/* ... */
	if (!mvi->dma_pool) {
	/* ... */
	return 1;
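	/*
	 * The trailing "return 1;" is the shared error path: mvs_alloc()
	 * appears to return 0 on success and 1 on any allocation failure,
	 * with everything allocated here being undone by mvs_free() above.
	 */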
/* mvs_ioremap() */
	struct pci_dev *pdev = mvi->pdev;
	if (bar_ex != -1) {
	/* ... */
			mvi->regs_ex = ioremap(res_start, res_len);
	/* ... */
			mvi->regs_ex = (void *)res_start;
		if (!mvi->regs_ex)
	/* ... */
		iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
	/* ... */
	mvi->regs = ioremap(res_start, res_len);
	/* ... */
	if (!mvi->regs) {
		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
			iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
	/* ... */
	return -1;
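	/*
	 * Two register windows are mapped: the main one ("regs") and an
	 * optional extra one ("regs_ex", skipped when bar_ex is passed as -1
	 * and only ioremap()ed when its resource is memory-mapped); on any
	 * failure the extra mapping is unwound and the function returns -1.
	 */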
/* mvs_pci_alloc() */
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	/* ... */
		(1L << mvs_chips[ent->driver_data].slot_width) *
	/* ... */
	mvi->pdev = pdev;
	mvi->dev = &pdev->dev;
	mvi->chip_id = ent->driver_data;
	mvi->chip = &mvs_chips[mvi->chip_id];
	INIT_LIST_HEAD(&mvi->wq_list);
	/* ... */
	((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
	((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
	/* ... */
	mvi->id = id;
	mvi->sas = sha;
	mvi->shost = shost;
	/* ... */
	mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL);
	if (!mvi->rsvd_tags)
	/* ... */
	if (MVS_CHIP_DISP->chip_ioremap(mvi))
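	/*
	 * The bare "(1L << ... slot_width) *" fragment above appears to be
	 * part of the allocation size for the per-core mvs_info: the
	 * command-slot bookkeeping array is sized to 1 << slot_width entries
	 * from the chip table, so 94xx parts (slot_width 11) get a larger
	 * slot array than 64xx parts (slot_width 9 or 10).
	 */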
/* pci_go_64() */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	/* ... */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	/* ... */
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
/* mvs_prep_sas_ha_init() */
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	/* ... */
	core_nr = chip_info->n_host;
	phy_nr = core_nr * chip_info->n_phy;
	/* ... */
	memset(sha, 0x00, sizeof(struct sas_ha_struct));
	/* ... */
	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->shost = shost;
	/* ... */
	sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
	if (!sha->lldd_ha)
	/* ... */
	((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
	/* ... */
	shost->transportt = mvs_stt;
	shost->max_id = MVS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	/* ... */
	return -1;
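	/*
	 * sha->lldd_ha is the LLDD's private hook in the libsas host
	 * structure; here it holds the driver's mvs_prv_info, whose n_host
	 * and mvi[] members are what mvs_tasklet(), mvs_interrupt() and the
	 * other helpers in this excerpt cast it back to.
	 */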
/* mvs_post_sas_ha_init() */
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	/* ... */
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < chip_info->n_phy; i++) {
			sha->sas_phy[j * chip_info->n_phy + i] =
				&mvi->phy[i].sas_phy;
			sha->sas_port[j * chip_info->n_phy + i] =
				&mvi->port[i].sas_port;
	/* ... */
	sha->sas_ha_name = DRV_NAME;
	sha->dev = mvi->dev;
	sha->sas_addr = &mvi->sas_addr[0];
	/* ... */
	sha->num_phys = nr_core * chip_info->n_phy;
	/* ... */
	if (mvi->flags & MVF_FLAG_SOC)
	/* ... */
	can_queue -= MVS_RSVD_SLOTS;
	/* ... */
	shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
	shost->can_queue = can_queue;
	mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
	sha->shost = mvi->shost;
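	/*
	 * The real queue depth replaces the placeholder .can_queue = 1 from
	 * the host template: the per-chip (or SoC, per the elided branch)
	 * slot count minus MVS_RSVD_SLOTS, which are presumably held back
	 * for the driver's internal commands.
	 */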
/* mvs_init_sas_add() */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
		mvi->phy[i].dev_sas_addr =
			cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
	/* ... */
	memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
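	/*
	 * Every phy gets the same hard-coded SAS WWN (0x5005043011ab0000),
	 * stored big-endian as the wire format expects, and phy 0's copy
	 * becomes the host adapter address that sas_phy->sas_addr and
	 * sha->sas_addr point at above.
	 */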
/* mvs_pci_init() */
	dev_printk(KERN_INFO, &pdev->dev,
	/* ... */
		rc = -ENOMEM;
	/* ... */
	chip = &mvs_chips[ent->driver_data];
	/* ... */
		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
	/* ... */
		rc = -ENOMEM;
	/* ... */
		rc = -ENOMEM;
	/* ... */
			rc = -ENOMEM;
	/* ... */
		memset(&mvi->hba_info_param, 0xFF,
	/* ... */
		mvi->instance = nhost;
		rc = MVS_CHIP_DISP->chip_init(mvi);
	/* ... */
	} while (nhost < chip->n_host);
	/* ... */
		struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha;
	/* ... */
		tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
	/* ... */
	rc = scsi_add_host(shost, &pdev->dev);
	/* ... */
	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
	/* ... */
	MVS_CHIP_DISP->interrupt_enable(mvi);
	/* ... */
	scsi_scan_host(mvi->shost);
	/* ... */
	scsi_remove_host(mvi->shost);
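	/*
	 * Probe order, as far as the excerpt shows: allocate one mvs_info per
	 * core (the do/while over chip->n_host) and run the chip-specific
	 * chip_init() on each, set up the shared tasklet, then scsi_add_host(),
	 * request_irq() (shared, with the sas_ha as the cookie, per the
	 * matching free_irq() in mvs_pci_remove()), enable controller
	 * interrupts and finally scan the host; scsi_remove_host() is part of
	 * the error unwinding.
	 */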
/* mvs_pci_remove() */
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	/* ... */
	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
	/* ... */
	tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
	/* ... */
	sas_unregister_ha(sha);
	sas_remove_host(mvi->shost);
	/* ... */
	MVS_CHIP_DISP->interrupt_disable(mvi);
	free_irq(mvi->pdev->irq, sha);
	/* ... */
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
	/* ... */
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
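	/*
	 * Teardown runs roughly in reverse of mvs_pci_init(): kill the
	 * tasklet, unregister from libsas and remove the SCSI host, mask and
	 * free the shared interrupt, release each core's mvs_info (presumably
	 * via mvs_free() in the elided loop body), then free the phy/port
	 * arrays and the sas_ha allocated during probe.
	 */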
/* interrupt_coalescing_store() (sysfs attribute) */
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	/* ... */
	if (sscanf(buffer, "%u", &val) != 1)
		return -EINVAL;
	/* ... */
	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
	/* ... */
		return -EINVAL;
	/* ... */
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		if (MVS_CHIP_DISP->tune_interrupt)
			MVS_CHIP_DISP->tune_interrupt(mvi,
				/* ... */
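	/*
	 * Writing a value to this attribute parses it with sscanf() and fans
	 * the new coalescing setting out to every core through the dispatch
	 * table's optional tune_interrupt() hook; chip families that do not
	 * provide the hook simply skip it.
	 */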
/* mvs_init() */
		return -ENOMEM;