Lines matching full:ap (every occurrence of the struct ata_port *ap argument or local variable reported for this driver)

295 static void nv_nf2_freeze(struct ata_port *ap);
296 static void nv_nf2_thaw(struct ata_port *ap);
297 static void nv_ck804_freeze(struct ata_port *ap);
298 static void nv_ck804_thaw(struct ata_port *ap);
305 static void nv_adma_irq_clear(struct ata_port *ap);
306 static int nv_adma_port_start(struct ata_port *ap);
307 static void nv_adma_port_stop(struct ata_port *ap);
309 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
310 static int nv_adma_port_resume(struct ata_port *ap);
312 static void nv_adma_freeze(struct ata_port *ap);
313 static void nv_adma_thaw(struct ata_port *ap);
314 static void nv_adma_error_handler(struct ata_port *ap);
317 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
319 static void nv_mcp55_thaw(struct ata_port *ap);
320 static void nv_mcp55_freeze(struct ata_port *ap);
321 static void nv_swncq_error_handler(struct ata_port *ap);
324 static int nv_swncq_port_start(struct ata_port *ap);
328 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
331 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
332 static int nv_swncq_port_resume(struct ata_port *ap);
598 static void nv_adma_register_mode(struct ata_port *ap) in nv_adma_register_mode() argument
600 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_register_mode()
615 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n", in nv_adma_register_mode()
629 ata_port_warn(ap, in nv_adma_register_mode()
636 static void nv_adma_mode(struct ata_port *ap) in nv_adma_mode() argument
638 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_mode()
659 ata_port_warn(ap, in nv_adma_mode()
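
The two register-mode helpers above warn when the ADMA engine fails to reach the expected state within a bounded poll. The fragment below is a minimal stand-alone sketch of that poll-with-timeout shape, assuming made-up bit values, a made-up retry budget, and a fake status reader; it is not the driver's code.

    #include <stdint.h>
    #include <stdio.h>

    #define ADMA_IDLE_BIT 0x0100          /* assumed status bit */
    #define POLL_LIMIT    20              /* assumed retry budget */

    /* Fake register read: pretends the engine goes idle after a few polls. */
    static uint16_t read_status(void)
    {
        static int calls;
        return ++calls > 3 ? ADMA_IDLE_BIT : 0;
    }

    int main(void)
    {
        uint16_t stat = read_status();
        int tries = 0;

        while (!(stat & ADMA_IDLE_BIT) && tries++ < POLL_LIMIT)
            stat = read_status();

        if (!(stat & ADMA_IDLE_BIT))
            fprintf(stderr, "timeout waiting for ADMA IDLE, stat=0x%hx\n", stat);
        else
            printf("ADMA engine idle after %d polls, stat=0x%hx\n", tries, stat);
        return 0;
    }
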
669 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_adma_device_configure() local
670 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_device_configure()
672 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_adma_device_configure()
685 spin_lock_irqsave(ap->lock, flags); in nv_adma_device_configure()
687 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { in nv_adma_device_configure()
703 nv_adma_register_mode(ap); in nv_adma_device_configure()
712 if (ap->port_no == 1) in nv_adma_device_configure()
730 port0 = ap->host->ports[0]->private_data; in nv_adma_device_configure()
731 port1 = ap->host->ports[1]->private_data; in nv_adma_device_configure()
748 ata_port_info(ap, in nv_adma_device_configure()
750 (unsigned long long)*ap->host->dev->dma_mask, in nv_adma_device_configure()
753 spin_unlock_irqrestore(ap->lock, flags); in nv_adma_device_configure()
760 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
764 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) in nv_adma_tf_read() argument
773 nv_adma_register_mode(ap); in nv_adma_tf_read()
775 ata_sff_tf_read(ap, tf); in nv_adma_tf_read()
810 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) in nv_adma_check_cpb() argument
812 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_check_cpb()
815 ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags); in nv_adma_check_cpb()
821 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_check_cpb()
844 ata_port_freeze(ap); in nv_adma_check_cpb()
846 ata_port_abort(ap); in nv_adma_check_cpb()
855 static int nv_host_intr(struct ata_port *ap, u8 irq_stat) in nv_host_intr() argument
857 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr()
861 ata_port_freeze(ap); in nv_host_intr()
871 ata_sff_check_status(ap); in nv_host_intr()
876 return ata_bmdma_port_intr(ap, qc); in nv_host_intr()
888 struct ata_port *ap = host->ports[i]; in nv_adma_interrupt() local
889 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_interrupt()
901 handled += nv_host_intr(ap, irq_stat); in nv_adma_interrupt()
909 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
915 handled += nv_host_intr(ap, irq_stat); in nv_adma_interrupt()
924 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && in nv_adma_interrupt()
948 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_interrupt()
966 ata_port_freeze(ap); in nv_adma_interrupt()
979 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
981 ap->link.active_tag; in nv_adma_interrupt()
983 check_commands = ap->link.sactive; in nv_adma_interrupt()
989 rc = nv_adma_check_cpb(ap, pos, in nv_adma_interrupt()
997 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); in nv_adma_interrupt()
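
The interrupt path above finishes commands with ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask): done_mask collects the tags the controller reports complete, and XOR-ing it into the current active mask yields the new active mask libata expects. A small stand-alone illustration of that arithmetic, with made-up tag values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t active     = 0x1d;      /* tags 0, 2, 3, 4 in flight */
        uint64_t done_mask  = 0x0c;      /* controller reports tags 2 and 3 done */
        uint64_t new_active = active ^ done_mask;

        printf("active     0x%llx\n", (unsigned long long)active);
        printf("done_mask  0x%llx\n", (unsigned long long)done_mask);
        printf("new_active 0x%llx  (tags 0 and 4 still outstanding)\n",
               (unsigned long long)new_active);
        return 0;
    }
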
1015 static void nv_adma_freeze(struct ata_port *ap) in nv_adma_freeze() argument
1017 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_freeze()
1021 nv_ck804_freeze(ap); in nv_adma_freeze()
1027 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_freeze()
1028 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_freeze()
1037 static void nv_adma_thaw(struct ata_port *ap) in nv_adma_thaw() argument
1039 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_thaw()
1043 nv_ck804_thaw(ap); in nv_adma_thaw()
1055 static void nv_adma_irq_clear(struct ata_port *ap) in nv_adma_irq_clear() argument
1057 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_irq_clear()
1062 ata_bmdma_irq_clear(ap); in nv_adma_irq_clear()
1067 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_irq_clear()
1068 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_irq_clear()
1075 if (ap->port_no == 0) { in nv_adma_irq_clear()
1082 pp = ap->host->ports[0]->private_data; in nv_adma_irq_clear()
1084 pp = ap->host->ports[1]->private_data; in nv_adma_irq_clear()
1090 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1096 static int nv_adma_port_start(struct ata_port *ap) in nv_adma_port_start() argument
1098 struct device *dev = ap->host->dev; in nv_adma_port_start()
1116 rc = ata_bmdma_port_start(ap); in nv_adma_port_start()
1124 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + in nv_adma_port_start()
1125 ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_port_start()
1127 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; in nv_adma_port_start()
1129 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); in nv_adma_port_start()
1164 ap->private_data = pp; in nv_adma_port_start()
1190 static void nv_adma_port_stop(struct ata_port *ap) in nv_adma_port_stop() argument
1192 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_stop()
1199 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg) in nv_adma_port_suspend() argument
1201 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_suspend()
1205 nv_adma_register_mode(ap); in nv_adma_port_suspend()
1216 static int nv_adma_port_resume(struct ata_port *ap) in nv_adma_port_resume() argument
1218 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_resume()
1251 static void nv_adma_setup_port(struct ata_port *ap) in nv_adma_setup_port() argument
1253 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_adma_setup_port()
1254 struct ata_ioports *ioport = &ap->ioaddr; in nv_adma_setup_port()
1256 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_setup_port()
1315 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1333 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1350 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1358 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1396 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1413 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1416 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1444 struct ata_port *ap = host->ports[i]; in nv_generic_interrupt() local
1447 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1449 handled += ata_bmdma_port_intr(ap, qc); in nv_generic_interrupt()
1455 ap->ops->sff_check_status(ap); in nv_generic_interrupt()
1509 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_read()
1518 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_write()
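
nv_scr_read()/nv_scr_write() above index the SATA status/control block as 32-bit words at scr_addr + sc_reg * 4 (the SWNCQ code later caches scr_addr + 4 * SCR_ACTIVE the same way). The sketch below only prints those offsets; the index values follow the usual SStatus/SError/SControl/SActive ordering and should be read as assumptions of the example.

    #include <stdio.h>

    enum { SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2, SCR_ACTIVE = 3 };

    int main(void)
    {
        const char *name[] = { "SStatus", "SError", "SControl", "SActive" };

        for (int reg = SCR_STATUS; reg <= SCR_ACTIVE; reg++)
            printf("%-8s -> scr_addr + 0x%02x\n", name[reg], reg * 4);
        return 0;
    }
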
1530 if (!(link->ap->pflags & ATA_PFLAG_LOADING) && in nv_hardreset()
1554 static void nv_nf2_freeze(struct ata_port *ap) in nv_nf2_freeze() argument
1556 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_freeze()
1557 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_freeze()
1565 static void nv_nf2_thaw(struct ata_port *ap) in nv_nf2_thaw() argument
1567 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_thaw()
1568 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_thaw()
1578 static void nv_ck804_freeze(struct ata_port *ap) in nv_ck804_freeze() argument
1580 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_freeze()
1581 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_freeze()
1589 static void nv_ck804_thaw(struct ata_port *ap) in nv_ck804_thaw() argument
1591 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_thaw()
1592 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_thaw()
1602 static void nv_mcp55_freeze(struct ata_port *ap) in nv_mcp55_freeze() argument
1604 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_freeze()
1605 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_freeze()
1615 static void nv_mcp55_thaw(struct ata_port *ap) in nv_mcp55_thaw() argument
1617 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_thaw()
1618 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_thaw()
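
Each freeze/thaw/irq-clear helper above addresses its port's slice of a shared interrupt register by shifting a per-port mask, as in NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), with a wider shift on MCP55. The runnable sketch below shows that packing; the field widths and masks are assumptions standing in for the driver's real constants.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t port_mask(uint32_t all_bits, int port_no, int shift)
    {
        return all_bits << (port_no * shift);
    }

    int main(void)
    {
        /* assumed: a 4-bit field per port (NF2/CK804 style) and a
         * 16-bit field per port (MCP55 style) */
        for (int port = 0; port < 2; port++)
            printf("port %d: narrow ack 0x%08x, wide ack 0x%08x\n",
                   port,
                   port_mask(0x0f, port, 4),
                   port_mask(0xffff, port, 16));
        return 0;
    }
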
1628 static void nv_adma_error_handler(struct ata_port *ap) in nv_adma_error_handler() argument
1630 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_error_handler()
1636 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { in nv_adma_error_handler()
1644 ata_port_err(ap, in nv_adma_error_handler()
1653 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || in nv_adma_error_handler()
1654 ap->link.sactive & (1 << i)) in nv_adma_error_handler()
1655 ata_port_err(ap, in nv_adma_error_handler()
1662 nv_adma_register_mode(ap); in nv_adma_error_handler()
1681 ata_bmdma_error_handler(ap); in nv_adma_error_handler()
1684 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) in nv_swncq_qc_to_dq() argument
1686 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_to_dq()
1695 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap) in nv_swncq_qc_from_dq() argument
1697 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_from_dq()
1709 return ata_qc_from_tag(ap, tag); in nv_swncq_qc_from_dq()
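
nv_swncq_qc_to_dq()/nv_swncq_qc_from_dq() above park commands that cannot be issued yet and later pull them back out by tag in FIFO order. The structure below is a stand-alone sketch of that defer-queue idea; the layout, names, and depth are illustrative assumptions, not the driver's exact definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define DQ_DEPTH 32                    /* assumed: one slot per NCQ tag */

    struct defer_queue {
        unsigned int head, tail;
        uint8_t tag[DQ_DEPTH];
    };

    static void dq_push(struct defer_queue *dq, uint8_t tag)
    {
        dq->tag[dq->tail++ % DQ_DEPTH] = tag;
    }

    static int dq_pop(struct defer_queue *dq, uint8_t *tag)
    {
        if (dq->head == dq->tail)
            return -1;                     /* nothing deferred */
        *tag = dq->tag[dq->head++ % DQ_DEPTH];
        return 0;
    }

    int main(void)
    {
        struct defer_queue dq = { 0 };
        uint8_t tag;

        dq_push(&dq, 3);
        dq_push(&dq, 7);
        while (dq_pop(&dq, &tag) == 0)
            printf("re-issue deferred tag %u\n", tag);
        return 0;
    }
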
1712 static void nv_swncq_fis_reinit(struct ata_port *ap) in nv_swncq_fis_reinit() argument
1714 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fis_reinit()
1722 static void nv_swncq_pp_reinit(struct ata_port *ap) in nv_swncq_pp_reinit() argument
1724 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_pp_reinit()
1732 nv_swncq_fis_reinit(ap); in nv_swncq_pp_reinit()
1735 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis) in nv_swncq_irq_clear() argument
1737 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_irq_clear()
1742 static void __ata_bmdma_stop(struct ata_port *ap) in __ata_bmdma_stop() argument
1746 qc.ap = ap; in __ata_bmdma_stop()
1750 static void nv_swncq_ncq_stop(struct ata_port *ap) in nv_swncq_ncq_stop() argument
1752 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_ncq_stop()
1757 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n", in nv_swncq_ncq_stop()
1758 ap->qc_active, ap->link.sactive); in nv_swncq_ncq_stop()
1759 ata_port_err(ap, in nv_swncq_ncq_stop()
1765 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n", in nv_swncq_ncq_stop()
1766 ap->ops->sff_check_status(ap), in nv_swncq_ncq_stop()
1767 ioread8(ap->ioaddr.error_addr)); in nv_swncq_ncq_stop()
1772 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n"); in nv_swncq_ncq_stop()
1782 ata_port_err(ap, in nv_swncq_ncq_stop()
1791 nv_swncq_pp_reinit(ap); in nv_swncq_ncq_stop()
1792 ap->ops->sff_irq_clear(ap); in nv_swncq_ncq_stop()
1793 __ata_bmdma_stop(ap); in nv_swncq_ncq_stop()
1794 nv_swncq_irq_clear(ap, 0xffff); in nv_swncq_ncq_stop()
1797 static void nv_swncq_error_handler(struct ata_port *ap) in nv_swncq_error_handler() argument
1799 struct ata_eh_context *ehc = &ap->link.eh_context; in nv_swncq_error_handler()
1801 if (ap->link.sactive) { in nv_swncq_error_handler()
1802 nv_swncq_ncq_stop(ap); in nv_swncq_error_handler()
1806 ata_bmdma_error_handler(ap); in nv_swncq_error_handler()
1810 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg) in nv_swncq_port_suspend() argument
1812 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_suspend()
1829 static int nv_swncq_port_resume(struct ata_port *ap) in nv_swncq_port_resume() argument
1831 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_resume()
1877 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_swncq_device_configure() local
1878 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_swncq_device_configure()
1890 dev = &ap->link.device[sdev->id]; in nv_swncq_device_configure()
1891 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) in nv_swncq_device_configure()
1921 static int nv_swncq_port_start(struct ata_port *ap) in nv_swncq_port_start() argument
1923 struct device *dev = ap->host->dev; in nv_swncq_port_start()
1924 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_start()
1929 rc = ata_bmdma_port_start(ap); in nv_swncq_port_start()
1942 ap->private_data = pp; in nv_swncq_port_start()
1943 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; in nv_swncq_port_start()
1944 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1945 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1967 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg() local
1969 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fill_sg()
2001 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, in nv_swncq_issue_atacmd() argument
2004 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_issue_atacmd()
2015 trace_ata_tf_load(ap, &qc->tf); in nv_swncq_issue_atacmd()
2016 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2017 trace_ata_exec_command(ap, &qc->tf, qc->hw_tag); in nv_swncq_issue_atacmd()
2018 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2025 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue() local
2026 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_issue()
2032 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_qc_issue()
2034 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ in nv_swncq_qc_issue()
2039 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis) in nv_swncq_hotplug() argument
2042 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_hotplug()
2047 sata_scr_read(&ap->link, SCR_ERROR, &serror); in nv_swncq_hotplug()
2048 sata_scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_hotplug()
2061 ata_port_freeze(ap); in nv_swncq_hotplug()
2064 static int nv_swncq_sdbfis(struct ata_port *ap) in nv_swncq_sdbfis() argument
2067 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_sdbfis()
2068 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_sdbfis()
2074 host_stat = ap->ops->bmdma_status(ap); in nv_swncq_sdbfis()
2075 trace_ata_bmdma_status(ap, host_stat); in nv_swncq_sdbfis()
2085 ap->ops->sff_irq_clear(ap); in nv_swncq_sdbfis()
2086 __ata_bmdma_stop(ap); in nv_swncq_sdbfis()
2095 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); in nv_swncq_sdbfis()
2097 if (!ap->qc_active) { in nv_swncq_sdbfis()
2098 ata_port_dbg(ap, "over\n"); in nv_swncq_sdbfis()
2099 nv_swncq_pp_reinit(ap); in nv_swncq_sdbfis()
2113 ata_port_dbg(ap, "QC: qc_active 0x%llx," in nv_swncq_sdbfis()
2116 ap->qc_active, pp->qc_active, in nv_swncq_sdbfis()
2120 nv_swncq_fis_reinit(ap); in nv_swncq_sdbfis()
2123 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2124 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2130 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_sdbfis()
2132 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2138 static inline u32 nv_swncq_tag(struct ata_port *ap) in nv_swncq_tag() argument
2140 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_tag()
2147 static void nv_swncq_dmafis(struct ata_port *ap) in nv_swncq_dmafis() argument
2153 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_dmafis()
2155 __ata_bmdma_stop(ap); in nv_swncq_dmafis()
2156 tag = nv_swncq_tag(ap); in nv_swncq_dmafis()
2158 ata_port_dbg(ap, "dma setup tag 0x%x\n", tag); in nv_swncq_dmafis()
2159 qc = ata_qc_from_tag(ap, tag); in nv_swncq_dmafis()
2168 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); in nv_swncq_dmafis()
2171 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2176 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2179 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) in nv_swncq_host_interrupt() argument
2181 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_host_interrupt()
2183 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_host_interrupt()
2187 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2188 nv_swncq_irq_clear(ap, fis); in nv_swncq_host_interrupt()
2192 if (ata_port_is_frozen(ap)) in nv_swncq_host_interrupt()
2196 nv_swncq_hotplug(ap, fis); in nv_swncq_host_interrupt()
2203 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) in nv_swncq_host_interrupt()
2205 ap->ops->scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_host_interrupt()
2213 ata_port_freeze(ap); in nv_swncq_host_interrupt()
2226 ata_port_dbg(ap, "SWNCQ: qc_active 0x%X " in nv_swncq_host_interrupt()
2230 if (nv_swncq_sdbfis(ap) < 0) in nv_swncq_host_interrupt()
2249 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2254 ata_port_dbg(ap, "send next command\n"); in nv_swncq_host_interrupt()
2255 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_host_interrupt()
2256 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_host_interrupt()
2265 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); in nv_swncq_host_interrupt()
2267 nv_swncq_dmafis(ap); in nv_swncq_host_interrupt()
2274 ata_port_freeze(ap); in nv_swncq_host_interrupt()
2291 struct ata_port *ap = host->ports[i]; in nv_swncq_interrupt() local
2293 if (ap->link.sactive) { in nv_swncq_interrupt()
2294 nv_swncq_host_interrupt(ap, (u16)irq_stat); in nv_swncq_interrupt()
2298 nv_swncq_irq_clear(ap, 0xfff0); in nv_swncq_interrupt()
2300 handled += nv_host_intr(ap, (u8)irq_stat); in nv_swncq_interrupt()