Lines Matching refs:nvmeq (NVMe PCIe driver, drivers/nvme/host/pci.c); each entry shows the source line number, the matching line, and the containing function.
302 struct nvme_queue *nvmeq, int qid) in nvme_dbbuf_init() argument
307 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
308 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
309 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
310 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
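For context, a sketch of nvme_dbbuf_init() reconstructed around the fragments above, following the mainline driver (the early-return guard and the sq_idx()/cq_idx() helpers come from upstream and may differ slightly by kernel version). Each I/O queue gets pointers into the shared shadow-doorbell and event-index buffers negotiated with the device:

/* Doorbell-buffer slot layout: the SQ and CQ entries for a queue sit next
 * to each other, scaled by the controller's doorbell stride. */
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
        return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
        return (qid * 2 + 1) * stride;
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
                            struct nvme_queue *nvmeq, int qid)
{
        /* Skip if the shadow doorbell feature is not set up; the admin
         * queue (qid 0) never uses it. */
        if (!dev->dbbuf_dbs || !qid)
                return;

        nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
        nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
        nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
        nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}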
313 static void nvme_dbbuf_free(struct nvme_queue *nvmeq) in nvme_dbbuf_free() argument
315 if (!nvmeq->qid) in nvme_dbbuf_free()
318 nvmeq->dbbuf_sq_db = NULL; in nvme_dbbuf_free()
319 nvmeq->dbbuf_cq_db = NULL; in nvme_dbbuf_free()
320 nvmeq->dbbuf_sq_ei = NULL; in nvme_dbbuf_free()
321 nvmeq->dbbuf_cq_ei = NULL; in nvme_dbbuf_free()
399 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx() local
404 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
412 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx() local
415 hctx->driver_data = nvmeq; in nvme_init_hctx()
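nvme_admin_init_hctx() and nvme_init_hctx() are where hctx->driver_data gets wired up, which is why the functions below can simply fetch the queue via hctx->driver_data or req->mq_hctx->driver_data. A sketch based on the upstream driver; the WARN_ON sanity checks and the to_nvme_dev() accessor may differ by kernel version:

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                unsigned int hctx_idx)
{
        struct nvme_dev *dev = to_nvme_dev(data);
        struct nvme_queue *nvmeq = &dev->queues[0];   /* admin queue is queue 0 */

        WARN_ON(hctx_idx != 0);
        WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

        hctx->driver_data = nvmeq;
        return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int hctx_idx)
{
        struct nvme_dev *dev = to_nvme_dev(data);
        /* I/O hardware contexts map to queues 1..n; queue 0 is the admin queue. */
        struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

        WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
        hctx->driver_data = nvmeq;
        return 0;
}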
471 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) in nvme_write_sq_db() argument
474 u16 next_tail = nvmeq->sq_tail + 1; in nvme_write_sq_db()
476 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
478 if (next_tail != nvmeq->last_sq_tail) in nvme_write_sq_db()
482 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, in nvme_write_sq_db()
483 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) in nvme_write_sq_db()
484 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_write_sq_db()
485 nvmeq->last_sq_tail = nvmeq->sq_tail; in nvme_write_sq_db()
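Reconstructed body of nvme_write_sq_db(), per the mainline driver: when the caller is not at the end of a batch the doorbell write is deferred, and nvme_dbbuf_update_and_check_event() lets a shadow-doorbell-aware device skip the MMIO write entirely.

static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
        if (!write_sq) {
                /* Not the last command in the batch: only ring the doorbell
                 * if the tail is about to catch up with the last value the
                 * device was told about. */
                u16 next_tail = nvmeq->sq_tail + 1;

                if (next_tail == nvmeq->q_depth)
                        next_tail = 0;
                if (next_tail != nvmeq->last_sq_tail)
                        return;
        }

        /* With shadow doorbells the MMIO write can often be elided. */
        if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
                        nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
                writel(nvmeq->sq_tail, nvmeq->q_db);
        nvmeq->last_sq_tail = nvmeq->sq_tail;
}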
488 static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, in nvme_sq_copy_cmd() argument
491 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), in nvme_sq_copy_cmd()
493 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_sq_copy_cmd()
494 nvmeq->sq_tail = 0; in nvme_sq_copy_cmd()
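nvme_sq_copy_cmd() copies one command into the submission ring and advances the tail with wrap-around; nvmeq->sqes holds the log2 of the SQ entry size, so the shift computes the byte offset. Sketch (recent kernels additionally wrap cmd in absolute_pointer()):

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
                                    struct nvme_command *cmd)
{
        /* sq_tail << sqes == tail * SQ-entry-size, since sqes is a log2 value */
        memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
               cmd, sizeof(*cmd));
        if (++nvmeq->sq_tail == nvmeq->q_depth)
                nvmeq->sq_tail = 0;
}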
499 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs() local
501 spin_lock(&nvmeq->sq_lock); in nvme_commit_rqs()
502 if (nvmeq->sq_tail != nvmeq->last_sq_tail) in nvme_commit_rqs()
503 nvme_write_sq_db(nvmeq, true); in nvme_commit_rqs()
504 spin_unlock(&nvmeq->sq_lock); in nvme_commit_rqs()
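nvme_commit_rqs() is the blk-mq ->commit_rqs hook; it flushes any doorbell write that nvme_write_sq_db(..., false) deferred earlier in the batch. Sketch per mainline:

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct nvme_queue *nvmeq = hctx->driver_data;

        spin_lock(&nvmeq->sq_lock);
        if (nvmeq->sq_tail != nvmeq->last_sq_tail)
                nvme_write_sq_db(nvmeq, true);
        spin_unlock(&nvmeq->sq_lock);
}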
510 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_use_sgls() local
517 if (!nvmeq->qid) in nvme_pci_use_sgls()
777 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_map_data() local
786 if (nvmeq->qid && sgl_threshold && in nvme_map_data()
879 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq() local
880 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
889 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_queue_rq()
898 spin_lock(&nvmeq->sq_lock); in nvme_queue_rq()
899 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_queue_rq()
900 nvme_write_sq_db(nvmeq, bd->last); in nvme_queue_rq()
901 spin_unlock(&nvmeq->sq_lock); in nvme_queue_rq()
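The fragments above are the fast path of nvme_queue_rq(). A sketch of the whole function following the mainline driver; nvme_prep_rq() (DMA mapping and command setup) and nvme_fail_nonready_command() are existing driver/core helpers assumed from context:

static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct nvme_queue *nvmeq = hctx->driver_data;
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;

        /* Lets requests drain cleanly if the queue is being torn down. */
        if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
                return BLK_STS_IOERR;

        if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
                return nvme_fail_nonready_command(&dev->ctrl, req);

        ret = nvme_prep_rq(dev, req);
        if (unlikely(ret))
                return ret;

        spin_lock(&nvmeq->sq_lock);
        nvme_sq_copy_cmd(nvmeq, &iod->cmd);
        nvme_write_sq_db(nvmeq, bd->last);   /* ring only on the last request */
        spin_unlock(&nvmeq->sq_lock);
        return BLK_STS_OK;
}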
905 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) in nvme_submit_cmds() argument
907 spin_lock(&nvmeq->sq_lock); in nvme_submit_cmds()
912 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_submit_cmds()
914 nvme_write_sq_db(nvmeq, true); in nvme_submit_cmds()
915 spin_unlock(&nvmeq->sq_lock); in nvme_submit_cmds()
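nvme_submit_cmds() is the batched counterpart used by ->queue_rqs: every prepared request on the list is copied under a single sq_lock acquisition, and the doorbell is rung once at the end. Sketch per mainline; the rq_list helpers have changed signature across kernel versions:

static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
{
        struct request *req;

        spin_lock(&nvmeq->sq_lock);
        while ((req = rq_list_pop(rqlist))) {
                struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

                nvme_sq_copy_cmd(nvmeq, &iod->cmd);
        }
        nvme_write_sq_db(nvmeq, true);
        spin_unlock(&nvmeq->sq_lock);
}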
918 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) in nvme_prep_rq_batch() argument
924 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_prep_rq_batch()
926 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) in nvme_prep_rq_batch()
929 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; in nvme_prep_rq_batch()
938 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_queue_rqs() local
940 if (!nvme_prep_rq_batch(nvmeq, req)) { in nvme_queue_rqs()
952 nvme_submit_cmds(nvmeq, rqlist); in nvme_queue_rqs()
964 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_unmap_rq() local
965 struct nvme_dev *dev = nvmeq->dev; in nvme_pci_unmap_rq()
990 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) in nvme_cqe_pending() argument
992 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
994 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; in nvme_cqe_pending()
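nvme_cqe_pending() is the phase-tag check: a completion entry is new when its phase bit (bit 0 of the status field) matches the phase the host currently expects for this pass over the ring. The listing already shows the whole body; assembled:

static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
        struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

        /* The device flips the phase bit each time it wraps the CQ, so a
         * matching bit means this entry was written since the host's last
         * pass over the ring. */
        return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}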
997 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) in nvme_ring_cq_doorbell() argument
999 u16 head = nvmeq->cq_head; in nvme_ring_cq_doorbell()
1001 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, in nvme_ring_cq_doorbell()
1002 nvmeq->dbbuf_cq_ei)) in nvme_ring_cq_doorbell()
1003 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
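nvme_ring_cq_doorbell() writes the new CQ head back to the device; the CQ doorbell sits one doorbell stride after the SQ doorbell for the same queue, hence the + nvmeq->dev->db_stride. Sketch:

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
        u16 head = nvmeq->cq_head;

        /* As on the SQ side, shadow doorbells can elide the MMIO write. */
        if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
                                              nvmeq->dbbuf_cq_ei))
                writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}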
1006 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) in nvme_queue_tagset() argument
1008 if (!nvmeq->qid) in nvme_queue_tagset()
1009 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1010 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
1013 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, in nvme_handle_cqe() argument
1016 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1026 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { in nvme_handle_cqe()
1027 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
1032 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); in nvme_handle_cqe()
1034 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
1040 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); in nvme_handle_cqe()
1047 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) in nvme_update_cq_head() argument
1049 u32 tmp = nvmeq->cq_head + 1; in nvme_update_cq_head()
1051 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1052 nvmeq->cq_head = 0; in nvme_update_cq_head()
1053 nvmeq->cq_phase ^= 1; in nvme_update_cq_head()
1055 nvmeq->cq_head = tmp; in nvme_update_cq_head()
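nvme_update_cq_head() advances the head and flips the expected phase when the ring wraps, which is what keeps nvme_cqe_pending() correct across passes. Sketch:

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
        u32 tmp = nvmeq->cq_head + 1;

        if (tmp == nvmeq->q_depth) {
                /* Wrapped: restart at slot 0 and expect the opposite phase. */
                nvmeq->cq_head = 0;
                nvmeq->cq_phase ^= 1;
        } else {
                nvmeq->cq_head = tmp;
        }
}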
1059 static inline int nvme_poll_cq(struct nvme_queue *nvmeq, in nvme_poll_cq() argument
1064 while (nvme_cqe_pending(nvmeq)) { in nvme_poll_cq()
1071 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); in nvme_poll_cq()
1072 nvme_update_cq_head(nvmeq); in nvme_poll_cq()
1076 nvme_ring_cq_doorbell(nvmeq); in nvme_poll_cq()
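nvme_poll_cq() is the completion loop shared by the interrupt handler and the polling paths: consume every pending CQE, then write the doorbell once. Sketch per mainline; the dma_rmb() orders the phase-bit read against reading the rest of the CQE:

static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
                               struct io_comp_batch *iob)
{
        int found = 0;

        while (nvme_cqe_pending(nvmeq)) {
                found++;
                /* The phase check and the CQE payload are independent loads,
                 * so a read barrier is needed before consuming the entry. */
                dma_rmb();
                nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
                nvme_update_cq_head(nvmeq);
        }

        if (found)
                nvme_ring_cq_doorbell(nvmeq);
        return found;
}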
1082 struct nvme_queue *nvmeq = data; in nvme_irq() local
1085 if (nvme_poll_cq(nvmeq, &iob)) { in nvme_irq()
1095 struct nvme_queue *nvmeq = data; in nvme_irq_check() local
1097 if (nvme_cqe_pending(nvmeq)) in nvme_irq_check()
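nvme_irq() and nvme_irq_check() are the handlers registered by queue_request_irq() further down; the check variant only wakes the IRQ thread when a CQE is actually pending. Sketch following mainline; the io_comp_batch plumbing differs slightly between kernel versions:

static irqreturn_t nvme_irq(int irq, void *data)
{
        struct nvme_queue *nvmeq = data;
        DEFINE_IO_COMP_BATCH(iob);

        if (nvme_poll_cq(nvmeq, &iob)) {
                /* Complete any requests that were batched up by the poll. */
                if (!rq_list_empty(iob.req_list))
                        nvme_pci_complete_batch(&iob);
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
        struct nvme_queue *nvmeq = data;

        if (nvme_cqe_pending(nvmeq))
                return IRQ_WAKE_THREAD;
        return IRQ_NONE;
}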
1106 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) in nvme_poll_irqdisable() argument
1108 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1110 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); in nvme_poll_irqdisable()
1112 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1113 nvme_poll_cq(nvmeq, NULL); in nvme_poll_irqdisable()
1114 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
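nvme_poll_irqdisable() is used on shutdown and timeout paths to reap completions on an interrupt-driven queue with its IRQ masked; it is never called for polled queues, hence the WARN_ON_ONCE. Sketch:

static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
        struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

        WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

        disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
        nvme_poll_cq(nvmeq, NULL);
        enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}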
1119 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll() local
1122 if (!nvme_cqe_pending(nvmeq)) in nvme_poll()
1125 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll()
1126 found = nvme_poll_cq(nvmeq, iob); in nvme_poll()
1127 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll()
1135 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event() local
1141 spin_lock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1142 nvme_sq_copy_cmd(nvmeq, &c); in nvme_pci_submit_async_event()
1143 nvme_write_sq_db(nvmeq, true); in nvme_pci_submit_async_event()
1144 spin_unlock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
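nvme_pci_submit_async_event() bypasses blk-mq and writes the AER command straight onto the admin SQ (queue 0), reusing the same copy/doorbell helpers as the I/O path. Sketch per mainline:

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
        struct nvme_dev *dev = to_nvme_dev(ctrl);
        struct nvme_queue *nvmeq = &dev->queues[0];
        struct nvme_command c = { };

        c.common.opcode = nvme_admin_async_event;
        /* Command ID outside the blk-mq tag space, reserved for AERs. */
        c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;

        spin_lock(&nvmeq->sq_lock);
        nvme_sq_copy_cmd(nvmeq, &c);
        nvme_write_sq_db(nvmeq, true);
        spin_unlock(&nvmeq->sq_lock);
}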
1193 struct nvme_queue *nvmeq, s16 vector) in adapter_alloc_cq() argument
1198 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) in adapter_alloc_cq()
1206 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1208 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1216 struct nvme_queue *nvmeq) in adapter_alloc_sq() argument
1235 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1237 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1256 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in abort_endio() local
1258 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1260 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1319 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_timeout() local
1320 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout()
1347 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_timeout()
1350 nvme_poll_irqdisable(nvmeq); in nvme_timeout()
1355 req->tag, nvme_cid(req), nvmeq->qid); in nvme_timeout()
1372 req->tag, nvme_cid(req), nvmeq->qid); in nvme_timeout()
1388 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1392 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid); in nvme_timeout()
1405 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_timeout()
1407 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1410 nvmeq->qid, blk_op_str(req_op(req)), req_op(req), in nvme_timeout()
1445 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
1447 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1448 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1449 if (!nvmeq->sq_cmds) in nvme_free_queue()
1452 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { in nvme_free_queue()
1453 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1454 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_free_queue()
1456 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1457 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1473 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_suspend_queue() local
1475 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) in nvme_suspend_queue()
1481 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1482 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1483 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); in nvme_suspend_queue()
1484 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_suspend_queue()
1485 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1538 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, in nvme_alloc_sq_cmds() argument
1544 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1545 if (nvmeq->sq_cmds) { in nvme_alloc_sq_cmds()
1546 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, in nvme_alloc_sq_cmds()
1547 nvmeq->sq_cmds); in nvme_alloc_sq_cmds()
1548 if (nvmeq->sq_dma_addr) { in nvme_alloc_sq_cmds()
1549 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); in nvme_alloc_sq_cmds()
1553 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1557 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1558 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1559 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
1566 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue() local
1571 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1572 nvmeq->q_depth = depth; in nvme_alloc_queue()
1573 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1574 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1575 if (!nvmeq->cqes) in nvme_alloc_queue()
1578 if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) in nvme_alloc_queue()
1581 nvmeq->dev = dev; in nvme_alloc_queue()
1582 spin_lock_init(&nvmeq->sq_lock); in nvme_alloc_queue()
1583 spin_lock_init(&nvmeq->cq_poll_lock); in nvme_alloc_queue()
1584 nvmeq->cq_head = 0; in nvme_alloc_queue()
1585 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1586 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1587 nvmeq->qid = qid; in nvme_alloc_queue()
1593 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1594 nvmeq->cq_dma_addr); in nvme_alloc_queue()
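A sketch of nvme_alloc_queue() assembled from the fragments above; the early return, the goto labels, and the queue_count bookkeeping are filled in from the upstream driver and may differ by kernel version. The CQ is always coherent DMA memory, while the SQ may live in the CMB via nvme_alloc_sq_cmds():

static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
        struct nvme_queue *nvmeq = &dev->queues[qid];

        if (dev->ctrl.queue_count > qid)
                return 0;   /* already allocated on a previous reset */

        nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
        nvmeq->q_depth = depth;
        nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
                                         &nvmeq->cq_dma_addr, GFP_KERNEL);
        if (!nvmeq->cqes)
                goto free_nvmeq;

        if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
                goto free_cqdma;

        nvmeq->dev = dev;
        spin_lock_init(&nvmeq->sq_lock);
        spin_lock_init(&nvmeq->cq_poll_lock);
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->qid = qid;
        dev->ctrl.queue_count++;
        return 0;

 free_cqdma:
        dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
                          nvmeq->cq_dma_addr);
 free_nvmeq:
        return -ENOMEM;
}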
1599 static int queue_request_irq(struct nvme_queue *nvmeq) in queue_request_irq() argument
1601 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1602 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1605 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, in queue_request_irq()
1606 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1608 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, in queue_request_irq()
1609 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
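queue_request_irq() registers the per-queue interrupt handler. With threaded interrupts enabled (the driver's use_threaded_interrupts module parameter), nvme_irq_check() runs as the hard-IRQ check and nvme_irq() as the thread; otherwise nvme_irq() runs in hard-IRQ context. Sketch:

static int queue_request_irq(struct nvme_queue *nvmeq)
{
        struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
        int nr = nvmeq->dev->ctrl.instance;

        if (use_threaded_interrupts) {
                return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
                                nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
        } else {
                return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
                                NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
        }
}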
1613 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
1615 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1617 nvmeq->sq_tail = 0; in nvme_init_queue()
1618 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1619 nvmeq->cq_head = 0; in nvme_init_queue()
1620 nvmeq->cq_phase = 1; in nvme_init_queue()
1621 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1622 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1623 nvme_dbbuf_init(dev, nvmeq, qid); in nvme_init_queue()
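nvme_init_queue() resets the ring state every time a queue is (re)created: tails, head, and phase return to their initial values, the CQ memory is cleared, and the shadow doorbell pointers are re-established. Sketch per mainline:

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
        struct nvme_dev *dev = nvmeq->dev;

        nvmeq->sq_tail = 0;
        nvmeq->last_sq_tail = 0;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;            /* first pass expects phase == 1 */
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
        nvme_dbbuf_init(dev, nvmeq, qid);
        dev->online_queues++;
        wmb();  /* ensure the initialization is visible before use */
}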
1650 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) in nvme_create_queue() argument
1652 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1656 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_create_queue()
1665 set_bit(NVMEQ_POLLED, &nvmeq->flags); in nvme_create_queue()
1667 result = adapter_alloc_cq(dev, qid, nvmeq, vector); in nvme_create_queue()
1671 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1677 nvmeq->cq_vector = vector; in nvme_create_queue()
1682 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
1684 result = queue_request_irq(nvmeq); in nvme_create_queue()
1689 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_create_queue()
1765 struct nvme_queue *nvmeq; in nvme_pci_configure_admin_queue() local
1795 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1796 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
1800 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
1801 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
1807 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
1808 nvme_init_queue(nvmeq, 0); in nvme_pci_configure_admin_queue()
1809 result = queue_request_irq(nvmeq); in nvme_pci_configure_admin_queue()
1815 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_pci_configure_admin_queue()
2423 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_queue_end() local
2426 complete(&nvmeq->delete_done); in nvme_del_queue_end()
2433 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_cq_end() local
2436 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_del_cq_end()
2441 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) in nvme_delete_queue() argument
2443 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2448 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); in nvme_delete_queue()
2459 req->end_io_data = nvmeq; in nvme_delete_queue()
2461 init_completion(&nvmeq->delete_done); in nvme_delete_queue()
2480 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_delete_io_queues() local
2482 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, in __nvme_delete_io_queues()