Lines Matching full:queue (NVMe target RDMA transport, nvmet_rdma)
52 struct nvmet_rdma_queue *queue; member
66 struct nvmet_rdma_queue *queue; member
147 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
157 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp()
220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp()
243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
248 sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); in nvmet_rdma_put_rsp()
448 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_alloc_rsps() argument
450 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_alloc_rsps()
451 int nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_alloc_rsps()
454 if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL, in nvmet_rdma_alloc_rsps()
458 queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), in nvmet_rdma_alloc_rsps()
460 if (!queue->rsps) in nvmet_rdma_alloc_rsps()
464 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
475 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); in nvmet_rdma_alloc_rsps()
476 kfree(queue->rsps); in nvmet_rdma_alloc_rsps()
478 sbitmap_free(&queue->rsp_tags); in nvmet_rdma_alloc_rsps()
483 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_rsps() argument
485 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_free_rsps()
486 int i, nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_free_rsps()
489 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); in nvmet_rdma_free_rsps()
490 kfree(queue->rsps); in nvmet_rdma_free_rsps()
491 sbitmap_free(&queue->rsp_tags); in nvmet_rdma_free_rsps()
506 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
514 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) in nvmet_rdma_process_wr_wait_list() argument
516 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
517 while (!list_empty(&queue->rsp_wr_wait_list)) { in nvmet_rdma_process_wr_wait_list()
521 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
525 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
527 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
530 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
534 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
629 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
648 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
663 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp() local
665 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
673 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) in nvmet_rdma_release_rsp()
674 nvmet_rdma_process_wr_wait_list(queue); in nvmet_rdma_release_rsp()
679 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) in nvmet_rdma_error_comp() argument
681 if (queue->nvme_sq.ctrl) { in nvmet_rdma_error_comp()
682 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_rdma_error_comp()
687 * cleanup the queue in nvmet_rdma_error_comp()
689 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_error_comp()
697 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_send_done() local
705 nvmet_rdma_error_comp(queue); in nvmet_rdma_send_done()
713 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
734 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
736 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
750 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_read_data_done() local
754 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
764 nvmet_rdma_error_comp(queue); in nvmet_rdma_read_data_done()
783 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_write_data_done() local
784 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
791 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
801 nvmet_rdma_error_comp(queue); in nvmet_rdma_write_data_done()
858 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
945 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command() local
948 &queue->sq_wr_avail) < 0)) { in nvmet_rdma_execute_command()
949 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n", in nvmet_rdma_execute_command()
950 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
951 queue->nvme_sq.ctrl->cntlid); in nvmet_rdma_execute_command()
952 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
957 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
958 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
967 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, in nvmet_rdma_handle_command() argument
972 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
975 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
979 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, in nvmet_rdma_handle_command()
980 &queue->nvme_sq, &nvmet_rdma_ops)) in nvmet_rdma_handle_command()
988 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
989 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_handle_command()
990 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
1003 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_recv_done() local
1011 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1018 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1022 cmd->queue = queue; in nvmet_rdma_recv_done()
1023 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
1030 nvmet_rdma_post_recv(queue->dev, cmd); in nvmet_rdma_recv_done()
1033 rsp->queue = queue; in nvmet_rdma_recv_done()
1037 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1041 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { in nvmet_rdma_recv_done()
1044 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1045 if (queue->state == NVMET_RDMA_Q_CONNECTING) in nvmet_rdma_recv_done()
1046 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
1049 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1053 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
1251 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_create_queue_ib() argument
1254 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_create_queue_ib()
1260 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; in nvmet_rdma_create_queue_ib()
1262 queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, in nvmet_rdma_create_queue_ib()
1263 queue->comp_vector, IB_POLL_WORKQUEUE); in nvmet_rdma_create_queue_ib()
1264 if (IS_ERR(queue->cq)) { in nvmet_rdma_create_queue_ib()
1265 ret = PTR_ERR(queue->cq); in nvmet_rdma_create_queue_ib()
1271 qp_attr.qp_context = queue; in nvmet_rdma_create_queue_ib()
1273 qp_attr.send_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1274 qp_attr.recv_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1278 qp_attr.cap.max_send_wr = queue->send_queue_size + 1; in nvmet_rdma_create_queue_ib()
1279 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, in nvmet_rdma_create_queue_ib()
1281 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; in nvmet_rdma_create_queue_ib()
1285 if (queue->nsrq) { in nvmet_rdma_create_queue_ib()
1286 qp_attr.srq = queue->nsrq->srq; in nvmet_rdma_create_queue_ib()
1289 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; in nvmet_rdma_create_queue_ib()
1293 if (queue->port->pi_enable && queue->host_qid) in nvmet_rdma_create_queue_ib()
1296 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); in nvmet_rdma_create_queue_ib()
1301 queue->qp = queue->cm_id->qp; in nvmet_rdma_create_queue_ib()
1303 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); in nvmet_rdma_create_queue_ib()
1306 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, in nvmet_rdma_create_queue_ib()
1307 qp_attr.cap.max_send_wr, queue->cm_id); in nvmet_rdma_create_queue_ib()
1309 if (!queue->nsrq) { in nvmet_rdma_create_queue_ib()
1310 for (i = 0; i < queue->recv_queue_size; i++) { in nvmet_rdma_create_queue_ib()
1311 queue->cmds[i].queue = queue; in nvmet_rdma_create_queue_ib()
1312 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); in nvmet_rdma_create_queue_ib()
1322 rdma_destroy_qp(queue->cm_id); in nvmet_rdma_create_queue_ib()
1324 ib_cq_pool_put(queue->cq, nr_cqe + 1); in nvmet_rdma_create_queue_ib()
1328 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_destroy_queue_ib() argument
1330 ib_drain_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1331 if (queue->cm_id) in nvmet_rdma_destroy_queue_ib()
1332 rdma_destroy_id(queue->cm_id); in nvmet_rdma_destroy_queue_ib()
1333 ib_destroy_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1334 ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * in nvmet_rdma_destroy_queue_ib()
1335 queue->send_queue_size + 1); in nvmet_rdma_destroy_queue_ib()
1338 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_queue() argument
1340 pr_debug("freeing queue %d\n", queue->idx); in nvmet_rdma_free_queue()
1342 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_free_queue()
1344 nvmet_rdma_destroy_queue_ib(queue); in nvmet_rdma_free_queue()
1345 if (!queue->nsrq) { in nvmet_rdma_free_queue()
1346 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_free_queue()
1347 queue->recv_queue_size, in nvmet_rdma_free_queue()
1348 !queue->host_qid); in nvmet_rdma_free_queue()
1350 nvmet_rdma_free_rsps(queue); in nvmet_rdma_free_queue()
1351 ida_free(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_free_queue()
1352 kfree(queue); in nvmet_rdma_free_queue()
1357 struct nvmet_rdma_queue *queue = in nvmet_rdma_release_queue_work() local
1359 struct nvmet_rdma_device *dev = queue->dev; in nvmet_rdma_release_queue_work()
1361 nvmet_rdma_free_queue(queue); in nvmet_rdma_release_queue_work()
1368 struct nvmet_rdma_queue *queue) in nvmet_rdma_parse_cm_connect_req() argument
1379 queue->host_qid = le16_to_cpu(req->qid); in nvmet_rdma_parse_cm_connect_req()
1382 * req->hsqsize corresponds to our recv queue size plus 1 in nvmet_rdma_parse_cm_connect_req()
1383 * req->hrqsize corresponds to our send queue size in nvmet_rdma_parse_cm_connect_req()
1385 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; in nvmet_rdma_parse_cm_connect_req()
1386 queue->send_queue_size = le16_to_cpu(req->hrqsize); in nvmet_rdma_parse_cm_connect_req()
1388 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) in nvmet_rdma_parse_cm_connect_req()
1417 struct nvmet_rdma_queue *queue; in nvmet_rdma_alloc_queue() local
1420 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_rdma_alloc_queue()
1421 if (!queue) { in nvmet_rdma_alloc_queue()
1426 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1432 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); in nvmet_rdma_alloc_queue()
1440 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); in nvmet_rdma_alloc_queue()
1441 queue->dev = ndev; in nvmet_rdma_alloc_queue()
1442 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1443 queue->port = port->nport; in nvmet_rdma_alloc_queue()
1445 spin_lock_init(&queue->state_lock); in nvmet_rdma_alloc_queue()
1446 queue->state = NVMET_RDMA_Q_CONNECTING; in nvmet_rdma_alloc_queue()
1447 INIT_LIST_HEAD(&queue->rsp_wait_list); in nvmet_rdma_alloc_queue()
1448 INIT_LIST_HEAD(&queue->rsp_wr_wait_list); in nvmet_rdma_alloc_queue()
1449 spin_lock_init(&queue->rsp_wr_wait_lock); in nvmet_rdma_alloc_queue()
1450 INIT_LIST_HEAD(&queue->queue_list); in nvmet_rdma_alloc_queue()
1452 queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); in nvmet_rdma_alloc_queue()
1453 if (queue->idx < 0) { in nvmet_rdma_alloc_queue()
1462 queue->comp_vector = !queue->host_qid ? 0 : in nvmet_rdma_alloc_queue()
1463 queue->idx % ndev->device->num_comp_vectors; in nvmet_rdma_alloc_queue()
1466 ret = nvmet_rdma_alloc_rsps(queue); in nvmet_rdma_alloc_queue()
1473 queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; in nvmet_rdma_alloc_queue()
1475 queue->cmds = nvmet_rdma_alloc_cmds(ndev, in nvmet_rdma_alloc_queue()
1476 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1477 !queue->host_qid); in nvmet_rdma_alloc_queue()
1478 if (IS_ERR(queue->cmds)) { in nvmet_rdma_alloc_queue()
1484 ret = nvmet_rdma_create_queue_ib(queue); in nvmet_rdma_alloc_queue()
1486 pr_err("%s: creating RDMA queue failed (%d).\n", in nvmet_rdma_alloc_queue()
1492 return queue; in nvmet_rdma_alloc_queue()
1495 if (!queue->nsrq) { in nvmet_rdma_alloc_queue()
1496 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_alloc_queue()
1497 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1498 !queue->host_qid); in nvmet_rdma_alloc_queue()
1501 nvmet_rdma_free_rsps(queue); in nvmet_rdma_alloc_queue()
1503 ida_free(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_alloc_queue()
1505 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1507 kfree(queue); in nvmet_rdma_alloc_queue()
1515 struct nvmet_rdma_queue *queue = priv; in nvmet_rdma_qp_event() local
1519 rdma_notify(queue->cm_id, event->event); in nvmet_rdma_qp_event()
1522 pr_debug("received last WQE reached event for queue=0x%p\n", in nvmet_rdma_qp_event()
1523 queue); in nvmet_rdma_qp_event()
1533 struct nvmet_rdma_queue *queue, in nvmet_rdma_cm_accept() argument
1543 queue->dev->device->attrs.max_qp_init_rd_atom); in nvmet_rdma_cm_accept()
1547 priv.crqsize = cpu_to_le16(queue->recv_queue_size); in nvmet_rdma_cm_accept()
1560 struct nvmet_rdma_queue *queue; in nvmet_rdma_queue_connect() local
1569 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); in nvmet_rdma_queue_connect()
1570 if (!queue) { in nvmet_rdma_queue_connect()
1575 if (queue->host_qid == 0) { in nvmet_rdma_queue_connect()
1582 if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl && in nvmet_rdma_queue_connect()
1591 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); in nvmet_rdma_queue_connect()
1597 queue->cm_id = NULL; in nvmet_rdma_queue_connect()
1602 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); in nvmet_rdma_queue_connect()
1608 nvmet_rdma_free_queue(queue); in nvmet_rdma_queue_connect()
1615 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_established() argument
1619 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1620 if (queue->state != NVMET_RDMA_Q_CONNECTING) { in nvmet_rdma_queue_established()
1621 pr_warn("trying to establish a connected queue\n"); in nvmet_rdma_queue_established()
1624 queue->state = NVMET_RDMA_Q_LIVE; in nvmet_rdma_queue_established()
1626 while (!list_empty(&queue->rsp_wait_list)) { in nvmet_rdma_queue_established()
1629 cmd = list_first_entry(&queue->rsp_wait_list, in nvmet_rdma_queue_established()
1633 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1634 nvmet_rdma_handle_command(queue, cmd); in nvmet_rdma_queue_established()
1635 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1639 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1642 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in __nvmet_rdma_queue_disconnect() argument
1647 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); in __nvmet_rdma_queue_disconnect()
1649 spin_lock_irqsave(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1650 switch (queue->state) { in __nvmet_rdma_queue_disconnect()
1652 while (!list_empty(&queue->rsp_wait_list)) { in __nvmet_rdma_queue_disconnect()
1655 rsp = list_first_entry(&queue->rsp_wait_list, in __nvmet_rdma_queue_disconnect()
1663 queue->state = NVMET_RDMA_Q_DISCONNECTING; in __nvmet_rdma_queue_disconnect()
1669 spin_unlock_irqrestore(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1672 rdma_disconnect(queue->cm_id); in __nvmet_rdma_queue_disconnect()
1673 queue_work(nvmet_wq, &queue->release_work); in __nvmet_rdma_queue_disconnect()
1677 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_disconnect() argument
1682 if (!list_empty(&queue->queue_list)) { in nvmet_rdma_queue_disconnect()
1683 list_del_init(&queue->queue_list); in nvmet_rdma_queue_disconnect()
1689 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_queue_disconnect()
1693 struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_connect_fail() argument
1695 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); in nvmet_rdma_queue_connect_fail()
1698 if (!list_empty(&queue->queue_list)) in nvmet_rdma_queue_connect_fail()
1699 list_del_init(&queue->queue_list); in nvmet_rdma_queue_connect_fail()
1702 pr_err("failed to connect queue %d\n", queue->idx); in nvmet_rdma_queue_connect_fail()
1703 queue_work(nvmet_wq, &queue->release_work); in nvmet_rdma_queue_connect_fail()
1709 * @queue: nvmet rdma queue (cm id qp_context)
1713 * queue cm_id and/or a device bound listener cm_id (where in this
1714 * case queue will be null).
1722 struct nvmet_rdma_queue *queue) in nvmet_rdma_device_removal() argument
1726 if (queue) { in nvmet_rdma_device_removal()
1728 * This is a queue cm_id. we have registered in nvmet_rdma_device_removal()
1756 struct nvmet_rdma_queue *queue = NULL; in nvmet_rdma_cm_handler() local
1760 queue = cm_id->qp->qp_context; in nvmet_rdma_cm_handler()
1771 nvmet_rdma_queue_established(queue); in nvmet_rdma_cm_handler()
1774 if (!queue) { in nvmet_rdma_cm_handler()
1783 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_cm_handler()
1786 ret = nvmet_rdma_device_removal(cm_id, queue); in nvmet_rdma_cm_handler()
1794 nvmet_rdma_queue_connect_fail(cm_id, queue); in nvmet_rdma_cm_handler()
1807 struct nvmet_rdma_queue *queue, *n; in nvmet_rdma_delete_ctrl() local
1810 list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) { in nvmet_rdma_delete_ctrl()
1811 if (queue->nvme_sq.ctrl != ctrl) in nvmet_rdma_delete_ctrl()
1813 list_del_init(&queue->queue_list); in nvmet_rdma_delete_ctrl()
1814 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_delete_ctrl()
1821 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_destroy_port_queues() local
1825 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_destroy_port_queues()
1827 if (queue->port != nport) in nvmet_rdma_destroy_port_queues()
1830 list_del_init(&queue->queue_list); in nvmet_rdma_destroy_port_queues()
1831 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_destroy_port_queues()
1846 * guarantees that no new queue will be created. in nvmet_rdma_disable_port()
1992 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()
2005 struct nvmet_rdma_queue *queue = in nvmet_rdma_host_port_addr() local
2009 (struct sockaddr *)&queue->cm_id->route.addr.dst_addr); in nvmet_rdma_host_port_addr()
2043 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_remove_one() local
2064 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_remove_one()
2066 if (queue->dev->device != ib_device) in nvmet_rdma_remove_one()
2069 pr_info("Removing queue %d\n", queue->idx); in nvmet_rdma_remove_one()
2070 list_del_init(&queue->queue_list); in nvmet_rdma_remove_one()
2071 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_remove_one()
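
The matched lines around nvmet_rdma_execute_command(), nvmet_rdma_release_rsp() and nvmet_rdma_handle_command() (source lines 665, 673-674, 945-952 and 988-990 above) all touch the same mechanism: a response reserves 1 + n_rdma send work requests from the queue's sq_wr_avail budget before posting, returns them on completion, and is parked on rsp_wr_wait_list when the budget is exhausted. The sketch below is a minimal userspace model of that budget-and-defer pattern in plain C11; every demo_* name and type is a simplified stand-in invented for illustration, not the kernel's own API.

/*
 * Minimal userspace model of the send-WR budget pattern seen in the
 * matched lines above (sq_wr_avail accounting and deferral).  All
 * demo_* identifiers are hypothetical; only the pattern is taken
 * from the listing.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
	atomic_int sq_wr_avail;		/* remaining send work requests */
};

struct demo_rsp {
	int n_rdma;			/* extra RDMA WRs this response needs */
};

/*
 * Reserve 1 send WR plus n_rdma RDMA WRs; undo and report failure if
 * the budget would go negative (the caller would then defer the work,
 * as the listing does with rsp_wr_wait_list).
 */
static bool demo_reserve_wrs(struct demo_queue *q, struct demo_rsp *rsp)
{
	int needed = 1 + rsp->n_rdma;

	if (atomic_fetch_sub(&q->sq_wr_avail, needed) - needed < 0) {
		atomic_fetch_add(&q->sq_wr_avail, needed);
		return false;
	}
	return true;
}

/* Completion path: return the reserved WRs so deferred work can run. */
static void demo_release_wrs(struct demo_queue *q, struct demo_rsp *rsp)
{
	atomic_fetch_add(&q->sq_wr_avail, 1 + rsp->n_rdma);
}

int main(void)
{
	struct demo_queue q = { .sq_wr_avail = 2 };
	struct demo_rsp big = { .n_rdma = 4 };
	struct demo_rsp small = { .n_rdma = 0 };

	printf("big reserved:   %d\n", demo_reserve_wrs(&q, &big));	/* 0: defer */
	printf("small reserved: %d\n", demo_reserve_wrs(&q, &small));	/* 1: post */
	demo_release_wrs(&q, &small);
	return 0;
}

The same reserve/release bookkeeping is why nvmet_rdma_release_rsp() re-checks rsp_wr_wait_list (source lines 673-674): returning WRs is the event that allows previously deferred responses to proceed.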