Lines matching full:queue in drivers/nvme/host/tcp.c

105 	struct nvme_tcp_queue	*queue;  member
205 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
212 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
214 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
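Editorial note: nvme_tcp_queue_id() works by pointer arithmetic. The queues live in one contiguous array hung off the controller, so subtracting the array base from the queue's address yields its index, with index 0 being the admin queue. A minimal standalone C sketch of the same idiom (struct names are illustrative, not the driver's):

    #include <stdio.h>

    struct queue { int unused; };
    struct ctrl  { struct queue queues[4]; };

    /* Pointer subtraction between elements of one array yields the
     * element index; index 0 plays the role of the admin queue. */
    static int queue_id(struct ctrl *c, struct queue *q)
    {
            return (int)(q - c->queues);
    }

    int main(void)
    {
            struct ctrl c;
            printf("%d\n", queue_id(&c, &c.queues[2]));  /* prints 2 */
            return 0;
    }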
218 * Check if the queue is TLS encrypted
220 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument
225 return queue->tls_enabled; in nvme_tcp_queue_tls()
239 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
241 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
244 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
245 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
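The tag-set lookup above encodes an off-by-one worth noting: queue 0 resolves to the admin tag set, while I/O queue N indexes tag_set.tags[N - 1]. A small compilable sketch of the same mapping (types and names are illustrative, not the driver's):

    #include <stdio.h>

    struct tags { int id; };

    struct tagsets {
            struct tags admin_tags;
            struct tags io_tags[8];
    };

    /* Queue 0 is the admin queue; I/O queue N uses the (N-1)th I/O tag set. */
    static struct tags *tagset_for(struct tagsets *ts, unsigned int queue_idx)
    {
            if (queue_idx == 0)
                    return &ts->admin_tags;
            return &ts->io_tags[queue_idx - 1];
    }

    int main(void)
    {
            struct tagsets ts = { { -1 }, { { 0 }, { 1 } } };
            printf("%d\n", tagset_for(&ts, 2)->id);  /* queue 2 -> io_tags[1] -> 1 */
            return 0;
    }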
248 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_hdgst_len() argument
250 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
253 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_ddgst_len() argument
255 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
274 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
279 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
368 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) in nvme_tcp_send_all() argument
372 /* drain the send queue as much as we can... */ in nvme_tcp_send_all()
374 ret = nvme_tcp_try_send(queue); in nvme_tcp_send_all()
378 static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue) in nvme_tcp_queue_has_pending() argument
380 return !list_empty(&queue->send_list) || in nvme_tcp_queue_has_pending()
381 !llist_empty(&queue->req_list); in nvme_tcp_queue_has_pending()
384 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) in nvme_tcp_queue_more() argument
386 return !nvme_tcp_queue_tls(queue) && in nvme_tcp_queue_more()
387 nvme_tcp_queue_has_pending(queue); in nvme_tcp_queue_more()
393 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request() local
396 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
397 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
401 * directly, otherwise queue io_work. Also, only do that if we in nvme_tcp_queue_request()
404 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
405 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
406 nvme_tcp_send_all(queue); in nvme_tcp_queue_request()
407 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
410 if (last && nvme_tcp_queue_has_pending(queue)) in nvme_tcp_queue_request()
411 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
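nvme_tcp_queue_request() is the producer side of the send path: it appends to a lock-free llist and, only when it was first in line on an otherwise idle queue and is already running on the queue's io_cpu, tries to send inline under mutex_trylock(); any contention falls back to scheduling io_work. A userspace sketch of that opportunistic fast path (all names hypothetical, worker scheduling reduced to a stub):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t send_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void send_all(void)     { printf("inline send\n"); }
    static void kick_io_work(void) { printf("deferred to io_work\n"); }

    /* Inline-send fast path: only when the caller is synchronous, nothing
     * else was queued, and we are already on the queue's CPU; the submitter
     * never blocks on the mutex, it just tries once. */
    static void queue_request(bool sync, bool first_and_idle, bool on_io_cpu)
    {
            if (on_io_cpu && sync && first_and_idle &&
                pthread_mutex_trylock(&send_mutex) == 0) {
                    send_all();
                    pthread_mutex_unlock(&send_mutex);
                    return;
            }
            kick_io_work();
    }

    int main(void)
    {
            queue_request(true, true, true);   /* takes the fast path */
            queue_request(true, false, true);  /* falls back to the worker */
            return 0;
    }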
414 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) in nvme_tcp_process_req_list() argument
419 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
421 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
426 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) in nvme_tcp_fetch_request() argument
430 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
433 nvme_tcp_process_req_list(queue); in nvme_tcp_fetch_request()
434 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
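The consumer side drains the llist in one shot: llist_del_all() detaches every node atomically, but an llist accumulates entries LIFO, so walking the detached chain and list_add()-prepending each node onto send_list reverses it a second time, restoring submission (FIFO) order. A standalone sketch of that double reversal (hypothetical node type, single-threaded stand-ins for the llist ops):

    #include <stdio.h>
    #include <stddef.h>

    struct node { int id; struct node *next; };

    /* Push in submission order onto a LIFO chain (stand-in for llist_add). */
    static void push(struct node **head, struct node *n)
    {
            n->next = *head;
            *head = n;
    }

    int main(void)
    {
            struct node a = { 1 }, b = { 2 }, c = { 3 };
            struct node *stack = NULL, *fifo = NULL, *n, *next;

            push(&stack, &a); push(&stack, &b); push(&stack, &c);

            /* Detach everything (stand-in for llist_del_all), then prepend
             * each node onto a second list: two reversals restore FIFO. */
            for (n = stack; n; n = next) {
                    next = n->next;
                    push(&fifo, n);
            }
            for (n = fifo; n; n = n->next)
                    printf("%d ", n->id);  /* prints 1 2 3 */
            return 0;
    }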
472 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, in nvme_tcp_verify_hdgst() argument
480 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
481 "queue %d: header digest flag is cleared\n", in nvme_tcp_verify_hdgst()
482 nvme_tcp_queue_id(queue)); in nvme_tcp_verify_hdgst()
487 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
490 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
499 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) in nvme_tcp_check_ddgst() argument
502 u8 digest_len = nvme_tcp_hdgst_len(queue); in nvme_tcp_check_ddgst()
509 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
510 "queue %d: data digest flag is cleared\n", in nvme_tcp_check_ddgst()
511 nvme_tcp_queue_id(queue)); in nvme_tcp_check_ddgst()
514 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
535 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request() local
536 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_init_request()
538 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
545 req->queue = queue; in nvme_tcp_init_request()
556 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx() local
558 hctx->driver_data = queue; in nvme_tcp_init_hctx()
566 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx() local
568 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
573 nvme_tcp_recv_state(struct nvme_tcp_queue *queue) in nvme_tcp_recv_state() argument
575 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
576 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
580 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) in nvme_tcp_init_recv_ctx() argument
582 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
583 nvme_tcp_hdgst_len(queue); in nvme_tcp_init_recv_ctx()
584 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
585 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
586 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
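Reception is a small state machine driven entirely by remaining-byte counters: a nonzero pdu_remaining means the header is still being assembled, ddgst_remaining then takes priority over payload, and nvme_tcp_init_recv_ctx() re-arms pdu_remaining to the response header size plus the optional header digest. A sketch of the state derivation (field names assumed to mirror the driver's counters):

    #include <stddef.h>

    enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

    struct rx {
            size_t pdu_remaining;    /* PDU header bytes still expected  */
            size_t ddgst_remaining;  /* data-digest bytes still expected */
            long   data_remaining;   /* payload bytes still expected     */
    };

    static enum recv_state recv_state(const struct rx *rx)
    {
            if (rx->pdu_remaining)
                    return RECV_PDU;    /* header still being assembled */
            if (rx->ddgst_remaining)
                    return RECV_DDGST;  /* trailing digest outstanding  */
            return RECV_DATA;           /* otherwise consume payload    */
    }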
598 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, in nvme_tcp_process_nvme_cqe() argument
604 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
606 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
607 "got bad cqe.command_id %#x on queue %d\n", in nvme_tcp_process_nvme_cqe()
608 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
609 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
619 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
624 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_data() argument
629 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
631 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
632 "got bad c2hdata.command_id %#x on queue %d\n", in nvme_tcp_handle_c2h_data()
633 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
638 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
639 "queue %d tag %#x unexpected data\n", in nvme_tcp_handle_c2h_data()
640 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
644 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
648 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
649 "queue %d tag %#x SUCCESS set but not last PDU\n", in nvme_tcp_handle_c2h_data()
650 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
651 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
658 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, in nvme_tcp_handle_comp() argument
666 * survive any kind of queue freeze and often don't respond to in nvme_tcp_handle_comp()
670 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), in nvme_tcp_handle_comp()
672 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
675 ret = nvme_tcp_process_nvme_cqe(queue, cqe); in nvme_tcp_handle_comp()
683 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu() local
686 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
687 u8 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
691 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); in nvme_tcp_setup_h2c_data_pdu()
700 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
702 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
714 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, in nvme_tcp_handle_r2t() argument
722 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
724 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
725 "got bad r2t.command_id %#x on queue %d\n", in nvme_tcp_handle_r2t()
726 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
732 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
739 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
746 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
763 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_pdu() argument
767 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
768 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
772 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
776 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
777 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
780 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
783 hdr = queue->pdu; in nvme_tcp_recv_pdu()
784 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
785 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
791 if (queue->data_digest) { in nvme_tcp_recv_pdu()
792 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
799 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
801 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
802 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
804 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
805 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
807 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
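nvme_tcp_recv_pdu() has to cope with headers split across skbs: it copies min(available, pdu_remaining) bytes at pdu_offset, returns early if the header is still short, and only dispatches on hdr->type (C2H data, completion, R2T) once the PDU is whole and its digest verified. A minimal sketch of that incremental reassembly (buffer size and names are illustrative):

    #include <string.h>
    #include <stddef.h>

    struct pdu_rx {
            char   buf[128];   /* assembled header            */
            size_t off;        /* bytes assembled so far      */
            size_t remaining;  /* bytes still needed          */
    };

    /* Consume up to *len bytes from the stream; returns 1 when the header
     * is complete and the caller may dispatch on its type field. */
    static int recv_pdu(struct pdu_rx *rx, const char **data, size_t *len)
    {
            size_t n = *len < rx->remaining ? *len : rx->remaining;

            memcpy(rx->buf + rx->off, *data, n);
            rx->off += n;
            rx->remaining -= n;
            *data += n;
            *len -= n;

            return rx->remaining == 0;
    }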
821 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_data() argument
824 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
826 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
832 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
844 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
845 "queue %d no space in request %#x", in nvme_tcp_recv_data()
846 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
847 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
857 if (queue->data_digest) in nvme_tcp_recv_data()
859 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
864 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
865 "queue %d failed to copy request %#x data", in nvme_tcp_recv_data()
866 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
872 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
875 if (!queue->data_remaining) { in nvme_tcp_recv_data()
876 if (queue->data_digest) { in nvme_tcp_recv_data()
877 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
878 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
883 queue->nr_cqe++; in nvme_tcp_recv_data()
885 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
892 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, in nvme_tcp_recv_ddgst() argument
895 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
896 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
897 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
898 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
905 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
908 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
911 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
912 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
918 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
920 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
921 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
925 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
930 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
933 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_ddgst()
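The trailing data digest can itself straddle segments, so it is gathered byte-wise into recv_ddgst at the offset implied by ddgst_remaining and compared with the locally accumulated exp_ddgst only once every byte has arrived. A sketch assuming the 4-byte CRC32C digest NVMe/TCP uses (names illustrative):

    #include <stdint.h>
    #include <string.h>

    #define DIGEST_LEN 4  /* NVMe/TCP digests are CRC32C, 4 bytes */

    struct ddgst_rx {
            uint32_t recv;       /* digest as received off the wire      */
            uint32_t expected;   /* digest computed over received data   */
            size_t   remaining;  /* digest bytes still outstanding       */
    };

    /* Returns 1 when the digest is complete and matches, -1 on mismatch,
     * 0 if more bytes are needed. */
    static int recv_ddgst(struct ddgst_rx *rx, const void *data, size_t len)
    {
            size_t off = DIGEST_LEN - rx->remaining;
            size_t n = len < rx->remaining ? len : rx->remaining;

            memcpy((char *)&rx->recv + off, data, n);
            rx->remaining -= n;
            if (rx->remaining)
                    return 0;
            return rx->recv == rx->expected ? 1 : -1;
    }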
940 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb() local
944 if (unlikely(!queue->rd_enabled)) in nvme_tcp_recv_skb()
948 switch (nvme_tcp_recv_state(queue)) { in nvme_tcp_recv_skb()
950 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
953 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
956 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
962 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
964 queue->rd_enabled = false; in nvme_tcp_recv_skb()
965 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
975 struct nvme_tcp_queue *queue; in nvme_tcp_data_ready() local
980 queue = sk->sk_user_data; in nvme_tcp_data_ready()
981 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
982 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
983 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
989 struct nvme_tcp_queue *queue; in nvme_tcp_write_space() local
992 queue = sk->sk_user_data; in nvme_tcp_write_space()
993 if (likely(queue && sk_stream_is_writeable(sk))) { in nvme_tcp_write_space()
995 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
1002 struct nvme_tcp_queue *queue; in nvme_tcp_state_change() local
1005 queue = sk->sk_user_data; in nvme_tcp_state_change()
1006 if (!queue) in nvme_tcp_state_change()
1015 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
1018 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
1019 "queue %d socket state %d\n", in nvme_tcp_state_change()
1020 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
1023 queue->state_change(sk); in nvme_tcp_state_change()
1028 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) in nvme_tcp_done_send_req() argument
1030 queue->request = NULL; in nvme_tcp_done_send_req()
1038 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
1048 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data() local
1064 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
1074 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data()
1078 if (queue->data_digest) in nvme_tcp_try_send_data()
1079 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
1092 if (queue->data_digest) { in nvme_tcp_try_send_data()
1093 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
1101 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_data()
1111 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu() local
1116 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_cmd_pdu()
1120 if (inline_data || nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_cmd_pdu()
1125 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1126 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1130 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_cmd_pdu()
1138 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1139 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1141 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_cmd_pdu()
1152 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu() local
1156 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_data_pdu()
1160 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1161 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1168 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data_pdu()
1175 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1176 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1186 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst() local
1196 if (nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_ddgst()
1201 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1209 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_ddgst()
1217 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) in nvme_tcp_try_send() argument
1223 if (!queue->request) { in nvme_tcp_try_send()
1224 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1225 if (!queue->request) in nvme_tcp_try_send()
1228 req = queue->request; in nvme_tcp_try_send()
1257 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1259 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1260 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send()
1267 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) in nvme_tcp_try_recv() argument
1269 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1274 rd_desc.arg.data = queue; in nvme_tcp_try_recv()
1277 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1285 struct nvme_tcp_queue *queue = in nvme_tcp_io_work() local
1293 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1294 result = nvme_tcp_try_send(queue); in nvme_tcp_io_work()
1295 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1302 result = nvme_tcp_try_recv(queue); in nvme_tcp_io_work()
1308 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1313 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1316 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_free_crypto() argument
1318 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1320 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1321 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1325 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_alloc_crypto() argument
1333 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1334 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1336 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1338 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1339 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1341 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1345 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1360 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req() local
1362 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_async_req()
1364 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1370 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1377 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue() local
1380 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1383 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1384 nvme_tcp_free_crypto(queue); in nvme_tcp_free_queue()
1386 page_frag_cache_drain(&queue->pf_cache); in nvme_tcp_free_queue()
1390 fput(queue->sock->file); in nvme_tcp_free_queue()
1391 queue->sock = NULL; in nvme_tcp_free_queue()
1394 kfree(queue->pdu); in nvme_tcp_free_queue()
1395 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1396 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1399 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) in nvme_tcp_init_connection() argument
1428 if (queue->hdr_digest) in nvme_tcp_init_connection()
1430 if (queue->data_digest) in nvme_tcp_init_connection()
1435 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1437 pr_warn("queue %d: failed to send icreq, error %d\n", in nvme_tcp_init_connection()
1438 nvme_tcp_queue_id(queue), ret); in nvme_tcp_init_connection()
1445 if (nvme_tcp_queue_tls(queue)) { in nvme_tcp_init_connection()
1449 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1452 pr_warn("queue %d: failed to receive icresp, error %d\n", in nvme_tcp_init_connection()
1453 nvme_tcp_queue_id(queue), ret); in nvme_tcp_init_connection()
1457 if (nvme_tcp_queue_tls(queue)) { in nvme_tcp_init_connection()
1458 ctype = tls_get_record_type(queue->sock->sk, in nvme_tcp_init_connection()
1461 pr_err("queue %d: unhandled TLS record %d\n", in nvme_tcp_init_connection()
1462 nvme_tcp_queue_id(queue), ctype); in nvme_tcp_init_connection()
1468 pr_err("queue %d: bad type returned %d\n", in nvme_tcp_init_connection()
1469 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1474 pr_err("queue %d: bad pdu length returned %d\n", in nvme_tcp_init_connection()
1475 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1480 pr_err("queue %d: bad pfv returned %d\n", in nvme_tcp_init_connection()
1481 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1486 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1487 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1488 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1489 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1490 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1496 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1497 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1498 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1499 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1500 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1506 pr_err("queue %d: unsupported cpda returned %d\n", in nvme_tcp_init_connection()
1507 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1513 pr_err("queue %d: invalid maxh2cdata returned %u\n", in nvme_tcp_init_connection()
1514 nvme_tcp_queue_id(queue), maxh2cdata); in nvme_tcp_init_connection()
1517 queue->maxh2cdata = maxh2cdata; in nvme_tcp_init_connection()
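nvme_tcp_init_connection() performs the ICReq/ICResp exchange and rejects any response that disagrees with what was requested: wrong PDU type or length, an unsupported PFV, digest flags that don't mirror the ICReq, a nonzero CPDA, or a MAXH2CDATA below the spec minimum or not dword-aligned. A condensed sketch of those checks (constants per NVMe/TCP, struct layout simplified, not the wire format):

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified, illustrative ICResp fields. */
    struct icresp {
            uint8_t  type;
            uint32_t plen;
            uint16_t pfv;
            uint8_t  cpda;
            bool     hdgst, ddgst;
            uint32_t maxh2cdata;
    };

    static int validate_icresp(const struct icresp *r,
                               bool want_hdgst, bool want_ddgst)
    {
            if (r->type != 0x01 || r->plen != 128)  /* ICResp PDU, fixed 128 bytes */
                    return -1;
            if (r->pfv != 0)                        /* only PFV 1.0 (encoded as 0) */
                    return -1;
            if (r->hdgst != want_hdgst || r->ddgst != want_ddgst)
                    return -1;                      /* digests must mirror the ICReq */
            if (r->cpda != 0)                       /* no controller PDU alignment */
                    return -1;
            if ((r->maxh2cdata % 4) || r->maxh2cdata < 4096)
                    return -1;                      /* dword-aligned, spec minimum */
            return 0;
    }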
1527 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) in nvme_tcp_admin_queue() argument
1529 return nvme_tcp_queue_id(queue) == 0; in nvme_tcp_admin_queue()
1532 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) in nvme_tcp_default_queue() argument
1534 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue()
1535 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_default_queue()
1537 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_default_queue()
1541 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) in nvme_tcp_read_queue() argument
1543 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue()
1544 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_read_queue()
1546 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_read_queue()
1547 !nvme_tcp_default_queue(queue) && in nvme_tcp_read_queue()
1552 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) in nvme_tcp_poll_queue() argument
1554 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue()
1555 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_poll_queue()
1557 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_poll_queue()
1558 !nvme_tcp_default_queue(queue) && in nvme_tcp_poll_queue()
1559 !nvme_tcp_read_queue(queue) && in nvme_tcp_poll_queue()
1565 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) in nvme_tcp_set_queue_io_cpu() argument
1567 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu()
1568 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_set_queue_io_cpu()
1571 if (nvme_tcp_default_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1573 else if (nvme_tcp_read_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1575 else if (nvme_tcp_poll_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1579 queue->io_cpu = WORK_CPU_UNBOUND; in nvme_tcp_set_queue_io_cpu()
1581 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
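CPU placement groups queues by class: default (write), read, and poll queues each restart their numbering, so n is the queue's position within its class and the n-th online CPU (wrapping) becomes its io_cpu. A sketch of the class-relative index with plain counts standing in for the cpumask (parameter names are illustrative):

    /* Sketch: derive a queue's worker CPU from its position within its
     * class, wrapping over the number of online CPUs (stand-in for
     * cpumask_next_wrap() over cpu_online_mask). */
    static unsigned int io_cpu_for(unsigned int qid, int class,
                                   unsigned int nr_default,
                                   unsigned int nr_read,
                                   unsigned int nr_cpus)
    {
            unsigned int n;

            if (class == 0)                         /* default queues */
                    n = qid - 1;
            else if (class == 1)                    /* read queues    */
                    n = qid - 1 - nr_default;
            else                                    /* poll queues    */
                    n = qid - 1 - nr_default - nr_read;

            return n % nr_cpus;
    }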
1586 struct nvme_tcp_queue *queue = data; in nvme_tcp_tls_done() local
1587 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_tls_done()
1588 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_tls_done()
1591 dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n", in nvme_tcp_tls_done()
1595 queue->tls_err = -status; in nvme_tcp_tls_done()
1601 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n", in nvme_tcp_tls_done()
1603 queue->tls_err = -ENOKEY; in nvme_tcp_tls_done()
1605 queue->tls_enabled = true; in nvme_tcp_tls_done()
1609 queue->tls_err = 0; in nvme_tcp_tls_done()
1613 complete(&queue->tls_complete); in nvme_tcp_tls_done()
1617 struct nvme_tcp_queue *queue, in nvme_tcp_start_tls() argument
1620 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_start_tls()
1626 dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n", in nvme_tcp_start_tls()
1629 args.ta_sock = queue->sock; in nvme_tcp_start_tls()
1631 args.ta_data = queue; in nvme_tcp_start_tls()
1638 queue->tls_err = -EOPNOTSUPP; in nvme_tcp_start_tls()
1639 init_completion(&queue->tls_complete); in nvme_tcp_start_tls()
1642 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n", in nvme_tcp_start_tls()
1646 ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo); in nvme_tcp_start_tls()
1652 "queue %d: TLS handshake failed, error %d\n", in nvme_tcp_start_tls()
1654 tls_handshake_cancel(queue->sock->sk); in nvme_tcp_start_tls()
1657 "queue %d: TLS handshake complete, error %d\n", in nvme_tcp_start_tls()
1658 qid, queue->tls_err); in nvme_tcp_start_tls()
1659 ret = queue->tls_err; in nvme_tcp_start_tls()
1668 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue() local
1672 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1673 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1674 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1675 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1676 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1677 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1680 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1682 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1686 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1693 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL); in nvme_tcp_alloc_queue()
1698 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1701 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1704 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1707 * Cleanup whatever is sitting in the TCP transmit queue on socket in nvme_tcp_alloc_queue()
1711 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1714 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1718 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1721 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1723 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1724 queue->sock->sk->sk_use_task_frag = false; in nvme_tcp_alloc_queue()
1725 nvme_tcp_set_queue_io_cpu(queue); in nvme_tcp_alloc_queue()
1726 queue->request = NULL; in nvme_tcp_alloc_queue()
1727 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1728 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1729 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1730 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1731 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1734 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1738 "failed to bind queue %d socket %d\n", in nvme_tcp_alloc_queue()
1748 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1752 "failed to bind to interface %s queue %d err %d\n", in nvme_tcp_alloc_queue()
1758 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1759 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1760 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1761 ret = nvme_tcp_alloc_crypto(queue); in nvme_tcp_alloc_queue()
1764 "failed to allocate queue %d crypto\n", qid); in nvme_tcp_alloc_queue()
1770 nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_queue()
1771 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1772 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1777 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1778 nvme_tcp_queue_id(queue)); in nvme_tcp_alloc_queue()
1780 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1790 ret = nvme_tcp_start_tls(nctrl, queue, pskid); in nvme_tcp_alloc_queue()
1795 ret = nvme_tcp_init_connection(queue); in nvme_tcp_alloc_queue()
1799 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1804 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1806 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1808 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1809 nvme_tcp_free_crypto(queue); in nvme_tcp_alloc_queue()
1812 fput(queue->sock->file); in nvme_tcp_alloc_queue()
1813 queue->sock = NULL; in nvme_tcp_alloc_queue()
1815 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1816 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1820 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue) in nvme_tcp_restore_sock_ops() argument
1822 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1826 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1827 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1828 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1832 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) in __nvme_tcp_stop_queue() argument
1834 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1835 nvme_tcp_restore_sock_ops(queue); in __nvme_tcp_stop_queue()
1836 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1842 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue() local
1844 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_stop_queue()
1847 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue()
1848 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1849 __nvme_tcp_stop_queue(queue); in nvme_tcp_stop_queue()
1850 /* Stopping the queue will disable TLS */ in nvme_tcp_stop_queue()
1851 queue->tls_enabled = false; in nvme_tcp_stop_queue()
1852 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue()
1855 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) in nvme_tcp_setup_sock_ops() argument
1857 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1858 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1859 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1860 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1861 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
1862 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
1863 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
1864 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
1866 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
1868 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
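nvme_tcp_setup_sock_ops() follows the standard kernel idiom for hooking a socket: under sk_callback_lock it saves the original sk_data_ready/sk_state_change/sk_write_space pointers, parks its own context in sk_user_data, installs replacements, and nvme_tcp_restore_sock_ops() undoes it symmetrically at teardown. A save-and-override sketch with plain function pointers (hypothetical types, not the kernel's struct sock, locking omitted):

    #include <stddef.h>

    struct fake_sk {
            void (*data_ready)(struct fake_sk *);
            void *user_data;
    };

    struct hook_ctx {
            void (*saved_data_ready)(struct fake_sk *);
    };

    static void my_data_ready(struct fake_sk *sk)
    {
            (void)sk;  /* wake our worker; the saved pointer lets us chain */
    }

    static void hook(struct fake_sk *sk, struct hook_ctx *ctx)
    {
            ctx->saved_data_ready = sk->data_ready;  /* save the original   */
            sk->user_data = ctx;                     /* context for callbacks */
            sk->data_ready = my_data_ready;          /* install the override */
    }

    static void unhook(struct fake_sk *sk, struct hook_ctx *ctx)
    {
            sk->data_ready = ctx->saved_data_ready;  /* restore symmetrically */
            sk->user_data = NULL;
    }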
1874 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue() local
1877 queue->rd_enabled = true; in nvme_tcp_start_queue()
1878 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_start_queue()
1879 nvme_tcp_setup_sock_ops(queue); in nvme_tcp_start_queue()
1887 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
1889 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
1890 __nvme_tcp_stop_queue(queue); in nvme_tcp_start_queue()
1892 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_tcp_start_queue()
2055 * queue number might have changed. in nvme_tcp_configure_io_queues()
2413 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, in nvme_tcp_set_sg_inline() argument
2418 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2437 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event() local
2440 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_submit_async_event()
2444 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2465 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out()
2467 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2474 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout()
2477 int qid = nvme_tcp_queue_id(req->queue); in nvme_tcp_timeout()
2510 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, in nvme_tcp_map_data() argument
2523 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2535 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu() local
2536 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; in nvme_tcp_setup_cmd_pdu()
2562 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2564 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2566 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_cmd_pdu()
2573 ret = nvme_tcp_map_data(queue, rq); in nvme_tcp_setup_cmd_pdu()
2576 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2586 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs() local
2588 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2589 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2595 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2596 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq() local
2599 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2602 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2603 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2625 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll() local
2626 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2628 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2631 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2634 nvme_tcp_try_recv(queue); in nvme_tcp_poll()
2635 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2636 return queue->nr_cqe; in nvme_tcp_poll()
2641 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; in nvme_tcp_get_address() local
2647 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_get_address()
2650 mutex_lock(&queue->queue_lock); in nvme_tcp_get_address()
2652 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); in nvme_tcp_get_address()
2660 mutex_unlock(&queue->queue_lock); in nvme_tcp_get_address()