Lines matching full:pdu in the NVMe-oF TCP target (drivers/nvme/target/tcp.c)

171 	union nvme_tcp_pdu	pdu;  member
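
The pdu member above is a union of every NVMe/TCP PDU type, so one per-queue buffer can receive whichever header arrives next; all PDU types begin with the same 8-byte common header. Below is a userspace sketch of that common header and buffer, mirroring struct nvme_tcp_hdr from include/linux/nvme-tcp.h; the 128-byte bound and the union members shown are assumptions of this sketch, not the kernel's exact definition.

    #include <stdint.h>

    /* Userspace mirror of the NVMe/TCP common PDU header (cf. struct
     * nvme_tcp_hdr in include/linux/nvme-tcp.h); every PDU starts with it. */
    struct tcp_pdu_hdr {
        uint8_t  type;   /* ICReq, ICResp, CapsuleCmd, C2HData, R2T, ... */
        uint8_t  flags;  /* HDGST/DDGST present, DATA_LAST, ... */
        uint8_t  hlen;   /* header length in bytes */
        uint8_t  pdo;    /* PDU data offset; 0 when the PDU carries no data */
        uint32_t plen;   /* total PDU length on the wire (little-endian) */
    };

    /* One receive buffer that can hold any header, like the queue's
     * "union nvme_tcp_pdu pdu" member; the 128-byte bound (the fixed
     * ICReq/ICResp size) is an assumption of this sketch. */
    union tcp_pdu_buf {
        struct tcp_pdu_hdr hdr;
        uint8_t raw[128];
    };
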
299 void *pdu, size_t len) in nvmet_tcp_hdgst() argument
303 sg_init_one(&sg, pdu, len); in nvmet_tcp_hdgst()
304 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvmet_tcp_hdgst()
309 void *pdu, size_t len) in nvmet_tcp_verify_hdgst() argument
311 struct nvme_tcp_hdr *hdr = pdu; in nvmet_tcp_verify_hdgst()
321 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvmet_tcp_verify_hdgst()
322 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); in nvmet_tcp_verify_hdgst()
323 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvmet_tcp_verify_hdgst()
334 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) in nvmet_tcp_check_ddgst() argument
336 struct nvme_tcp_hdr *hdr = pdu; in nvmet_tcp_check_ddgst()
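
nvmet_tcp_hdgst() hashes the header bytes and drops the digest right behind them (pdu + len), while nvmet_tcp_verify_hdgst() saves the received digest, recomputes it in place, and compares the two. NVMe/TCP header and data digests are CRC32C. A minimal userspace sketch of the same flow, assuming the digest is stored little-endian immediately after the header; the kernel computes it through the crypto API ("crc32c" ahash) rather than the open-coded loop used here.

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Bitwise CRC32C (Castagnoli), the digest NVMe/TCP uses for HDGST/DDGST. */
    static uint32_t crc32c(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
        }
        return ~crc;
    }

    /* Append the header digest after hlen header bytes (little-endian
     * storage is an assumption of this sketch). */
    static void pdu_set_hdgst(uint8_t *pdu, size_t hlen)
    {
        uint32_t d = crc32c(pdu, hlen);

        memcpy(pdu + hlen, &d, sizeof(d));  /* assumes a little-endian host */
    }

    /* Verify: stash the received digest, recompute in place, compare. */
    static int pdu_verify_hdgst(uint8_t *pdu, size_t hlen)
    {
        uint32_t recv_digest, exp_digest;

        memcpy(&recv_digest, pdu + hlen, sizeof(recv_digest));
        pdu_set_hdgst(pdu, hlen);
        memcpy(&exp_digest, pdu + hlen, sizeof(exp_digest));
        return recv_digest == exp_digest ? 0 : -1;
    }
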
455 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; in nvmet_setup_c2h_data_pdu() local
463 pdu->hdr.type = nvme_tcp_c2h_data; in nvmet_setup_c2h_data_pdu()
464 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? in nvmet_setup_c2h_data_pdu()
466 pdu->hdr.hlen = sizeof(*pdu); in nvmet_setup_c2h_data_pdu()
467 pdu->hdr.pdo = pdu->hdr.hlen + hdgst; in nvmet_setup_c2h_data_pdu()
468 pdu->hdr.plen = in nvmet_setup_c2h_data_pdu()
469 cpu_to_le32(pdu->hdr.hlen + hdgst + in nvmet_setup_c2h_data_pdu()
471 pdu->command_id = cmd->req.cqe->command_id; in nvmet_setup_c2h_data_pdu()
472 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); in nvmet_setup_c2h_data_pdu()
473 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); in nvmet_setup_c2h_data_pdu()
476 pdu->hdr.flags |= NVME_TCP_F_DDGST; in nvmet_setup_c2h_data_pdu()
481 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvmet_setup_c2h_data_pdu()
482 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_c2h_data_pdu()
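
nvmet_setup_c2h_data_pdu() sizes the C2HData PDU so that pdo points just past the header (plus the header digest when enabled) and plen covers everything sent for the PDU; command_id, data_offset and data_length come from the completion's command id and the command's transfer progress (wbytes_done, transfer_len). A small sizing sketch, under the assumption that the plen expression cut off in the listing also adds the transfer length and an optional data digest; the helper names here are illustrative.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative sizing for a C2HData PDU: the payload starts right after
     * the header (plus HDGST when enabled), and plen covers everything sent
     * on the wire for this PDU.  DIGEST_LEN is the 4-byte CRC32C digest. */
    enum { DIGEST_LEN = 4 };

    static void c2h_data_sizes(size_t hdr_len, size_t data_len,
                               int hdgst_on, int ddgst_on,
                               uint8_t *pdo, uint32_t *plen)
    {
        size_t hdgst = hdgst_on ? DIGEST_LEN : 0;
        size_t ddgst = ddgst_on ? DIGEST_LEN : 0;

        *pdo  = (uint8_t)(hdr_len + hdgst);
        *plen = (uint32_t)(hdr_len + hdgst + data_len + ddgst);
    }
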
488 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; in nvmet_setup_r2t_pdu() local
495 pdu->hdr.type = nvme_tcp_r2t; in nvmet_setup_r2t_pdu()
496 pdu->hdr.flags = 0; in nvmet_setup_r2t_pdu()
497 pdu->hdr.hlen = sizeof(*pdu); in nvmet_setup_r2t_pdu()
498 pdu->hdr.pdo = 0; in nvmet_setup_r2t_pdu()
499 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvmet_setup_r2t_pdu()
501 pdu->command_id = cmd->req.cmd->common.command_id; in nvmet_setup_r2t_pdu()
502 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
503 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); in nvmet_setup_r2t_pdu()
504 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); in nvmet_setup_r2t_pdu()
506 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvmet_setup_r2t_pdu()
507 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_r2t_pdu()
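
nvmet_setup_r2t_pdu() asks the host for the part of the write payload that has not arrived yet: r2t_offset is what has already been received (rbytes_done), r2t_length is the remainder of the transfer, and ttag lets the later H2CData PDU be matched back to this command. A small sketch of that bookkeeping; the names are illustrative.

    #include <stdint.h>

    /* Illustrative R2T bookkeeping: ask the host for whatever part of the
     * write payload has not arrived yet. */
    struct r2t_window {
        uint32_t offset;    /* bytes already received (rbytes_done) */
        uint32_t length;    /* remaining bytes (transfer_len - rbytes_done) */
    };

    static struct r2t_window next_r2t(uint32_t transfer_len, uint32_t rbytes_done)
    {
        struct r2t_window w = {
            .offset = rbytes_done,
            .length = transfer_len - rbytes_done,
        };
        return w;
    }
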
513 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; in nvmet_setup_response_pdu() local
520 pdu->hdr.type = nvme_tcp_rsp; in nvmet_setup_response_pdu()
521 pdu->hdr.flags = 0; in nvmet_setup_response_pdu()
522 pdu->hdr.hlen = sizeof(*pdu); in nvmet_setup_response_pdu()
523 pdu->hdr.pdo = 0; in nvmet_setup_response_pdu()
524 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvmet_setup_response_pdu()
526 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvmet_setup_response_pdu()
527 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_response_pdu()
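
Both the R2T above and the CapsuleResp built by nvmet_setup_response_pdu() are header-only PDUs: the completion entry travels inside the response header itself, so pdo stays 0 and plen is just hlen plus the optional header digest. A one-line sizing sketch of that rule.

    #include <stdint.h>

    /* R2T and CapsuleResp carry no data section: pdo stays 0 and plen is
     * the header length plus the optional 4-byte header digest. */
    static uint32_t header_only_plen(uint8_t hlen, int hdgst_on)
    {
        return (uint32_t)hlen + (hdgst_on ? 4u : 0u);
    }
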
892 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; in nvmet_tcp_handle_icreq()
893 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; in nvmet_tcp_handle_icreq()
899 pr_err("bad nvme-tcp pdu length (%d)\n", in nvmet_tcp_handle_icreq()
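
nvmet_tcp_handle_icreq() reads the connection-initialization request out of the shared queue->pdu buffer and answers with an ICResp; the "bad nvme-tcp pdu length" error rejects an ICReq whose plen does not match the fixed ICReq size. A hedged sketch of that validation and of recording the digest options the host requested; the 128-byte size, the PFV check and the DGST bit layout are assumptions taken from the NVMe/TCP spec, not read off this listing.

    #include <stdint.h>
    #include <stdbool.h>

    /* Hedged sketch of ICReq validation: reject a connection-init request
     * whose declared length or format version is wrong, then record which
     * digests the host asked for. */
    struct ic_settings {
        bool hdr_digest;    /* HDGST on every PDU header */
        bool data_digest;   /* DDGST on every data PDU */
    };

    static int handle_icreq(uint32_t plen, uint16_t pfv, uint8_t dgst,
                            struct ic_settings *out)
    {
        if (plen != 128u)   /* fixed ICReq size (assumed); "bad nvme-tcp pdu length" */
            return -1;
        if (pfv != 0)       /* only PDU format version 0 is defined (assumed) */
            return -1;
        out->hdr_digest  = dgst & 0x1;  /* bit 0: header digest enable (assumed) */
        out->data_digest = dgst & 0x2;  /* bit 1: data digest enable (assumed) */
        return 0;
    }
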
959 * we don't, we can simply prepare for the next pdu and bail out, in nvmet_tcp_handle_req_failure()
983 struct nvme_tcp_data_pdu *data = &queue->pdu.data; in nvmet_tcp_handle_h2c_data_pdu()
1014 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); in nvmet_tcp_handle_h2c_data_pdu()
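
nvmet_tcp_handle_h2c_data_pdu() matches incoming write data against the R2T the target issued, and "H2CData PDU len %u is invalid" fires when the declared length is unusable. A sketch of plausible sanity checks (zero length, an offset that does not continue where reception left off, a chunk that would overrun the expected transfer); the kernel's exact limits, such as a per-PDU maximum, are not reproduced here.

    #include <stdint.h>

    /* Hedged sanity checks for an incoming H2CData chunk against the write
     * transfer the target expects. */
    static int check_h2c_data(uint32_t data_offset, uint32_t data_len,
                              uint32_t rbytes_done, uint32_t transfer_len)
    {
        if (data_len == 0)
            return -1;      /* "H2CData PDU len %u is invalid" */
        if (data_offset != rbytes_done)
            return -1;      /* chunk does not continue where we left off */
        if ((uint64_t)data_offset + data_len > transfer_len)
            return -1;      /* would overrun the command's buffer */
        return 0;
    }
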
1032 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
1033 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
1039 pr_err("unexpected pdu type (%d) before icreq\n", in nvmet_tcp_done_recv_pdu()
1048 pr_err("queue %d: received icreq pdu in state %d\n", in nvmet_tcp_done_recv_pdu()
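
nvmet_tcp_done_recv_pdu() enforces ordering on the freshly received header: until the ICReq/ICResp exchange has completed, the only acceptable PDU is an ICReq, and an ICReq arriving on a live queue is equally a protocol error. A small sketch of that check; the state names are illustrative.

    /* Ordering rule applied when dispatching a fully received header. */
    enum queue_state { Q_CONNECTING, Q_LIVE };

    static int check_pdu_order(enum queue_state state, int is_icreq)
    {
        if (state == Q_CONNECTING && !is_icreq)
            return -1;  /* "unexpected pdu type (%d) before icreq" */
        if (state == Q_LIVE && is_icreq)
            return -1;  /* "received icreq pdu in state %d" */
        return 0;
    }
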
1178 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1185 iov.iov_base = (void *)&queue->pdu + queue->offset; in nvmet_tcp_try_recv_pdu()
1210 pr_err("unexpected pdu type %d\n", hdr->type); in nvmet_tcp_try_recv_pdu()
1216 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen); in nvmet_tcp_try_recv_pdu()
1225 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { in nvmet_tcp_try_recv_pdu()
1231 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { in nvmet_tcp_try_recv_pdu()
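
nvmet_tcp_try_recv_pdu() fills the per-queue pdu buffer incrementally: queue->offset records how many header bytes have arrived, so a short read just advances the offset and the function bails out until the socket has more, and only a complete header is checked for type, hlen and header digest. A userspace sketch of that non-blocking, resumable receive; recv() with MSG_DONTWAIT stands in for the kernel socket calls.

    #include <errno.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Receive header bytes piece by piece on a non-blocking socket.
     * 'offset' persists across calls, like queue->offset, and 'left' is how
     * much of the header is still outstanding; a short read records progress. */
    static int try_recv_hdr(int fd, uint8_t *pdu_buf, size_t *offset, size_t *left)
    {
        while (*left) {
            ssize_t n = recv(fd, pdu_buf + *offset, *left, MSG_DONTWAIT);

            if (n < 0)
                return (errno == EAGAIN || errno == EWOULDBLOCK) ? 0 : -1;
            if (n == 0)
                return -1;      /* peer closed the connection */
            *offset += (size_t)n;
            *left -= (size_t)n;
        }
        return 1;   /* full header buffered; validate type, hlen and HDGST next */
    }
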
1313 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", in nvmet_tcp_try_recv_ddgst()
1315 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1744 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_peek_pdu()
1747 .iov_base = (u8 *)&queue->pdu + queue->offset, in nvmet_tcp_try_peek_pdu()
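
nvmet_tcp_try_peek_pdu() looks at the start of the stream through the same &queue->pdu + queue->offset window without consuming it, so the target can decide how to treat the connection before committing to a normal PDU read. A minimal sketch of peeking a header with MSG_PEEK; the userspace recvmsg() call is a stand-in for the kernel's socket interface.

    #include <stddef.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Peek at the first bytes of the stream without consuming them: they
     * stay queued in the socket, so a later real receive still sees them. */
    static ssize_t peek_bytes(int fd, void *buf, size_t len)
    {
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

        return recvmsg(fd, &msg, MSG_PEEK | MSG_DONTWAIT);
    }
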