Lines matching +full:cmd +full:- +full:cnt +full:- +full:name

All matches appear to come from drivers/target/iscsi/cxgbit/cxgbit_ddp.c, the DDP (direct data placement) support code of the Chelsio cxgbit iSCSI target driver in the Linux kernel. Matched fragments are grouped by containing function; "..." marks intervening source lines that did not match.
// SPDX-License-Identifier: GPL-2.0-only

In cxgbit_set_one_ppod():
	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
	...
	ppod->addr[i] = cpu_to_be64(addr + offset);
	...
	if (offset == (len + sg->offset)) {
	...
	ppod->addr[i] = 0ULL;
	...
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
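	/* A pagepod (64 bytes, given PPOD_SIZE_SHIFT == 6 below) holds the
	 * ppod header plus the bus addresses of up to four pages; the fifth
	 * addr[] slot, written by the final statement outside the fill loop,
	 * repeats the first page of the next pod. Slots with no SG page
	 * left to map are zero-filled.
	 */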

In cxgbit_ppod_init_idata():
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	...
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(0) |
			 T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	...
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
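	/* One FW_ULPTX_WR work request writes npods pagepods into adapter
	 * pagepod memory (ULP_TX_MEM_WRITE), carrying the pods inline as
	 * immediate data (ULP_TX_SC_IMM). dlen and pm_addr are shifted right
	 * by 5 because ULP memory lengths and addresses are expressed in
	 * 32-byte units; len16 is the size of the request after the WR
	 * header, rounded up to 16-byte units.
	 */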

In cxgbit_ppod_write_idata():
	struct cxgbit_device *cdev = csk->com.cdev;
	...
	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->data;
	...
	__skb_queue_tail(&csk->ppodq, skb);
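	/* Note the work request is not transmitted here: it is queued on
	 * csk->ppodq, to be flushed ahead of the corresponding R2T so the
	 * pagepods are in adapter memory before any Data-Out arrives
	 * (presumed ordering; the flush itself happens elsewhere in the
	 * driver).
	 */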

In cxgbit_ddp_set_map():
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	struct scatterlist *sg = ttinfo->sgl;
	...
	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;

		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					      &sg, &offset);
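	/* Chunked pagepod programming: each loop iteration emits one
	 * immediate-data work request covering at most
	 * ULPMEM_IDATA_MAX_NPPODS pods, with (&sg, &offset) threaded
	 * through so consecutive pods resume the same scatterlist walk.
	 */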

In cxgbit_ddp_sgl_check():
	unsigned int last_sgidx = nents - 1;
	...
	unsigned int len = sg->length + sg->offset;

	if ((sg->offset & 0x3) || (i && sg->offset) ||
	    ((i != last_sgidx) && (len != PAGE_SIZE)))
		return -EINVAL;
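	/* The SGL must be hardware-friendly: a 4-byte-aligned offset, a
	 * nonzero offset permitted only on the first entry, and every entry
	 * except the last ending exactly on a page boundary
	 * (offset + length == PAGE_SIZE).
	 */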

In cxgbit_ddp_reserve():
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;

	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/* the ddp tag will be used for the ttt in the outgoing r2t pdu */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	...
	ttinfo->npods = ret;

	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	...
	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
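	/* Reserve sequence: threshold/SGL validation, pagepod + tag
	 * reservation, DMA mapping (sgl->offset is temporarily zeroed so
	 * the mapping starts at the page boundary; the real offset is
	 * carried in the ppod header instead), then the queued pagepod
	 * writes. Any failure after the reservation unwinds through
	 * rel_ppods so the pods and tag return to the pool.
	 */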

void
cxgbit_get_r2t_ttt(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret;

	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}
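	/* The reserved DDP tag is returned as the R2T target transfer tag;
	 * the initiator echoes it in its Data-Out PDUs, letting the adapter
	 * locate the pagepods and place the payload directly into the
	 * mapped SGL. On reservation failure the command falls back to
	 * ordinary (non-DDP) data reception.
	 */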

void cxgbit_unmap_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
			put_page(sg_page(&ccmd->sg));
		} else {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);
			struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

			/* Abort the TCP conn if DDP is not complete to
			 * avoid any possibility of DDP after freeing
			 * the cmd.
			 */
			if (unlikely(cmd->write_data_done !=
				     cmd->se_cmd.data_length))
				cxgbit_abort_conn(csk);

			if (unlikely(ttinfo->sgl)) {
				dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
					     ttinfo->nents, DMA_FROM_DEVICE);
				ttinfo->nents = 0;
				ttinfo->sgl = NULL;
			}
			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
		}
		ccmd->release = false;
	}
}
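	/* Teardown mirrors cxgbit_ddp_reserve(): unmap the SGL and return
	 * the pagepods. If the write never completed, the TCP connection is
	 * aborted first so the adapter cannot DMA into memory that is about
	 * to be freed along with the command.
	 */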

In cxgbit_ddp_init():
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	...
	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}
	...
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					 & 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     lldi->vr->iscsi.size, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2,
			     lldi->vr->ppod_edram.start,
			     lldi->vr->ppod_edram.size);

	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
	}
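	/* DDP is enabled per adapter only if firmware provisioned an iSCSI
	 * pagepod memory region, the pagepod manager (ppm) initialized, a
	 * usable default page-size index exists, and at least 1024 pagepods
	 * are available; otherwise the target runs without DDP.
	 */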