/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_classify.h>	/* ol_tx_classify, ol_tx_classify_mgmt */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */
#include <ol_tx_sched.h>	/* ol_tx_sched */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc */
#include <ol_tx.h>
#include <cdp_txrx_ipa.h>

#include <hif.h>		/* HIF_DEVICE */
#include <htc_api.h>		/* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>		/* htc_endpoint */
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_handle.h>

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
	defined(HIF_IPCI)
#include <ce_api.h>
#endif
/**
 * ol_tx_setup_fastpath_ce_handles() - Update ce_handle for fastpath use.
 *
 * @osc: pointer to HIF context
 * @pdev: pointer to ol pdev
 *
 * Return: void
 */
void ol_tx_setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
				     struct ol_txrx_pdev_t *pdev)
{
	/*
	 * Before the HTT attach, set up the CE handles.
	 * CE handles are (struct CE_state *).
	 * This is only required in the fast path.
	 */
	pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);
}

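/**
 * ol_tx_ll_wrapper() - Hand an msdu list to the fastpath Tx routine
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: linked list of msdus to be sent out
 *
 * Forwards the list to ol_tx_ll_fast() when HIF fastpath mode is enabled;
 * otherwise asserts, since only the fast path is supported here.
 *
 * Return: NULL if all msdus were accepted, otherwise the unsent msdu list.
 */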
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device &&
		       hif_is_fastpath_mode_enabled(hif_device))) {
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	} else {
		qdf_print("Fast path is disabled");
		QDF_BUG(0);
	}
	return msdu_list;
}

/**
 * ol_tx_tso_adjust_pkt_dnld_len() - Update download length for a TSO packet
 *
 * @msdu: tso msdu for which the download length is updated
 * @msdu_info: tso msdu_info for the msdu
 * @download_len: packet download length
 *
 * Return: Updated download length
 */
#if defined(FEATURE_TSO)
static uint32_t
ol_tx_tso_adjust_pkt_dnld_len(qdf_nbuf_t msdu,
			      struct ol_txrx_msdu_info_t *msdu_info,
			      uint32_t download_len)
{
	uint32_t frag0_len = 0, delta = 0, eit_hdr_len = 0;
	uint32_t loc_download_len = download_len;

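	/*
	 * Frag 0 of the nbuf carries the HTT/HTC descriptor (see
	 * htt_tx_desc_init()), so exclude it from the download budget and
	 * cap the data portion at the EIT (Ether/IP/TCP) header length of
	 * the current TSO segment.
	 */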
	frag0_len = qdf_nbuf_get_frag_len(msdu, 0);
	loc_download_len -= frag0_len;
	eit_hdr_len = msdu_info->tso_info.curr_seg->seg.tso_frags[0].length;

	if (eit_hdr_len < loc_download_len) {
		delta = loc_download_len - eit_hdr_len;
		download_len -= delta;
	}

	return download_len;
}
#else
static uint32_t
ol_tx_tso_adjust_pkt_dnld_len(qdf_nbuf_t msdu,
			      struct ol_txrx_msdu_info_t *msdu_info,
			      uint32_t download_len)
{
	return download_len;
}
#endif

/**
 * ol_tx_prepare_ll_fast() - Allocate and prepare a Tx descriptor
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t *pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;
	enum extension_header_type type;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		qdf_tso_seg_dbg_setowner(tx_desc->tso_desc, tx_desc);
		qdf_tso_seg_dbg_record(tx_desc->tso_desc,
				       TSOSEG_LOC_TXPREPLLFAST);
		tx_desc->tso_num_desc = msdu_info->tso_info.tso_num_seg_list;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

#if defined(HELIUMPLUS)
	qdf_mem_zero(tx_desc->htt_frag_desc, sizeof(struct msdu_ext_desc_t));
#endif

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO: Take care of multiple fragments */

	type = ol_tx_get_ext_header_type(vdev, msdu);

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	if (qdf_unlikely(htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
					  tx_desc->htt_tx_desc_paddr,
					  tx_desc->id, msdu,
					  &msdu_info->htt,
					  &msdu_info->tso_info,
					  NULL, type))) {
		/*
		 * HTT Tx descriptor initialization failed;
		 * therefore, free the tx desc.
		 */
		ol_tx_desc_free(pdev, tx_desc);
		return NULL;
	}

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags is expected to be at most 2 */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor.
	 * Refer to htt_tx_desc_init().
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
					  tx_desc->htt_frag_desc,
					  &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
					  msdu_info->tso_info.msdu_stats_idx,
					  msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
			if (type != EXT_HEADER_NOT_PRESENT) {
				frag_paddr +=
					sizeof(struct htt_tx_msdu_desc_ext_t);
				frag_len -=
					sizeof(struct htt_tx_msdu_desc_ext_t);
			}
#if defined(HELIUMPLUS)
			htt_tx_desc_frag(pdev->htt_pdev,
					 tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_debug("htt_fdesc=%pK frag=%d frag_paddr=0x%0llx len=%zu",
				  tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			ol_txrx_dump_pkt(msdu, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS) */
		}
	}

	/*
	 * Do we want to turn on the word_stream bit-map here? For Linux,
	 * non-TSO, this is not required. We still have to mark the swap bit
	 * correctly when posting to the ring.
	 */
	/* Check to make sure the data download length is correct */

	/*
	 * TODO: Can we remove this check and always download a fixed length?
	 */

	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
		*pkt_download_len += sizeof(struct htt_tx_msdu_desc_ext_t);

	if (qdf_unlikely(qdf_nbuf_len(msdu) < *pkt_download_len))
		*pkt_download_len = qdf_nbuf_len(msdu);

	if (msdu_info->tso_info.curr_seg)
		*pkt_download_len = ol_tx_tso_adjust_pkt_dnld_len(
							msdu, msdu_info,
							*pkt_download_len);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field; we can probably get away
	 * with it for the time being, since this is not checked in f/w.
	 */
	/* TODO: Prefill this, look at multi-fragment case */
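	/*
	 * With the new HTT message format, the length filled into the HTC
	 * header excludes the HTC header itself.
	 */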
	if (ol_txrx_get_new_htt_msg_format(pdev))
		HTC_TX_DESC_FILL(htc_hdr_vaddr,
				 *pkt_download_len - HTC_HEADER_LEN, ep_id, 0);
	else
		HTC_TX_DESC_FILL(htc_hdr_vaddr, *pkt_download_len, ep_id, 0);

	return tx_desc;
}

#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: NULL on success; pointer to the list of unsent msdus on failure.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;
	uint32_t tso_msdu_stats_idx = 0;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			ol_txrx_err("ol_tx_prepare_tso failed");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
						  tx.dropped.host_reject,
						  msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		if (msdu_info.tso_info.is_tso) {
			tso_msdu_stats_idx =
				ol_tx_tso_get_stats_idx(vdev->pdev);
			msdu_info.tso_info.msdu_stats_idx = tso_msdu_stats_idx;
			ol_tx_tso_update_stats(vdev->pdev,
					       &(msdu_info.tso_info),
					       msdu, tso_msdu_stats_idx);
		}

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);

		/* init the current segment to the 1st segment in the list */
		while (segments) {
			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr;

			segments--;

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			pkt_download_len = ((struct htt_pdev_t *)
					(pdev->htt_pdev))->download_len;
			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
							&pkt_download_len,
							ep_id, &msdu_info);

			TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);

			if (qdf_likely(tx_desc)) {
				struct qdf_tso_seg_elem_t *next_seg;

				ol_tx_trace_pkt(msdu, tx_desc->id,
						vdev->vdev_id,
						vdev->qdf_opmode);
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				htt_tx_desc_display(tx_desc->htt_tx_desc);

				/* mark the relevant tso_seg free-able */
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg->
						sent_to_target = 1;
					next_seg = msdu_info.tso_info.
						curr_seg->next;
					/*
					 * If this is a jumbo nbuf, then
					 * increment the number of nbuf users
					 * for each additional segment of the
					 * msdu. This will ensure that the skb
					 * is freed only after receiving tx
					 * completion for all segments of an
					 * nbuf.
					 */
					if (next_seg)
						qdf_nbuf_inc_users(msdu);
				} else {
					next_seg = NULL;
				}

				if ((ce_send_fast(pdev->ce_tx_hdl, msdu,
						  ep_id,
						  pkt_download_len) == 0)) {
					struct qdf_tso_info_t *tso_info =
							&msdu_info.tso_info;
					/*
					 * If TSO packet, free associated
					 * remaining TSO segment descriptors.
					 */
					if (tx_desc->pkt_type ==
							OL_TX_FRM_TSO) {
						tso_info->curr_seg = next_seg;
						ol_free_remaining_tso_segs(vdev,
							&msdu_info, true);
						/*
						 * Revert the nbuf users
						 * increment done for the
						 * current segment.
						 */
						if (next_seg)
							qdf_nbuf_tx_free(
							msdu,
							QDF_NBUF_PKT_ERROR);
					}

					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_frame_free_nonstd(pdev,
						tx_desc,
						htt_tx_status_download_fail);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg)
					msdu_info.tso_info.curr_seg = next_seg;

				if (msdu_info.tso_info.is_tso) {
					TXRX_STATS_TSO_INC_SEG(vdev->pdev,
						tso_msdu_stats_idx);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev,
						tso_msdu_stats_idx);
				}
			} else {
				/*
				 * If TSO packet, free associated
				 * remaining TSO segment descriptors.
				 */
				if (qdf_nbuf_is_tso(msdu))
					ol_free_remaining_tso_segs(vdev,
							&msdu_info, true);
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
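/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Non-TSO variant, compiled when FEATURE_TSO is not defined.
 *
 * Return: NULL on success; pointer to the list of unsent msdus on failure.
 */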
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		pkt_download_len = ((struct htt_pdev_t *)
				(pdev->htt_pdev))->download_len;
		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						&pkt_download_len, ep_id,
						&msdu_info);

		TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);

		if (qdf_likely(tx_desc)) {
			DPTRACE(qdf_dp_trace_ptr(msdu,
				QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				QDF_TRACE_DEFAULT_PDEV_ID,
				qdf_nbuf_data_addr(msdu),
				sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
				vdev->vdev_id, 0, vdev->qdf_opmode));

			ol_tx_trace_pkt(msdu, tx_desc->id, vdev->vdev_id,
					vdev->qdf_opmode);
			/*
			 * If debug display is enabled, show the meta-data
			 * being downloaded to the target via the HTT tx
			 * descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((ce_send_fast(pdev->ce_tx_hdl, msdu,
					  ep_id, pkt_download_len) == 0)) {
				/*
				 * The packet could not be sent.
				 * Free the descriptor, return the packet to
				 * the caller.
				 */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */