1 /*
2  * Copyright (c) 2011, 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * @file htt_tx.c
22  * @brief Implement transmit aspects of HTT.
23  * @details
24  *  This file contains three categories of HTT tx code:
25  *  1.  An abstraction of the tx descriptor, to hide the
26  *      differences between the HL vs. LL tx descriptor.
27  *  2.  Functions for allocating and freeing HTT tx descriptors.
28  *  3.  The function that accepts a tx frame from txrx and sends the
29  *      tx frame to HTC.
30  */
31 #include <osdep.h>              /* uint32_t, offsetof, etc. */
32 #include <qdf_types.h>          /* qdf_dma_addr_t */
33 #include <qdf_mem.h>         /* qdf_mem_alloc_consistent et al */
34 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
35 #include <qdf_time.h>           /* qdf_mdelay */
36 
37 #include <htt.h>                /* htt_tx_msdu_desc_t */
38 #include <htc.h>                /* HTC_HDR_LENGTH */
39 #include <htc_api.h>            /* htc_flush_surprise_remove */
40 #include <ol_cfg.h>             /* ol_cfg_netbuf_frags_max, etc. */
41 #include <ol_htt_tx_api.h>      /* HTT_TX_DESC_VADDR_OFFSET */
42 #include <ol_txrx_htt_api.h>    /* ol_tx_msdu_id_storage */
43 #include <ol_txrx_internal.h>
44 #include <htt_internal.h>
45 
46 #include <cds_utils.h>
47 #include <ce_api.h>
48 #include <ce_internal.h>
49 
50 /* IPA Micro controller TX data packet HTT Header Preset
51  * 31 | 30  29 | 28 | 27 | 26  22  | 21   16 | 15  13   | 12  8      | 7 0
52  ***----------------------------------------------------------------------------
53  * R  | CS  OL | R  | PP | ext TID | vdev ID | pkt type | pkt subtyp | msg type
54  * 0  | 0      | 0  |    | 0x1F    | 0       | 2        | 0          | 0x01
55  ***----------------------------------------------------------------------------
56  * pkt ID                                    | pkt length
57  ***----------------------------------------------------------------------------
58  *                                frag_desc_ptr
59  ***----------------------------------------------------------------------------
60  *                                   peer_id
61  ***----------------------------------------------------------------------------
62  */
63 #define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
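
/*
 * Illustrative breakdown of the default header value against the field
 * layout above (all values derived from the constant itself):
 *   bits  7:0  = 0x01  (msg type)
 *   bits 12:8  = 0     (pkt subtype)
 *   bits 15:13 = 2     (pkt type)
 *   bits 21:16 = 0     (vdev ID)
 *   bits 26:22 = 0x1F  (ext TID)
 * i.e. (0x1F << 22) | (2 << 13) | 0x01 == 0x07C04001.
 */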
64 
65 #ifdef QCA_WIFI_3_0
66 #define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 20
67 #define IPA_UC_TX_BUF_FRAG_HDR_OFFSET  64
68 #define IPA_UC_TX_BUF_TSO_HDR_SIZE     6
69 #define IPA_UC_TX_BUF_PADDR_HI_MASK    0x0000001F
70 #else
71 #define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 16
72 #define IPA_UC_TX_BUF_FRAG_HDR_OFFSET  32
73 #endif /* QCA_WIFI_3_0 */
74 
75 #if HTT_PADDR64
76 #define HTT_TX_DESC_FRAG_FIELD_UPDATE(frag_filed_ptr, frag_desc_addr)          \
77 do {                                                                           \
78 	*frag_filed_ptr = qdf_get_lower_32_bits(frag_desc_addr);               \
79 	frag_filed_ptr++;                                                      \
80 	/* frags_desc_ptr.hi */                                                \
81 	*frag_filed_ptr = qdf_get_upper_32_bits(frag_desc_addr) & 0x1F;        \
82 } while (0)
83 #else
84 #define HTT_TX_DESC_FRAG_FIELD_UPDATE(frag_filed_ptr, frag_desc_addr)          \
85 do {                                                                           \
86 	*frag_filed_ptr = qdf_get_lower_32_bits(frag_desc_addr);               \
87 } while (0)
88 #endif
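
/*
 * Worked example (illustrative values): with HTT_PADDR64, a fragment
 * descriptor address of 0x1234567890 (37 bits) is split into two words:
 *   frags_desc_ptr.lo = 0x34567890
 *   frags_desc_ptr.hi = 0x12   (upper bits, masked with 0x1F)
 */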
89 
90 /*--- setup / tear-down functions -------------------------------------------*/
91 
92 static qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
93 				char *target_vaddr);
94 
95 #ifdef HELIUMPLUS
96 /**
97  * htt_tx_desc_get_size() - get tx descriptor sizes
98  * @pdev:	htt device instance pointer
99  *
100  * This function will get HTT TX descriptor size and fragment descriptor size
101  *
102  * Return: None
103  */
104 static void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
105 {
106 	pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
107 	if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
108 		/*
109 		 * sizeof MSDU_EXT/Fragmentation descriptor.
110 		 */
111 		pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
112 	} else {
113 		/*
114 		 * Add the fragmentation descriptor elements.
115 		 * Add the most that the OS may deliver, plus one more
116 		 * in case the txrx code adds a prefix fragment (for
117 		 * TSO or audio interworking SNAP header)
118 		 */
119 		pdev->frag_descs.size =
120 			(ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
121 			+ 4;
122 	}
123 }
124 
125 /**
126  * htt_tx_frag_desc_field_update() - Update fragment descriptor field
127  * @pdev:	htt device instance pointer
128  * @fptr:	Fragment descriptor field pointer
129  * @index:	Descriptor index to find page and offset
130  * @desc_v_ptr:	descriptor virtual pointer to find offset
131  *
132  * This function will update the fragment descriptor field with the actual
133  * fragment descriptor's starting physical address
134  *
135  * Return: None
136  */
137 static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
138 		uint32_t *fptr, unsigned int index,
139 		struct htt_tx_msdu_desc_t *desc_v_ptr)
140 {
141 	unsigned int target_page;
142 	unsigned int offset;
143 	struct qdf_mem_dma_page_t *dma_page;
144 	qdf_dma_addr_t frag_desc_addr;
145 
146 	target_page = index / pdev->frag_descs.desc_pages.num_element_per_page;
147 	offset = index % pdev->frag_descs.desc_pages.num_element_per_page;
148 	dma_page = &pdev->frag_descs.desc_pages.dma_pages[target_page];
149 	frag_desc_addr = (dma_page->page_p_addr +
150 		offset * pdev->frag_descs.size);
151 	HTT_TX_DESC_FRAG_FIELD_UPDATE(fptr, frag_desc_addr);
152 }
153 
154 /**
155  * htt_tx_frag_desc_attach() - Attach fragment descriptor
156  * @pdev:		htt device instance pointer
157  * @desc_pool_elems:	Number of fragment descriptors
158  *
159  * This function will allocate fragment descriptor
160  *
161  * Return: 0 success
162  */
163 static int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
164 	uint16_t desc_pool_elems)
165 {
166 	pdev->frag_descs.pool_elems = desc_pool_elems;
167 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->frag_descs.desc_pages,
168 		pdev->frag_descs.size, desc_pool_elems,
169 		qdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
170 	if ((0 == pdev->frag_descs.desc_pages.num_pages) ||
171 		(!pdev->frag_descs.desc_pages.dma_pages)) {
172 		ol_txrx_err("FRAG descriptor alloc fail");
173 		return -ENOBUFS;
174 	}
175 	return 0;
176 }
177 
178 /**
179  * htt_tx_frag_desc_detach() - Detach fragment descriptor
180  * @pdev:		htt device instance pointer
181  *
182  * This function will free fragment descriptor
183  *
184  * Return: None
185  */
186 static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev)
187 {
188 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->frag_descs.desc_pages,
189 		qdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
190 }
191 
192 /**
193  * htt_tx_frag_alloc() - Allocate single fragment descriptor from the pool
194  * @pdev:		htt device instance pointer
195  * @index:		Descriptor index
196  * @frag_paddr:	        Fragment descriptor physical address
197  * @frag_ptr:		Fragment descriptor virtual address
198  *
199  * This function will allocate a single fragment descriptor from the pool
200  *
201  * Return: 0 on success, 1 on failure
202  */
203 int htt_tx_frag_alloc(htt_pdev_handle pdev,
204 	u_int16_t index, qdf_dma_addr_t *frag_paddr, void **frag_ptr)
205 {
206 	uint16_t frag_page_index;
207 	uint16_t frag_elem_index;
208 	struct qdf_mem_dma_page_t *dma_page;
209 
210 	/*
211 	 * Index should never be 0, since it is used by the hardware
212 	 * to terminate the link.
213 	 */
214 	if (index >= pdev->tx_descs.pool_elems) {
215 		*frag_ptr = NULL;
216 		return 1;
217 	}
218 
219 	frag_page_index = index /
220 		pdev->frag_descs.desc_pages.num_element_per_page;
221 	frag_elem_index = index %
222 		pdev->frag_descs.desc_pages.num_element_per_page;
223 	dma_page = &pdev->frag_descs.desc_pages.dma_pages[frag_page_index];
224 
225 	*frag_ptr = dma_page->page_v_addr_start +
226 		frag_elem_index * pdev->frag_descs.size;
227 	if (((char *)(*frag_ptr) < dma_page->page_v_addr_start) ||
228 		((char *)(*frag_ptr) > dma_page->page_v_addr_end)) {
229 		*frag_ptr = NULL;
230 		return 1;
231 	}
232 
233 	*frag_paddr = dma_page->page_p_addr +
234 		frag_elem_index * pdev->frag_descs.size;
235 	return 0;
236 }
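
/*
 * Typical usage (hypothetical caller sketch; names are illustrative):
 *
 *	qdf_dma_addr_t frag_paddr;
 *	void *frag_vaddr;
 *
 *	if (htt_tx_frag_alloc(pdev, desc_index, &frag_paddr, &frag_vaddr))
 *		return;	// out-of-range index, nothing was allocated
 *	// frag_vaddr/frag_paddr now reference the fragment descriptor
 *	// reserved for this tx descriptor index.
 */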
237 #else
238 
239 /**
240  * htt_tx_desc_get_size() - get tx descriptor sizes
241  * @pdev:	htt device instance pointer
242  *
243  * This function will get HTT TX descriptor size and fragment descriptor size
244  *
245  * Return: None
246  */
247 static inline void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
248 {
249 	if (pdev->cfg.is_high_latency) {
250 		pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
251 	} else {
252 		/*
253 		 * Start with the size of the base struct
254 		 * that actually gets downloaded.
255 		 *
256 		 * Add the fragmentation descriptor elements.
257 		 * Add the most that the OS may deliver, plus one more
258 		 * in case the txrx code adds a prefix fragment (for
259 		 * TSO or audio interworking SNAP header)
260 		 */
261 		pdev->tx_descs.size =
262 		sizeof(struct htt_host_tx_desc_t)
263 		+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
264 		/* 2x uint32_t */
265 		+ 4; /* uint32_t fragmentation list terminator */
266 	}
267 }
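
/*
 * For example (assumed value): if ol_cfg_netbuf_frags_max() returns 6,
 * the LL descriptor size works out to
 *   sizeof(struct htt_host_tx_desc_t) + (6 + 1) * 8 + 4
 * i.e. the base descriptor plus seven 8-byte fragment entries plus the
 * 4-byte fragment list terminator.
 */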
268 
269 #ifndef CONFIG_HL_SUPPORT
270 
271 /**
272  * htt_tx_frag_desc_field_update() - Update fragment descriptor field
273  * @pdev:	htt device instance pointer
274  * @fptr:	Fragment descriptor field pointer
275  * @index:	Descriptor index to find page and offset
276  * @desc_v_ptr:	descriptor virtual pointer to find offset
277  *
278  * This function will update the fragment descriptor field with the actual
279  * fragment descriptor's starting physical address
280  *
281  * Return: None
282  */
283 static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
284 		uint32_t *fptr, unsigned int index,
285 		struct htt_tx_msdu_desc_t *desc_v_ptr)
286 {
287 	*fptr = (uint32_t)htt_tx_get_paddr(pdev, (char *)desc_v_ptr) +
288 		HTT_TX_DESC_LEN;
289 }
290 #endif
291 
292 /**
293  * htt_tx_frag_desc_attach() - Attach fragment descriptor
294  * @pdev:	htt device instance pointer
295  * @desc_pool_elems:	Number of fragment descriptors
296  *
297  * This function will allocate fragment descriptor
298  *
299  * Return: 0 success
300  */
301 static inline int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
302 	int desc_pool_elems)
303 {
304 	return 0;
305 }
306 
307 /**
308  * htt_tx_frag_desc_detach() - Detach fragment descriptor
309  * @pdev:		htt device instance pointer
310  *
311  * This function will free fragment descriptor
312  *
313  * Return: None
314  */
315 static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev) {}
316 #endif /* HELIUMPLUS */
317 
318 #ifdef CONFIG_HL_SUPPORT
319 
320 /**
321  * htt_tx_attach() - Attach HTT device instance
322  * @pdev:		htt device instance pointer
323  * @desc_pool_elems:	Number of TX descriptors
324  *
325  * This function will allocate HTT TX resources
326  *
327  * Return: 0 Success
328  */
329 int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
330 {
331 	int i, i_int, pool_size;
332 	uint32_t **p;
333 	uint32_t num_link = 0;
334 	uint16_t num_page, num_desc_per_page;
335 	void **cacheable_pages = NULL;
336 
337 	htt_tx_desc_get_size(pdev);
338 
339 	/*
340 	 * Make sure tx_descs.size is a multiple of 4-bytes.
341 	 * It should be, but round up just to be sure.
342 	 */
343 	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
344 
345 	pdev->tx_descs.pool_elems = desc_pool_elems;
346 	pdev->tx_descs.alloc_cnt = 0;
347 	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
348 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
349 				  pdev->tx_descs.size,
350 				  pdev->tx_descs.pool_elems,
351 				  qdf_get_dma_mem_context((&pdev->tx_descs),
352 							  memctx), true);
353 	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
354 	    (!pdev->tx_descs.desc_pages.cacheable_pages)) {
355 		ol_txrx_err("HTT desc alloc fail");
356 		goto out_fail;
357 	}
358 	num_page = pdev->tx_descs.desc_pages.num_pages;
359 	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;
360 
361 	/* link tx descriptors into a freelist */
362 	cacheable_pages = pdev->tx_descs.desc_pages.cacheable_pages;
363 
364 	pdev->tx_descs.freelist = (uint32_t *)cacheable_pages[0];
365 	p = (uint32_t **)pdev->tx_descs.freelist;
366 	for (i = 0; i < num_page; i++) {
367 		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
368 			if (i_int == (num_desc_per_page - 1)) {
369 				/*
370 				 * Last element on this page,
371 				 * should point to the next page
372 				 */
373 				if (!cacheable_pages[i + 1]) {
374 					ol_txrx_err("over flow num link %d",
375 						   num_link);
376 					goto free_htt_desc;
377 				}
378 				*p = (uint32_t *)cacheable_pages[i + 1];
379 			} else {
380 				*p = (uint32_t *)
381 					(((char *)p) + pdev->tx_descs.size);
382 			}
383 			num_link++;
384 			p = (uint32_t **) *p;
385 			/* Last link established exit */
386 			if (num_link == (pdev->tx_descs.pool_elems - 1))
387 				break;
388 		}
389 	}
390 	*p = NULL;
391 
392 	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
393 		ol_txrx_err("HTT Frag descriptor alloc fail");
394 		goto free_htt_desc;
395 	}
396 
397 	/* success */
398 	return 0;
399 
400 free_htt_desc:
401 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
402 				 qdf_get_dma_mem_context((&pdev->tx_descs),
403 							 memctx), true);
404 out_fail:
405 	return -ENOBUFS;
406 }
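
/*
 * Note on the freelist built above: it is embedded in the descriptors
 * themselves.  The first word of each free descriptor holds the virtual
 * address of the next free descriptor; the last descriptor of a page
 * links to the first descriptor of the following page, and the final
 * entry is terminated with NULL.
 */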
407 
408 void htt_tx_detach(struct htt_pdev_t *pdev)
409 {
410 	if (!pdev) {
411 		qdf_print("htt tx detach invalid instance");
412 		return;
413 	}
414 
415 	htt_tx_frag_desc_detach(pdev);
416 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
417 				 qdf_get_dma_mem_context((&pdev->tx_descs),
418 							 memctx), true);
419 }
420 
421 /**
422  * htt_tx_set_frag_desc_addr() - set up the fragmentation descriptor address
423  * @pdev: pointer to the HTT instance making the allocation
424  * @htt_tx_desc: Host tx descriptor that does not include HTC hdr
425  * @index: index to alloc htt tx desc
426  *
427  *
428  * Return: None
429  */
430 static inline void
431 htt_tx_set_frag_desc_addr(struct htt_pdev_t *pdev,
432 			  struct htt_tx_msdu_desc_t *htt_tx_desc,
433 			  uint16_t index)
434 {
435 }
436 
437 /**
438  * htt_tx_desc_frags_table_set() - set up the descriptor and payload
439  *				   to corresponding fragments
440  * @pdev: pointer to the HTT instance making the allocation
441  * @htt_tx_desc: Host tx descriptor that does not include HTC hdr
442  * @paddr: fragment physical address
443  * @frag_desc_paddr: fragment descriptor physical address
444  * @reset: reset
445  *
446  * Return: None
447  */
448 void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
449 				 void *desc,
450 				 qdf_dma_addr_t paddr,
451 				 qdf_dma_addr_t frag_desc_paddr,
452 				 int reset)
453 {
454 	/* fragments table only applies to LL systems */
455 }
456 
457 /**
458  * htt_tx_credit_update() - get the number of credits by which the amount of
459  *			    target credits needs to be updated
460  * @pdev: htt context
461  *
462  * Return: number of credits
463  */
464 int htt_tx_credit_update(struct htt_pdev_t *pdev)
465 {
466 	int credit_delta;
467 
468 	credit_delta = QDF_MIN(qdf_atomic_read(
469 			&pdev->htt_tx_credit.target_delta),
470 			qdf_atomic_read(&pdev->htt_tx_credit.bus_delta));
471 	if (credit_delta) {
472 		qdf_atomic_add(-credit_delta,
473 			       &pdev->htt_tx_credit.target_delta);
474 		qdf_atomic_add(-credit_delta,
475 			       &pdev->htt_tx_credit.bus_delta);
476 	}
477 	return credit_delta;
478 }
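
/*
 * Worked example (illustrative numbers): with target_delta = 5 and
 * bus_delta = 3, credit_delta = min(5, 3) = 3; both counters are then
 * decremented by 3 and the function returns 3.
 */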
479 
480 /**
481  * htt_tx_get_paddr() - get physical address for htt desc
482  *
483  * Get HTT descriptor physical address from virtual address
484  * Find page first and find offset
485  * Not required for HL systems
486  *
487  * Return: Physical address of descriptor
488  */
489 static inline
490 qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
491 				char *target_vaddr)
492 {
493 	return 0;
494 }
495 
496 
497 #else
498 
499 int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
500 {
501 	int i, i_int, pool_size;
502 	uint32_t **p;
503 	struct qdf_mem_dma_page_t *page_info;
504 	uint32_t num_link = 0;
505 	uint16_t num_page, num_desc_per_page;
506 
507 	htt_tx_desc_get_size(pdev);
508 
509 	/*
510 	 * Make sure tx_descs.size is a multiple of 4-bytes.
511 	 * It should be, but round up just to be sure.
512 	 */
513 	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
514 
515 	pdev->tx_descs.pool_elems = desc_pool_elems;
516 	pdev->tx_descs.alloc_cnt = 0;
517 	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
518 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
519 		pdev->tx_descs.size, pdev->tx_descs.pool_elems,
520 		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
521 	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
522 		(!pdev->tx_descs.desc_pages.dma_pages)) {
523 		ol_txrx_err("HTT desc alloc fail");
524 		goto out_fail;
525 	}
526 	num_page = pdev->tx_descs.desc_pages.num_pages;
527 	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;
528 
529 	/* link tx descriptors into a freelist */
530 	page_info = pdev->tx_descs.desc_pages.dma_pages;
531 	pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
532 	p = (uint32_t **) pdev->tx_descs.freelist;
533 	for (i = 0; i < num_page; i++) {
534 		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
535 			if (i_int == (num_desc_per_page - 1)) {
536 				/*
537 				 * Last element on this page,
538 				 * should point to the next page
539 				 */
540 				if (!page_info->page_v_addr_start) {
541 					ol_txrx_err("over flow num link %d",
542 						num_link);
543 					goto free_htt_desc;
544 				}
545 				page_info++;
546 				*p = (uint32_t *)page_info->page_v_addr_start;
547 			} else {
548 				*p = (uint32_t *)
549 					(((char *) p) + pdev->tx_descs.size);
550 			}
551 			num_link++;
552 			p = (uint32_t **) *p;
553 			/* Last link established exit */
554 			if (num_link == (pdev->tx_descs.pool_elems - 1))
555 				break;
556 		}
557 	}
558 	*p = NULL;
559 
560 	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
561 		ol_txrx_err("HTT Frag descriptor alloc fail");
562 		goto free_htt_desc;
563 	}
564 
565 	/* success */
566 	return 0;
567 
568 free_htt_desc:
569 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
570 		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
571 out_fail:
572 	return -ENOBUFS;
573 }
574 
575 void htt_tx_detach(struct htt_pdev_t *pdev)
576 {
577 	if (!pdev) {
578 		qdf_print("htt tx detach invalid instance");
579 		return;
580 	}
581 
582 	htt_tx_frag_desc_detach(pdev);
583 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
584 		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
585 }
586 
587 static void
588 htt_tx_set_frag_desc_addr(struct htt_pdev_t *pdev,
589 			  struct htt_tx_msdu_desc_t *htt_tx_desc,
590 			  uint16_t index)
591 {
592 	uint32_t *fragmentation_descr_field_ptr;
593 
594 	fragmentation_descr_field_ptr = (uint32_t *)
595 		((uint32_t *)htt_tx_desc) +
596 		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
597 	/*
598 	 * The fragmentation descriptor is allocated from consistent
599 	 * memory. Therefore, we can use the address directly rather
600 	 * than having to map it from a virtual/CPU address to a
601 	 * physical/bus address.
602 	 */
603 	htt_tx_frag_desc_field_update(pdev, fragmentation_descr_field_ptr,
604 				      index, htt_tx_desc);
607 }
608 
609 void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
610 				 void *htt_tx_desc,
611 				 qdf_dma_addr_t paddr,
612 				 qdf_dma_addr_t frag_desc_paddr,
613 				 int reset)
614 {
615 	uint32_t *fragmentation_descr_field_ptr;
616 
617 	fragmentation_descr_field_ptr = (uint32_t *)
618 		((uint32_t *) htt_tx_desc) +
619 		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
620 	if (reset) {
621 #if defined(HELIUMPLUS)
622 		*fragmentation_descr_field_ptr = frag_desc_paddr;
623 #else
624 		*fragmentation_descr_field_ptr =
625 			htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
626 #endif
627 	} else {
628 		*fragmentation_descr_field_ptr = paddr;
629 	}
630 }
631 
632 void htt_tx_pending_discard(htt_pdev_handle pdev)
633 {
634 	htc_flush_surprise_remove(pdev->htc_pdev);
635 }
636 
637 static qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
638 				char *target_vaddr)
639 {
640 	uint16_t i;
641 	struct qdf_mem_dma_page_t *page_info = NULL;
642 	uint64_t offset;
643 
644 	for (i = 0; i < pdev->tx_descs.desc_pages.num_pages; i++) {
645 		page_info = pdev->tx_descs.desc_pages.dma_pages + i;
646 		if (!page_info->page_v_addr_start) {
647 			qdf_assert(0);
648 			return 0;
649 		}
650 		if ((target_vaddr >= page_info->page_v_addr_start) &&
651 			(target_vaddr <= page_info->page_v_addr_end))
652 			break;
653 	}
654 
655 	if (!page_info) {
656 		ol_txrx_err("invalid page_info");
657 		return 0;
658 	}
659 
660 	offset = (uint64_t)(target_vaddr - page_info->page_v_addr_start);
661 	return page_info->page_p_addr + offset;
662 }
663 
664 #endif
665 
666 /*--- descriptor allocation functions ---------------------------------------*/
667 
668 void *htt_tx_desc_alloc(htt_pdev_handle pdev, qdf_dma_addr_t *paddr,
669 			uint16_t index)
670 {
671 	struct htt_host_tx_desc_t *htt_host_tx_desc;    /* includes HTC hdr */
672 	struct htt_tx_msdu_desc_t *htt_tx_desc; /* doesn't include  HTC hdr */
673 
674 	htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
675 	if (!htt_host_tx_desc)
676 		return NULL;    /* pool is exhausted */
677 
678 	htt_tx_desc = &htt_host_tx_desc->align32.tx_desc;
679 
680 	if (pdev->tx_descs.freelist) {
681 		pdev->tx_descs.freelist =
682 			*((uint32_t **) pdev->tx_descs.freelist);
683 		pdev->tx_descs.alloc_cnt++;
684 	}
685 	/*
686 	 * For LL, set up the fragmentation descriptor address.
687 	 * Currently, this HTT tx desc allocation is performed once up front.
688 	 * If this is changed to have the allocation done during tx, then it
689 	 * would be helpful to have separate htt_tx_desc_alloc functions for
690 	 * HL vs. LL, to remove the below conditional branch.
691 	 */
692 	htt_tx_set_frag_desc_addr(pdev, htt_tx_desc, index);
693 
694 	/*
695 	 * Include the headroom for the HTC frame header when specifying the
696 	 * physical address for the HTT tx descriptor.
697 	 */
698 	*paddr = (qdf_dma_addr_t)htt_tx_get_paddr(pdev,
699 						  (char *)htt_host_tx_desc);
700 	/*
701 	 * The allocated tx descriptor space includes headroom for a
702 	 * HTC frame header.  Hide this headroom, so that we don't have
703 	 * to jump past the headroom each time we program a field within
704 	 * the tx desc, but only once when we download the tx desc (and
705 	 * the headroom) to the target via HTC.
706 	 * Skip past the headroom and return the address of the HTT tx desc.
707 	 */
708 	return (void *)htt_tx_desc;
709 }
710 
711 void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
712 {
713 	char *htt_host_tx_desc = tx_desc;
714 	/* rewind over the HTC frame header space */
715 	htt_host_tx_desc -=
716 		offsetof(struct htt_host_tx_desc_t, align32.tx_desc);
717 	*((uint32_t **) htt_host_tx_desc) = pdev->tx_descs.freelist;
718 	pdev->tx_descs.freelist = (uint32_t *) htt_host_tx_desc;
719 	pdev->tx_descs.alloc_cnt--;
720 }
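
/*
 * Typical pairing (hypothetical caller sketch): htt_tx_desc_alloc()
 * hands back the HTT tx descriptor (past the HTC headroom) plus, on LL
 * targets, the physical address of the full host descriptor including
 * that headroom; once the frame is completed or abandoned, the same
 * pointer is passed to htt_tx_desc_free(), which rewinds over the
 * headroom and returns the descriptor to the freelist.
 */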
721 
722 /*--- descriptor field access methods ---------------------------------------*/
723 
724 /* PUT THESE AS inline IN ol_htt_tx_api.h */
725 
726 void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
727 {
728 }
729 
730 void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
731 {
732 }
733 
734 /*--- tx send function ------------------------------------------------------*/
735 
736 #ifdef ATH_11AC_TXCOMPACT
737 
738 /*
739  * Schedule the queued packets in HTT which could not be sent out
740  * because no CE descriptors were available
741  */
742 void htt_tx_sched(htt_pdev_handle pdev)
743 {
744 	qdf_nbuf_t msdu;
745 	int download_len = pdev->download_len;
746 	int packet_len;
747 
748 	HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
749 	while (msdu) {
750 		int not_accepted;
751 		/* packet length includes HTT tx desc frag added above */
752 		packet_len = qdf_nbuf_len(msdu);
753 		if (packet_len < download_len) {
754 			/*
755 			 * This case of packet length being less than the
756 			 * nominal download length can happen for a couple
757 			 * of reasons:
758 			 * In HL, the nominal download length is a large
759 			 * artificial value.
760 			 * In LL, the frame may not have the optional header
761 			 * fields accounted for in the nominal download size
762 			 * (LLC/SNAP header, IPv4 or IPv6 header).
763 			 */
764 			download_len = packet_len;
765 		}
766 
767 		not_accepted =
768 			htc_send_data_pkt(pdev->htc_pdev, msdu,
769 					  pdev->htc_tx_endpoint,
770 					  download_len);
771 		if (not_accepted) {
772 			HTT_TX_NBUF_QUEUE_INSERT_HEAD(pdev, msdu);
773 			return;
774 		}
775 		HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
776 	}
777 }
778 
779 int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
780 {
781 
782 	int download_len = pdev->download_len;
783 
784 	int packet_len;
785 
786 	/* packet length includes HTT tx desc frag added above */
787 	packet_len = qdf_nbuf_len(msdu);
788 	if (packet_len < download_len) {
789 		/*
790 		 * This case of packet length being less than the nominal
791 		 * download length can happen for a couple of reasons:
792 		 * In HL, the nominal download length is a large artificial
793 		 * value.
794 		 * In LL, the frame may not have the optional header fields
795 		 * accounted for in the nominal download size (LLC/SNAP header,
796 		 * IPv4 or IPv6 header).
797 		 */
798 		download_len = packet_len;
799 	}
800 
801 	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
802 		download_len += sizeof(struct htt_tx_msdu_desc_ext_t);
803 
804 
805 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_HTT);
806 	DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
807 				QDF_TRACE_DEFAULT_PDEV_ID,
808 				qdf_nbuf_data_addr(msdu),
809 				sizeof(qdf_nbuf_data(msdu)), QDF_TX));
810 	if (qdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
811 		HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
812 		htt_tx_sched(pdev);
813 		return 0;
814 	}
815 
816 	if (htc_send_data_pkt(pdev->htc_pdev, msdu,
817 			      pdev->htc_tx_endpoint, download_len)) {
818 		HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
819 	}
820 
821 	return 0;               /* success */
822 
823 }
824 
825 #ifndef CONFIG_HL_SUPPORT
826 #ifdef FEATURE_RUNTIME_PM
827 /**
828  * htt_tx_resume_handler() - resume callback for the htt endpoint
829  * @context: a pointer to the htt context
830  *
831  * runs htt_tx_sched.
832  */
833 void htt_tx_resume_handler(void *context)
834 {
835 	struct htt_pdev_t *pdev =  (struct htt_pdev_t *) context;
836 
837 	htt_tx_sched(pdev);
838 }
839 #else
840 void
841 htt_tx_resume_handler(void *context) { }
842 #endif
843 #endif
844 
845 qdf_nbuf_t
846 htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
847 {
848 	qdf_print("Not apply to LL");
849 	qdf_assert(0);
850 	return head_msdu;
851 
852 }
853 
854 int
855 htt_tx_send_nonstd(htt_pdev_handle pdev,
856 		   qdf_nbuf_t msdu,
857 		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
858 {
859 	int download_len;
860 
861 	/*
862 	 * The pkt_type could be checked to see what L2 header type is present,
863 	 * and then the L2 header could be examined to determine its length.
864 	 * But for simplicity, just use the maximum possible header size,
865 	 * rather than computing the actual header size.
866 	 */
867 	download_len = sizeof(struct htt_host_tx_desc_t)
868 		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
869 		+ HTT_TX_HDR_SIZE_802_1Q
870 		+ HTT_TX_HDR_SIZE_LLC_SNAP
871 		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
872 	qdf_assert(download_len <= pdev->download_len);
873 	return htt_tx_send_std(pdev, msdu, msdu_id);
874 }
875 
876 #ifndef QCA_TX_PADDING_CREDIT_SUPPORT
877 int htt_tx_padding_credit_update_handler(void *context, int pad_credit)
878 {
879 	return 1;
880 }
881 #endif
882 
883 #else                           /*ATH_11AC_TXCOMPACT */
884 
885 #ifdef QCA_TX_PADDING_CREDIT_SUPPORT
886 static int htt_tx_padding_credit_update(htt_pdev_handle htt_pdev,
887 					int pad_credit)
888 {
889 	int ret = 0;
890 
891 	if (pad_credit)
892 		qdf_atomic_add(pad_credit,
893 			       &htt_pdev->txrx_pdev->pad_reserve_tx_credit);
894 
895 	ret = qdf_atomic_read(&htt_pdev->txrx_pdev->pad_reserve_tx_credit);
896 
897 	return ret;
898 }
899 
900 int htt_tx_padding_credit_update_handler(void *context, int pad_credit)
901 {
902 	struct htt_pdev_t *htt_pdev = (struct htt_pdev_t *)context;
903 
904 	return htt_tx_padding_credit_update(htt_pdev, pad_credit);
905 }
906 #else
907 int htt_tx_padding_credit_update_handler(void *context, int pad_credit)
908 {
909 	return 1;
910 }
911 #endif
912 
913 #ifdef QCA_TX_HTT2_SUPPORT
914 static inline HTC_ENDPOINT_ID
915 htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, qdf_nbuf_t msdu)
916 {
917 	/*
918 	 * The TX HTT2 service is mainly for small-sized frames; check
919 	 * whether this candidate frame is allowed to use it.
920 	 */
921 	if ((pdev->htc_tx_htt2_endpoint != ENDPOINT_UNUSED) &&
922 	    qdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
923 	    (qdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
924 		return pdev->htc_tx_htt2_endpoint;
925 	else
926 		return pdev->htc_tx_endpoint;
927 }
928 #else
929 #define htt_tx_htt2_get_ep_id(pdev, msdu)     (pdev->htc_tx_endpoint)
930 #endif /* QCA_TX_HTT2_SUPPORT */
931 
932 static inline int
933 htt_tx_send_base(htt_pdev_handle pdev,
934 		 qdf_nbuf_t msdu,
935 		 uint16_t msdu_id, int download_len, uint8_t more_data)
936 {
937 	struct htt_host_tx_desc_t *htt_host_tx_desc;
938 	struct htt_htc_pkt *pkt;
939 	int packet_len;
940 	HTC_ENDPOINT_ID ep_id;
941 
942 	/*
943 	 * The HTT tx descriptor was attached as the prefix fragment to the
944 	 * msdu netbuf during the call to htt_tx_desc_init.
945 	 * Retrieve it so we can provide its HTC header space to HTC.
946 	 */
947 	htt_host_tx_desc = (struct htt_host_tx_desc_t *)
948 			   qdf_nbuf_get_frag_vaddr(msdu, 0);
949 
950 	pkt = htt_htc_pkt_alloc(pdev);
951 	if (!pkt)
952 		return -ENOBUFS;       /* failure */
953 
954 	pkt->msdu_id = msdu_id;
955 	pkt->pdev_ctxt = pdev->txrx_pdev;
956 
957 	/* packet length includes HTT tx desc frag added above */
958 	packet_len = qdf_nbuf_len(msdu);
959 	if (packet_len < download_len) {
960 		/*
961 		 * This case of packet length being less than the nominal
962 		 * download length can happen for a couple reasons:
963 		 * In HL, the nominal download length is a large artificial
964 		 * value.
965 		 * In LL, the frame may not have the optional header fields
966 		 * accounted for in the nominal download size (LLC/SNAP header,
967 		 * IPv4 or IPv6 header).
968 		 */
969 		download_len = packet_len;
970 	}
971 
972 	ep_id = htt_tx_htt2_get_ep_id(pdev, msdu);
973 
974 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
975 			       pdev->tx_send_complete_part2,
976 			       (unsigned char *)htt_host_tx_desc,
977 			       download_len - HTC_HDR_LENGTH,
978 			       ep_id,
979 			       1); /* tag - not relevant here */
980 
981 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);
982 
983 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_HTT);
984 	DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
985 				QDF_TRACE_DEFAULT_PDEV_ID,
986 				qdf_nbuf_data_addr(msdu),
987 				sizeof(qdf_nbuf_data(msdu)), QDF_TX));
988 	htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);
989 
990 	return 0;               /* success */
991 }
992 
993 qdf_nbuf_t
994 htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
995 {
996 	qdf_nbuf_t rejected = NULL;
997 	uint16_t *msdu_id_storage;
998 	uint16_t msdu_id;
999 	qdf_nbuf_t msdu;
1000 
1001 	/*
1002 	 * FOR NOW, iterate through the batch, sending the frames singly.
1003 	 * Eventually HTC and HIF should be able to accept a batch of
1004 	 * data frames rather than singles.
1005 	 */
1006 	msdu = head_msdu;
1007 	while (num_msdus--) {
1008 		qdf_nbuf_t next_msdu = qdf_nbuf_next(msdu);
1009 
1010 		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
1011 		msdu_id = *msdu_id_storage;
1012 
1013 		/* htt_tx_send_base returns 0 on success, non-zero on failure */
1014 		if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
1015 				     num_msdus)) {
1016 			qdf_nbuf_set_next(msdu, rejected);
1017 			rejected = msdu;
1018 		}
1019 		msdu = next_msdu;
1020 	}
1021 	return rejected;
1022 }
1023 
1024 int
1025 htt_tx_send_nonstd(htt_pdev_handle pdev,
1026 		   qdf_nbuf_t msdu,
1027 		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
1028 {
1029 	int download_len;
1030 
1031 	/*
1032 	 * The pkt_type could be checked to see what L2 header type is present,
1033 	 * and then the L2 header could be examined to determine its length.
1034 	 * But for simplicity, just use the maximum possible header size,
1035 	 * rather than computing the actual header size.
1036 	 */
1037 	download_len = sizeof(struct htt_host_tx_desc_t)
1038 		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX      /* worst case */
1039 		+ HTT_TX_HDR_SIZE_802_1Q
1040 		+ HTT_TX_HDR_SIZE_LLC_SNAP
1041 		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
1042 	return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
1043 }
1044 
1045 int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
1046 {
1047 	return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
1048 }
1049 
1050 #endif /*ATH_11AC_TXCOMPACT */
1051 
1052 #if defined(HTT_DBG)
1053 void htt_tx_desc_display(void *tx_desc)
1054 {
1055 	struct htt_tx_msdu_desc_t *htt_tx_desc;
1056 
1057 	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;
1058 
1059 	/* only works for little-endian */
1060 	qdf_debug("HTT tx desc (@ %pK):", htt_tx_desc);
1061 	qdf_debug("  msg type = %d", htt_tx_desc->msg_type);
1062 	qdf_debug("  pkt subtype = %d", htt_tx_desc->pkt_subtype);
1063 	qdf_debug("  pkt type = %d", htt_tx_desc->pkt_type);
1064 	qdf_debug("  vdev ID = %d", htt_tx_desc->vdev_id);
1065 	qdf_debug("  ext TID = %d", htt_tx_desc->ext_tid);
1066 	qdf_debug("  postponed = %d", htt_tx_desc->postponed);
1067 	qdf_debug("  extension = %d", htt_tx_desc->extension);
1068 	qdf_debug("  cksum_offload = %d", htt_tx_desc->cksum_offload);
1069 	qdf_debug("  tx_compl_req= %d", htt_tx_desc->tx_compl_req);
1070 	qdf_debug("  length = %d", htt_tx_desc->len);
1071 	qdf_debug("  id = %d", htt_tx_desc->id);
1072 #if HTT_PADDR64
1073 	qdf_debug("  frag desc addr.lo = %#x",
1074 		  htt_tx_desc->frags_desc_ptr.lo);
1075 	qdf_debug("  frag desc addr.hi = %#x",
1076 		  htt_tx_desc->frags_desc_ptr.hi);
1077 #else /* ! HTT_PADDR64 */
1078 	qdf_debug("  frag desc addr = %#x", htt_tx_desc->frags_desc_ptr);
1079 #endif /* HTT_PADDR64 */
1080 	qdf_debug("  peerid = %d", htt_tx_desc->peerid);
1081 	qdf_debug("  chanfreq = %d", htt_tx_desc->chanfreq);
1082 }
1083 #endif
1084 
1085 #ifdef IPA_OFFLOAD
1086 #ifdef QCA_WIFI_3_0
1087 
1088 #ifndef LIMIT_IPA_TX_BUFFER
1089 #define LIMIT_IPA_TX_BUFFER 2048
1090 #endif
1091 
1092 /**
1093  * htt_tx_ipa_get_limit_tx_buf_count() - limit the WDI TX buffer count
1094  * @uc_tx_buf_cnt: TX Buffer count
1095  *
1096  * Return: new uc tx buffer count
1097  */
1098 static int htt_tx_ipa_get_limit_tx_buf_count(unsigned int uc_tx_buf_cnt)
1099 {
1100 	/* To improve the Genoa IPA DBS KPI, IpaUcTxBufCount is set to 2048,
1101 	 * which gives a tx complete ring size of 2048 and a total tx
1102 	 * buffer count of 2047.
1103 	 * In practice the wlan fw has only 1100 5G tx descriptors plus
1104 	 * 400 2.4G descriptors, which covers roughly 1500 packets from
1105 	 * the IPA side.
1106 	 * The remaining 2047-1500 buffers would go unused, so to save
1107 	 * some memory LIMIT_IPA_TX_BUFFER can be used to cap the maximum
1108 	 * tx buffer count; the cap can vary per platform.
1109 	 * The tx buffer count therefore no longer has to be exactly the
1110 	 * tx complete ring size - 1.
1111 	 * Testing showed the same KPI achievement whether
1112 	 * LIMIT_IPA_TX_BUFFER is set to 1500 or 2048.
1113 	 */
1115 	if (uc_tx_buf_cnt > LIMIT_IPA_TX_BUFFER)
1116 		return LIMIT_IPA_TX_BUFFER;
1117 	else
1118 		return uc_tx_buf_cnt;
1119 }
1120 
1121 /**
1122  * htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers
1123  * @pdev: htt context
1124  * @uc_tx_buf_sz: TX buffer size
1125  * @uc_tx_buf_cnt: TX Buffer count
1126  * @uc_tx_partition_base: IPA UC TX partition base value
1127  *
1128  * Allocate WDI TX buffers. Also note Rome supports only WDI 1.0.
1129  *
1130  * Return: 0 success
1131  */
1132 
1133 static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
1134 					  unsigned int uc_tx_buf_sz,
1135 					  unsigned int uc_tx_buf_cnt,
1136 					  unsigned int uc_tx_partition_base)
1137 {
1138 	unsigned int tx_buffer_count;
1139 	qdf_dma_addr_t buffer_paddr;
1140 	uint32_t *header_ptr;
1141 	target_paddr_t *ring_vaddr;
1142 	qdf_shared_mem_t *shared_tx_buffer;
1143 
1144 	ring_vaddr = (target_paddr_t *)pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
1145 
1146 	/* Allocate as many TX buffers as possible */
1147 	for (tx_buffer_count = 0;
1148 	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
1149 
1150 		shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
1151 							    uc_tx_buf_sz);
1152 		if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
1153 			qdf_print("IPA WDI TX buffer alloc fail %d allocated",
1154 				tx_buffer_count);
1155 			goto out;
1156 		}
1157 
1158 		header_ptr = shared_tx_buffer->vaddr;
1159 		buffer_paddr = qdf_mem_get_dma_addr(pdev->osdev,
1160 						&shared_tx_buffer->mem_info);
1161 
1162 		/* HTT control header */
1163 		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
1164 		header_ptr++;
1165 
1166 		/* PKT ID */
1167 		*header_ptr |= ((uint16_t) uc_tx_partition_base +
1168 				tx_buffer_count) << 16;
1169 
1170 		header_ptr++;
1171 
1172 		/* Frag Desc Pointer */
1173 		/* 64bits descriptor, Low 32bits */
1174 		*header_ptr = qdf_get_lower_32_bits(buffer_paddr +
1175 					IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
1176 		header_ptr++;
1177 
1178 		/* 64bits descriptor, high 32bits */
1179 		*header_ptr = qdf_get_upper_32_bits(buffer_paddr) &
1180 			IPA_UC_TX_BUF_PADDR_HI_MASK;
1181 		header_ptr++;
1182 
1183 		/* chanfreq, peerid */
1184 		*header_ptr = 0xFFFFFFFF;
1185 		header_ptr++;
1186 
1187 		/* FRAG Header */
1188 		/* 6 words TSO header */
1189 		header_ptr += IPA_UC_TX_BUF_TSO_HDR_SIZE;
1190 		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
1191 
1192 		*ring_vaddr = buffer_paddr;
1193 		pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[tx_buffer_count] =
1194 			shared_tx_buffer;
1195 
1196 		/* Memory barrier to ensure actual value updated */
1197 
1198 		ring_vaddr++;
1199 	}
1200 
1201 out:
1202 
1203 	return tx_buffer_count;
1204 }
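
/*
 * Resulting per-buffer layout for the QCA_WIFI_3_0 build (byte offsets,
 * as written by the loop above):
 *    0  HTT control header (HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT)
 *    4  packet ID in the upper 16 bits
 *    8  fragment descriptor physical address, low 32 bits
 *   12  fragment descriptor physical address, masked high bits
 *   16  chanfreq/peerid word (0xFFFFFFFF)
 *   20  fragment descriptor: 6-word TSO header, then the first fragment
 *       pointer at offset 44, pointing to the payload at
 *       IPA_UC_TX_BUF_FRAG_HDR_OFFSET (offset 64)
 */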
1205 
1206 /**
1207  * htt_tx_buf_pool_free() - Free tx buffer pool
1208  * @pdev: htt context
1209  *
1210  * Free memory in tx buffer pool
1211  *
1212  * Return: 0 success
1213  */
1214 static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
1215 {
1216 	uint16_t idx;
1217 
1218 	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1219 		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
1220 			qdf_mem_shared_mem_free(pdev->osdev,
1221 						pdev->ipa_uc_tx_rsc.
1222 							tx_buf_pool_strg[idx]);
1223 			pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
1224 		}
1225 	}
1226 }
1227 #else
1228 static int htt_tx_ipa_get_limit_tx_buf_count(unsigned int uc_tx_buf_cnt)
1229 {
1230 	return uc_tx_buf_cnt;
1231 }
1232 
1233 static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
1234 					  unsigned int uc_tx_buf_sz,
1235 					  unsigned int uc_tx_buf_cnt,
1236 					  unsigned int uc_tx_partition_base)
1237 {
1238 	unsigned int tx_buffer_count;
1239 	unsigned int  tx_buffer_count_pwr2;
1240 	qdf_dma_addr_t buffer_paddr;
1241 	uint32_t *header_ptr;
1242 	uint32_t *ring_vaddr;
1243 	uint16_t idx;
1244 	qdf_shared_mem_t *shared_tx_buffer;
1245 
1246 	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
1247 
1248 	/* Allocate as many TX buffers as possible */
1249 	for (tx_buffer_count = 0;
1250 	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
1251 		shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
1252 							    uc_tx_buf_sz);
1253 		if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
1254 			qdf_print("TX BUF alloc fail, loop index: %d",
1255 				  tx_buffer_count);
1256 			goto pwr2;
1257 		}
1258 
1259 		/* Init buffer */
1260 		qdf_mem_zero(shared_tx_buffer->vaddr, uc_tx_buf_sz);
1261 		header_ptr = (uint32_t *)shared_tx_buffer->vaddr;
1262 		buffer_paddr = qdf_mem_get_dma_addr(pdev->osdev,
1263 						&shared_tx_buffer->mem_info);
1264 
1265 		/* HTT control header */
1266 		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
1267 		header_ptr++;
1268 
1269 		/* PKT ID */
1270 		*header_ptr |= ((uint16_t) uc_tx_partition_base +
1271 				tx_buffer_count) << 16;
1272 		header_ptr++;
1273 
1274 		/*FRAG Desc Pointer */
1275 		*header_ptr = (uint32_t) (buffer_paddr +
1276 						IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
1277 		header_ptr++;
1278 		*header_ptr = 0xFFFFFFFF;
1279 
1280 		/* FRAG Header */
1281 		header_ptr++;
1282 		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
1283 
1284 		*ring_vaddr = buffer_paddr;
1285 		pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[tx_buffer_count] =
1286 			shared_tx_buffer;
1287 		/* Memory barrier to ensure actual value updated */
1288 
1289 		ring_vaddr++;
1290 	}
1291 
1292 pwr2:
1293 	/*
1294 	 * Tx complete ring buffer count should be power of 2.
1295 	 * So, allocated Tx buffer count should be one less than ring buffer
1296 	 * size.
1297 	 */
1298 	tx_buffer_count_pwr2 = qdf_rounddown_pow_of_two(tx_buffer_count + 1)
1299 			       - 1;
1300 	if (tx_buffer_count > tx_buffer_count_pwr2) {
1301 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
1302 			  "%s: Allocated Tx buffer count %d is rounded down to %d",
1303 			  __func__, tx_buffer_count, tx_buffer_count_pwr2);
1304 
1305 		/* Free over allocated buffers below power of 2 */
1306 		for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
1307 			if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
1308 				qdf_mem_shared_mem_free(pdev->osdev,
1309 							pdev->ipa_uc_tx_rsc.
1310 							tx_buf_pool_strg[idx]);
1311 				pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] =
1312 									NULL;
1313 			}
1314 		}
1315 	}
1316 
1317 	return tx_buffer_count_pwr2;
1318 }
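
/*
 * Example (illustrative): if allocation stops early at 1000 buffers,
 * qdf_rounddown_pow_of_two(1001) - 1 = 511, so buffers 511..999 are
 * freed again and 511 is returned.  If all uc_tx_buf_cnt - 1 buffers
 * were allocated for a power-of-two ring size, nothing is trimmed.
 */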
1319 
1320 static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
1321 {
1322 	uint16_t idx;
1323 
1324 	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1325 		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
1326 			qdf_mem_shared_mem_free(pdev->osdev,
1327 						pdev->ipa_uc_tx_rsc.
1328 							tx_buf_pool_strg[idx]);
1329 			pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
1330 		}
1331 	}
1332 }
1333 #endif
1334 
1335 /**
1336  * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
1337  * @pdev: htt context
1338  * @uc_tx_buf_sz: single tx buffer size
1339  * @uc_tx_buf_cnt: total tx buffer count
1340  * @uc_tx_partition_base: tx buffer partition start
1341  *
1342  * Return: 0 success
1343  *         -ENOBUFS on memory allocation failure
1344  */
1345 int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
1346 			 unsigned int uc_tx_buf_sz,
1347 			 unsigned int uc_tx_buf_cnt,
1348 			 unsigned int uc_tx_partition_base)
1349 {
1350 	int return_code = 0;
1351 	unsigned int tx_comp_ring_size;
1352 
1353 	/* Allocate CE Write Index WORD */
1354 	pdev->ipa_uc_tx_rsc.tx_ce_idx =
1355 		qdf_mem_shared_mem_alloc(pdev->osdev, 4);
1356 	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx) {
1357 		qdf_print("Unable to allocate memory for IPA tx ce idx");
1358 		return -ENOBUFS;
1359 	}
1360 
1361 	/* Allocate TX COMP Ring */
1362 	tx_comp_ring_size = qdf_get_pwr2(uc_tx_buf_cnt)
1363 			    * sizeof(target_paddr_t);
1364 	pdev->ipa_uc_tx_rsc.tx_comp_ring =
1365 		qdf_mem_shared_mem_alloc(pdev->osdev,
1366 					 tx_comp_ring_size);
1367 	if (!pdev->ipa_uc_tx_rsc.tx_comp_ring ||
1368 	    !pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr) {
1369 		qdf_print("TX COMP ring alloc fail");
1370 		return_code = -ENOBUFS;
1371 		goto free_tx_ce_idx;
1372 	}
1373 
1374 	uc_tx_buf_cnt = htt_tx_ipa_get_limit_tx_buf_count(uc_tx_buf_cnt);
1375 	/* Allocate TX BUF vAddress Storage */
1376 	pdev->ipa_uc_tx_rsc.tx_buf_pool_strg =
1377 		qdf_mem_malloc(uc_tx_buf_cnt *
1378 			sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
1379 	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_strg) {
1380 		return_code = -ENOBUFS;
1381 		goto free_tx_comp_base;
1382 	}
1383 
1384 	qdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_strg,
1385 		     uc_tx_buf_cnt *
1386 		     sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
1387 
1388 	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
1389 		pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
1390 
1391 	pdev->ipa_uc_tx_rsc.ipa_smmu_mapped = false;
1392 
1393 
1394 	return 0;
1395 
1396 free_tx_comp_base:
1397 	qdf_mem_shared_mem_free(pdev->osdev,
1398 				pdev->ipa_uc_tx_rsc.tx_comp_ring);
1399 free_tx_ce_idx:
1400 	qdf_mem_shared_mem_free(pdev->osdev,
1401 				pdev->ipa_uc_tx_rsc.tx_ce_idx);
1402 
1403 	return return_code;
1404 }
1405 
1406 /**
1407  * htt_tx_ipa_uc_detach() - Free WDI TX resources
1408  * @pdev: htt context
1409  *
1410  * Remove IPA WDI TX resources during device detach
1411  * Free all of the allocated resources
1412  *
1413  * Return: 0 success
1414  */
1415 int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
1416 {
1417 	qdf_mem_shared_mem_free(pdev->osdev,
1418 				pdev->ipa_uc_tx_rsc.tx_ce_idx);
1419 	qdf_mem_shared_mem_free(pdev->osdev,
1420 				pdev->ipa_uc_tx_rsc.tx_comp_ring);
1421 
1422 	/* Free each single buffer */
1423 	htt_tx_buf_pool_free(pdev);
1424 
1425 	/* Free storage */
1426 	qdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_strg);
1427 
1428 	return 0;
1429 }
1430 #endif /* IPA_OFFLOAD */
1431 
1432 #if defined(FEATURE_TSO) && defined(HELIUMPLUS)
1433 void
1434 htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
1435 	 struct qdf_tso_info_t *tso_info)
1436 {
1437 	u_int32_t *word;
1438 	int i;
1439 	struct qdf_tso_seg_elem_t *tso_seg = tso_info->curr_seg;
1440 	struct msdu_ext_desc_t *msdu_ext_desc = (struct msdu_ext_desc_t *)desc;
1441 
1442 	word = (u_int32_t *)(desc);
1443 
1444 	/* Initialize the TSO flags per MSDU */
1445 	msdu_ext_desc->tso_flags =
1446 		 tso_seg->seg.tso_flags;
1447 
1448 	/* First 24 bytes (6*4) contain the TSO flags */
1449 	TSO_DEBUG("%s seq# %u l2 len %d, ip len %d",
1450 		  __func__,
1451 		  tso_seg->seg.tso_flags.tcp_seq_num,
1452 		  tso_seg->seg.tso_flags.l2_len,
1453 		  tso_seg->seg.tso_flags.ip_len);
1454 	TSO_DEBUG("%s flags 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
1455 		  __func__,
1456 		  *word,
1457 		  *(word + 1),
1458 		  *(word + 2),
1459 		  *(word + 3),
1460 		  *(word + 4),
1461 		  *(word + 5));
1462 
1463 	word += 6;
1464 
1465 	for (i = 0; i < tso_seg->seg.num_frags; i++) {
1466 		uint32_t lo = 0;
1467 		uint32_t hi = 0;
1468 
1469 		qdf_dmaaddr_to_32s(tso_seg->seg.tso_frags[i].paddr,
1470 						&lo, &hi);
1471 		/* [31:0] first 32 bits of the buffer pointer  */
1472 		*word = lo;
1473 		word++;
1474 		/* [15:0] the upper 16 bits of the first buffer pointer */
1475 		/* [31:16] length of the first buffer */
1476 		*word = (tso_seg->seg.tso_frags[i].length << 16) | hi;
1477 		word++;
1478 		TSO_DEBUG("%s frag[%d] ptr_low 0x%x ptr_hi 0x%x len %u",
1479 			__func__, i,
1480 			msdu_ext_desc->frags[i].u.frag32.ptr_low,
1481 			msdu_ext_desc->frags[i].u.frag32.ptr_hi,
1482 			msdu_ext_desc->frags[i].u.frag32.len);
1483 	}
1484 
1485 	if (tso_seg->seg.num_frags < FRAG_NUM_MAX)
1486 		*word = 0;
1487 	qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_FILLHTTSEG);
1488 }
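
/*
 * Note: the MSDU_EXT descriptor filled here is a 6-word (24-byte) TSO
 * flags header followed by up to FRAG_NUM_MAX two-word fragment
 * entries; each entry holds the low 32 address bits in its first word
 * and {length, upper address bits} packed into its second word.
 */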
1489 #endif /* FEATURE_TSO */
1490 
1491 /**
1492  * htt_get_ext_tid() - get ext_tid value
1493  * @type: extension header type
1494  * @ext_header_data: header data
1495  * @msdu_info: msdu info
1496  *
1497  * Return: ext_tid value
1498  */
1499 static inline
1500 int htt_get_ext_tid(enum extension_header_type type,
1501 	void *ext_header_data, struct htt_msdu_info_t *msdu_info)
1502 {
1503 	if (type == OCB_MODE_EXT_HEADER && ext_header_data)
1504 		return ((struct ocb_tx_ctrl_hdr_t *)ext_header_data)->ext_tid;
1505 	else
1506 		return msdu_info->info.ext_tid;
1507 }
1508 
1509 /**
1510  * htt_get_channel_freq() - get channel frequency
1511  * @type: extension header type
1512  * @ext_header_data: header data
1513  *
1514  * Return: channel frequency number
1515  */
1516 static inline
1517 int htt_get_channel_freq(enum extension_header_type type,
1518 	void *ext_header_data)
1519 {
1520 	if (type == OCB_MODE_EXT_HEADER && ext_header_data)
1521 		return ((struct ocb_tx_ctrl_hdr_t *)ext_header_data)
1522 							->channel_freq;
1523 	else
1524 		return HTT_INVALID_CHANNEL;
1525 }
1526 
1527 /**
1528  * htt_fill_ocb_ext_header() - fill OCB extension header
1529  * @msdu: network buffer
1530  * @local_desc_ext: extension descriptor
1531  * @type: extension header type
1532  * @ext_header_data: header data
1533  * @is_dsrc: whether DSRC is enabled or not
1534  *
1535  * Return: none
1536  */
1537 #ifdef WLAN_FEATURE_DSRC
1538 static
1539 void htt_fill_ocb_ext_header(qdf_nbuf_t msdu,
1540 			     struct htt_tx_msdu_desc_ext_t *local_desc_ext,
1541 			     enum extension_header_type type,
1542 			     void *ext_header_data)
1543 {
1544 	struct ocb_tx_ctrl_hdr_t *tx_ctrl =
1545 		(struct ocb_tx_ctrl_hdr_t *)ext_header_data;
1546 
1547 	if (tx_ctrl->all_flags == 0)
1548 		return;
1549 	/*
1550 	 * Copy the info that was read from TX control header from the
1551 	 * user application to the extended HTT header.
1552 	 * First copy everything
1553 	 * to a local temp structure, and then copy everything to the
1554 	 * actual uncached structure in one go to save memory writes.
1555 	 */
1556 	local_desc_ext->valid_pwr = tx_ctrl->valid_pwr;
1557 	local_desc_ext->valid_mcs_mask = tx_ctrl->valid_datarate;
1558 	local_desc_ext->valid_retries = tx_ctrl->valid_retries;
1559 	local_desc_ext->valid_expire_tsf = tx_ctrl->valid_expire_tsf;
1560 	local_desc_ext->valid_chainmask = tx_ctrl->valid_chain_mask;
1561 
1562 	local_desc_ext->pwr = tx_ctrl->pwr;
1563 	if (tx_ctrl->valid_datarate &&
1564 			tx_ctrl->datarate <= htt_ofdm_datarate_max)
1565 		local_desc_ext->mcs_mask =
1566 			(1 << (tx_ctrl->datarate + 4));
1567 	local_desc_ext->retry_limit = tx_ctrl->retry_limit;
1568 	local_desc_ext->expire_tsf_lo = tx_ctrl->expire_tsf_lo;
1569 	local_desc_ext->expire_tsf_hi = tx_ctrl->expire_tsf_hi;
1570 	local_desc_ext->chain_mask = tx_ctrl->chain_mask;
1571 	local_desc_ext->is_dsrc = 1;
1572 	qdf_nbuf_push_head(msdu, sizeof(struct htt_tx_msdu_desc_ext_t));
1573 	qdf_mem_copy(qdf_nbuf_data(msdu), local_desc_ext,
1574 			sizeof(struct htt_tx_msdu_desc_ext_t));
1575 	QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu) = 1;
1576 }
1577 #else
1578 static
1579 void htt_fill_ocb_ext_header(qdf_nbuf_t msdu,
1580 			     struct htt_tx_msdu_desc_ext_t *local_desc_ext,
1581 			     enum extension_header_type type,
1582 			     void *ext_header_data)
1583 {
1584 }
1585 #endif
1586 
1587 /**
1588  * htt_fill_wisa_ext_header() - fill WiSA extension header
1589  * @msdu: network buffer
1590  * @local_desc_ext: extension descriptor
1591  * @type: extension header type
1592  * @ext_header_data: header data
1593  *
1594  * Return: none
1595  */
1596 static
1597 void htt_fill_wisa_ext_header(qdf_nbuf_t msdu,
1598 	struct htt_tx_msdu_desc_ext_t *local_desc_ext,
1599 	enum extension_header_type type, void *ext_header_data)
1600 {
1601 	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1602 	QDF_STATUS status;
1603 
1604 	if (!qdf_ctx)
1605 		return;
1606 
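	/*
	 * WiSA frames go out at a fixed legacy OFDM rate (6 or 24 Mbps,
	 * depending on the extension header type), a single spatial stream,
	 * 20 MHz bandwidth, and the regular guard interval.
	 */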
	local_desc_ext->valid_mcs_mask = 1;
	if (WISA_MODE_EXT_HEADER_6MBPS == type)
		local_desc_ext->mcs_mask = htt_ofdm_datarate_6_mbps;
	else
		local_desc_ext->mcs_mask = htt_ofdm_datarate_24_mbps;
	local_desc_ext->valid_nss_mask = 1;
	local_desc_ext->nss_mask = 1;
	local_desc_ext->valid_bandwidth = 1;
	local_desc_ext->bandwidth_mask = htt_tx_bandwidth_20MHz;
	local_desc_ext->valid_guard_interval = 1;
	local_desc_ext->guard_interval = htt_tx_guard_interval_regular;

	/*
	 * If the skb is already DMA-mapped, unmap it and map it again
	 * after the extra header bytes have been added.
	 */
	if (QDF_NBUF_CB_PADDR(msdu) != 0)
		qdf_nbuf_unmap_single(qdf_ctx, msdu, QDF_DMA_TO_DEVICE);

	qdf_nbuf_push_head(msdu, sizeof(struct htt_tx_msdu_desc_ext_t));
	qdf_mem_copy(qdf_nbuf_data(msdu), local_desc_ext,
			sizeof(struct htt_tx_msdu_desc_ext_t));

	if (QDF_NBUF_CB_PADDR(msdu) != 0) {
		status = qdf_nbuf_map_single(qdf_ctx, msdu, QDF_DMA_TO_DEVICE);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
				"%s: nbuf map failed", __func__);
			return;
		}
	}
	QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu) = 1;
}

/**
 * htt_push_ext_header() - fill and push the extension header
 * @msdu: network buffer
 * @local_desc_ext: extension descriptor
 * @type: extension header type
 * @ext_header_data: header data
 *
 * Return: none
 */
static
void htt_push_ext_header(qdf_nbuf_t msdu,
	struct htt_tx_msdu_desc_ext_t *local_desc_ext,
	enum extension_header_type type, void *ext_header_data)
{
	switch (type) {
	case OCB_MODE_EXT_HEADER:
		htt_fill_ocb_ext_header(msdu, local_desc_ext,
					type, ext_header_data);
		break;
	case WISA_MODE_EXT_HEADER_6MBPS:
	case WISA_MODE_EXT_HEADER_24MBPS:
		htt_fill_wisa_ext_header(msdu, local_desc_ext,
					type, ext_header_data);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
			"Invalid EXT header type %d\n", type);
		break;
	}
}

QDF_STATUS
htt_tx_desc_init(htt_pdev_handle pdev,
		 void *htt_tx_desc,
		 qdf_dma_addr_t htt_tx_desc_paddr,
		 uint16_t msdu_id,
		 qdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
		 struct qdf_tso_info_t *tso_info,
		 void *ext_header_data,
		 enum extension_header_type type)
{
	uint8_t  pkt_type, pkt_subtype = 0;
	uint32_t ce_pkt_type = 0;
	uint32_t hw_classify = 0, data_attr = 0;
	uint32_t *word0, *word1, local_word3;
#if HTT_PADDR64
	uint32_t *word4;
#else /* ! HTT_PADDR64 */
	uint32_t *word3;
#endif /* HTT_PADDR64 */
	uint32_t local_word0, local_word1;
	struct htt_host_tx_desc_t *htt_host_tx_desc =
		(struct htt_host_tx_desc_t *)
		(((char *)htt_tx_desc) - HTT_TX_DESC_VADDR_OFFSET);
	bool desc_ext_required = (type != EXT_HEADER_NOT_PRESENT);
	int channel_freq;
	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	qdf_dma_dir_t dir;
	QDF_STATUS status;

	if (qdf_unlikely(!qdf_ctx))
		return QDF_STATUS_E_FAILURE;

	if (qdf_unlikely(!msdu_info)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: bad arg: msdu_info is NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	if (qdf_unlikely(!tso_info)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: bad arg: tso_info is NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	word0 = (uint32_t *) htt_tx_desc;
	word1 = word0 + 1;
	/*
	 * word2 is frag desc pointer
	 * word3 or 4 is peer_id
	 */
#if HTT_PADDR64
	word4 = word0 + 4;      /* Dword 4 */
#else /* ! HTT_PADDR64  */
	word3 = word0 + 3;      /* Dword 3 */
#endif /* HTT_PADDR64 */

	pkt_type = msdu_info->info.l2_hdr_type;

	if (qdf_likely(pdev->cfg.ce_classify_enabled)) {
		if (qdf_likely(pkt_type == htt_pkt_type_eth2 ||
			pkt_type == htt_pkt_type_ethernet))
			qdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
				     hw_classify);

		ce_pkt_type = htt_to_ce_pkt_type[pkt_type];
		if (0xffffffff == ce_pkt_type) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"Invalid HTT pkt type %d\n", pkt_type);
			return QDF_STATUS_E_INVAL;
		}
	}

	/*
	 * The HTT tx descriptor is in uncached memory. Use cached writes per
	 * word to reduce unnecessary memory accesses.
	 */

	local_word0 = 0;

	HTT_H2T_MSG_TYPE_SET(local_word0, HTT_H2T_MSG_TYPE_TX_FRM);
	HTT_TX_DESC_PKT_TYPE_SET(local_word0, pkt_type);
	HTT_TX_DESC_PKT_SUBTYPE_SET(local_word0, pkt_subtype);
	HTT_TX_DESC_VDEV_ID_SET(local_word0, msdu_info->info.vdev_id);
	HTT_TX_DESC_EXT_TID_SET(local_word0, htt_get_ext_tid(type,
					ext_header_data, msdu_info));
	HTT_TX_DESC_EXTENSION_SET(local_word0, desc_ext_required);
	HTT_TX_DESC_CKSUM_OFFLOAD_SET(local_word0,
				      msdu_info->action.cksum_offload);
	if (pdev->cfg.is_high_latency)
		HTT_TX_DESC_TX_COMP_SET(local_word0, msdu_info->action.
							tx_comp_req);
	HTT_TX_DESC_NO_ENCRYPT_SET(local_word0,
				   msdu_info->action.do_encrypt ?
				   0 : 1);

	*word0 = local_word0;

	local_word1 = 0;

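	/*
	 * For TSO, the frame length programmed into the descriptor is the
	 * length of the current TSO segment rather than the length of the
	 * whole network buffer.
	 */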
	if (tso_info->is_tso) {
		uint32_t total_len = tso_info->curr_seg->seg.total_len;

		HTT_TX_DESC_FRM_LEN_SET(local_word1, total_len);
		TSO_DEBUG("%s setting HTT TX DESC Len = %d",
			  __func__, total_len);
	} else {
		HTT_TX_DESC_FRM_LEN_SET(local_word1, qdf_nbuf_len(msdu));
	}

	QDF_BUG(HTT_TX_DESC_FRM_LEN_GET(local_word1) != 0);

	HTT_TX_DESC_FRM_ID_SET(local_word1, msdu_id);
	*word1 = local_word1;

	/*
	 * Initialize peer_id to INVALID_PEER because
	 * this is not the reinjection path.
	 */
	local_word3 = HTT_INVALID_PEER;
	channel_freq = htt_get_channel_freq(type, ext_header_data);
	if (channel_freq != HTT_INVALID_CHANNEL && channel_freq > 0)
		HTT_TX_DESC_CHAN_FREQ_SET(local_word3, channel_freq);
#if HTT_PADDR64
	*word4 = local_word3;
#else /* ! HTT_PADDR64 */
	*word3 = local_word3;
#endif /* HTT_PADDR64 */

	/*
	 * If an extension header type was specified, the extended HTT
	 * header needs to be built and pushed onto the frame.
	 */
	if (desc_ext_required) {
		struct htt_tx_msdu_desc_ext_t local_desc_ext = {0};

		htt_push_ext_header(msdu, &local_desc_ext,
			type, ext_header_data);
	}

	/*
	 * Specify that the data provided by the OS is a bytestream,
	 * and thus should not be byte-swapped during the HIF download
	 * even if the host is big-endian.
	 * There could be extra fragments added before the OS's fragments,
	 * e.g. for TSO, so it's incorrect to clear the frag 0 wordstream flag.
	 * Instead, clear the wordstream flag for the final fragment, which
	 * is certain to be (one of the) fragment(s) provided by the OS.
	 * Clearing the flag for this final fragment suffices for specifying
	 * all fragments provided by the OS rather than added by the driver.
	 */
	qdf_nbuf_set_frag_is_wordstream(msdu, qdf_nbuf_get_num_frags(msdu) - 1,
					0);

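	/*
	 * DMA-map the frame for the target if the caller has not already
	 * mapped it; a physical address of zero indicates the nbuf is
	 * still unmapped.
	 */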
	if (QDF_NBUF_CB_PADDR(msdu) == 0) {
		dir = QDF_NBUF_CB_TX_DMA_BI_MAP(msdu) ?
			QDF_DMA_BIDIRECTIONAL : QDF_DMA_TO_DEVICE;
		status = qdf_nbuf_map_single(qdf_ctx, msdu, dir);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: nbuf map failed", __func__);
			return QDF_STATUS_E_NOMEM;
		}
	}

	/* store a link to the HTT tx descriptor within the netbuf */
	qdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
				(char *)htt_host_tx_desc, /* virtual addr */
				htt_tx_desc_paddr);

	/*
	 * Indicate that the HTT header (and HTC header) is a meta-data
	 * "wordstream", i.e. series of uint32_t, rather than a data
	 * bytestream.
	 * This allows the HIF download to byteswap the HTT + HTC headers if
	 * the host is big-endian, to convert to the target's little-endian
	 * format.
	 */
	qdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);

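	/*
	 * With CE classification enabled, pack the hardware classify hint,
	 * the CE packet type, and the length of fragment 0 (the HTT/HTC
	 * headers) into the data attributes used by the copy engine during
	 * the download.
	 */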
	if (qdf_likely(pdev->cfg.ce_classify_enabled &&
		(msdu_info->info.l2_hdr_type != htt_pkt_type_mgmt))) {
		uint32_t pkt_offset = qdf_nbuf_get_frag_len(msdu, 0);

		data_attr = hw_classify << CE_DESC_TX_CLASSIFY_BIT_S;
		data_attr |= ce_pkt_type << CE_DESC_PKT_TYPE_BIT_S;
		data_attr |= pkt_offset  << CE_DESC_PKT_OFFSET_BIT_S;
	}

	qdf_nbuf_data_attr_set(msdu, data_attr);
	return QDF_STATUS_SUCCESS;
}

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * htt_tx_group_credit_process() - process group data for
 *				   credit update indication
 * @pdev: pointer to htt device.
 * @msg_word: htt msg
 *
 * Return: None
 */
void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
{
	int group_credit_sign;
	int32_t group_credit;
	u_int32_t group_credit_abs, vdev_id_mask, ac_mask;
	u_int8_t group_abs, group_id;
	u_int8_t group_offset = 0, more_group_present = 0;

	more_group_present = HTT_TX_CREDIT_TXQ_GRP_GET(*msg_word);

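	/*
	 * Walk the group records that follow the credit update header.
	 * For each group, the first word carries the group ID, the credit
	 * delta (count, sign, absolute flag), and the extension bit; the
	 * second word carries the vdev ID mask and access category mask.
	 */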
	while (more_group_present) {
		/* Parse the Group Data */
		group_id = HTT_TXQ_GROUP_ID_GET(*(msg_word+1
						+group_offset));
		group_credit_abs =
			HTT_TXQ_GROUP_CREDIT_COUNT_GET(*(msg_word+1
						+group_offset));
		group_credit_sign =
			HTT_TXQ_GROUP_SIGN_GET(*(msg_word+1
						+group_offset)) ? -1 : 1;
		group_credit = group_credit_sign * group_credit_abs;
		group_abs = HTT_TXQ_GROUP_ABS_GET(*(msg_word+1
						+group_offset));

		vdev_id_mask =
			HTT_TXQ_GROUP_VDEV_ID_MASK_GET(*(msg_word+2
						+group_offset));
		ac_mask = HTT_TXQ_GROUP_AC_MASK_GET(*(msg_word+2
						+group_offset));

		ol_txrx_update_tx_queue_groups(pdev->txrx_pdev, group_id,
					       group_credit, group_abs,
					       vdev_id_mask, ac_mask);
		more_group_present = HTT_TXQ_GROUP_EXT_GET(*(msg_word+1
						+group_offset));
		group_offset += HTT_TX_GROUP_INDEX_OFFSET;
	}
	ol_tx_update_group_credit_stats(pdev->txrx_pdev);
}
#endif
