/*
 * Copyright (c) 2011, 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file htt.c
 * @brief Provide functions to create+init and destroy a HTT instance.
 * @details
 *  This file contains functions for creating a HTT instance; initializing
 *  the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
 *  connecting the HTT service with HTC; and deleting a HTT instance.
 */

#include <qdf_mem.h>            /* qdf_mem_malloc */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */

#include <htt.h>                /* htt_tx_msdu_desc_t */
#include <ol_cfg.h>
#include <ol_txrx_htt_api.h>    /* ol_tx_download_done_ll, etc. */
#include <ol_htt_api.h>

#include <htt_internal.h>
#include <ol_htt_tx_api.h>
#include <cds_api.h>
#include "hif.h"
#include <cdp_txrx_handle.h>
#include <ol_txrx_peer_find.h>

#define HTT_HTC_PKT_POOL_INIT_SIZE 100  /* enough for a large A-MPDU */

QDF_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
QDF_STATUS(*htt_h2t_rx_ring_rfs_cfg_msg)(struct htt_pdev_t *pdev);

#ifdef IPA_OFFLOAD
static QDF_STATUS htt_ipa_config(htt_pdev_handle pdev, QDF_STATUS status)
{
	if ((QDF_STATUS_SUCCESS == status) &&
	    ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
	return status;
}

#define HTT_IPA_CONFIG htt_ipa_config
#else
#define HTT_IPA_CONFIG(pdev, status) status     /* no-op */
#endif /* IPA_OFFLOAD */

struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt_union *pkt = NULL;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	if (pdev->htt_htc_pkt_freelist) {
		pkt = pdev->htt_htc_pkt_freelist;
		pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
	}
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

	if (!pkt)
		pkt = qdf_mem_malloc(sizeof(*pkt));

	if (!pkt)
		return NULL;

	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
	return &pkt->u.pkt;     /* not actually a dereference */
}
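
/*
 * Example usage (illustrative sketch, not part of the driver): callers
 * pair htt_htc_pkt_alloc() with htt_htc_pkt_free() so completed packets
 * are recycled through the freelist instead of going back to the heap:
 *
 *	struct htt_htc_pkt *pkt = htt_htc_pkt_alloc(pdev);
 *
 *	if (!pkt)
 *		return QDF_STATUS_E_NOMEM;	// pool and heap both empty
 *	// ... fill pkt->htc_pkt and hand it to HTC ...
 *	htt_htc_pkt_free(pdev, pkt);		// back onto the freelist
 */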

void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
	struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;

	if (!u_pkt) {
		qdf_print("HTC packet is NULL");
		return;
	}

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
	u_pkt->u.next = pdev->htt_htc_pkt_freelist;
	pdev->htt_htc_pkt_freelist = u_pkt;
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}

void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt_union *pkt, *next;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	pkt = pdev->htt_htc_pkt_freelist;
	pdev->htt_htc_pkt_freelist = NULL;
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

	while (pkt) {
		next = pkt->u.next;
		qdf_mem_free(pkt);
		pkt = next;
	}
}

#ifdef ATH_11AC_TXCOMPACT

void
htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level)
{
	struct htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	pkt = pdev->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the outgrown list: free every entry beyond 'level' */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
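
/*
 * Worked example (illustrative): with a misclist of five packets
 * P1->P2->P3->P4->P5 and level == 3, the loop keeps P1..P3, frees P4
 * and P5 (netbuf unmap + free, then the container itself), and truncates
 * the list by clearing P3's next pointer on the first over-level entry.
 */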

void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
	struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
	int misclist_trim_level = htc_get_tx_queue_depth(pdev->htc_pdev,
							pkt->htc_pkt.Endpoint)
				+ HTT_HTC_PKT_MISCLIST_SIZE;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	if (pdev->htt_htc_pkt_misclist) {
		u_pkt->u.next = pdev->htt_htc_pkt_misclist;
		pdev->htt_htc_pkt_misclist = u_pkt;
	} else {
		u_pkt->u.next = NULL;	/* NULL-terminate the new tail */
		pdev->htt_htc_pkt_misclist = u_pkt;
	}
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

	/* only (CE pipe size + tx queue depth) packets can possibly be in
	 * use; free any older packets in the misclist
	 */
	htt_htc_misc_pkt_list_trim(pdev, misclist_trim_level);
}
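
/*
 * Sizing note (illustrative; the values are assumptions, not taken from
 * this file): if HTC reports a tx queue depth of 64 for the endpoint and
 * HTT_HTC_PKT_MISCLIST_SIZE is 256, each add trims the misclist down to
 * at most 64 + 256 = 320 retained packets.
 */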

void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	pkt = pdev->htt_htc_pkt_misclist;
	pdev->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

	while (pkt) {
		next = pkt->u.next;
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
				HTC_PACKET_MAGIC_COOKIE) {
			QDF_ASSERT(0);
			pkt = next;
			continue;
		}

		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
}
#endif


/* AR6004 doesn't need the HTT layer. */
#ifdef AR6004_HW
#define NO_HTT_NEEDED true
#else
#define NO_HTT_NEEDED false
#endif

#if defined(QCA_TX_HTT2_SUPPORT) && defined(CONFIG_HL_SUPPORT)

/**
 * htt_htc_tx_htt2_service_start() - Start TX HTT2 service
 *
 * @pdev: pointer to htt device.
 * @connect_req: pointer to service connection request information
 * @connect_resp: pointer to service connection response information
 *
 * Return: None
 */
static void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
			      struct htc_service_connect_req *connect_req,
			      struct htc_service_connect_resp *connect_resp)
{
	QDF_STATUS status;

	qdf_mem_zero(connect_req, sizeof(struct htc_service_connect_req));
	qdf_mem_zero(connect_resp, sizeof(struct htc_service_connect_resp));

	/* The same as HTT service but no RX. */
	connect_req->EpCallbacks.pContext = pdev;
	connect_req->EpCallbacks.EpTxComplete = htt_h2t_send_complete;
	connect_req->EpCallbacks.EpSendFull = htt_h2t_full;
	connect_req->MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
	/* Should NOT support credit flow control. */
	connect_req->ConnectionFlags |=
				HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
	/* Enable HTC schedule mechanism for TX HTT2 service. */
	connect_req->ConnectionFlags |= HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE;

	connect_req->service_id = HTT_DATA2_MSG_SVC;

	status = htc_connect_service(pdev->htc_pdev, connect_req, connect_resp);

	if (status != QDF_STATUS_SUCCESS) {
		pdev->htc_tx_htt2_endpoint = ENDPOINT_UNUSED;
		pdev->htc_tx_htt2_max_size = 0;
	} else {
		pdev->htc_tx_htt2_endpoint = connect_resp->Endpoint;
		pdev->htc_tx_htt2_max_size = HTC_TX_HTT2_MAX_SIZE;
	}

	qdf_print("TX HTT %s, ep %d size %d\n",
		  (status == QDF_STATUS_SUCCESS ? "ON" : "OFF"),
		  pdev->htc_tx_htt2_endpoint,
		  pdev->htc_tx_htt2_max_size);
}
#else

static inline void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
			      struct htc_service_connect_req *connect_req,
			      struct htc_service_connect_resp *connect_resp)
{
}
#endif

/**
 * htt_htc_credit_flow_disable() - disable flow control for
 *				   HTT data message service
 *
 * @pdev: pointer to htt device.
 * @connect_req: pointer to service connection request information
 *
 * On SDIO, the HTC credit mechanism is disabled only when
 * default_tx_comp_req is not set: disabling credit flow while
 * default_tx_comp_req is set would lower throughput, because txrx
 * packet downloads would then be limited by OTA completions.
 * On all other bus types, credit flow control is disabled
 * unconditionally.
 *
 * Return: None
 */
static
void htt_htc_credit_flow_disable(struct htt_pdev_t *pdev,
				 struct htc_service_connect_req *connect_req)
{
	if (pdev->osdev->bus_type == QDF_BUS_TYPE_SDIO) {
		/*
		 * TODO: Conditional disabling will be removed once firmware
		 * with reduced tx completion is pushed into release builds.
		 */
		if (!pdev->cfg.default_tx_comp_req)
			connect_req->ConnectionFlags |=
			HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
	} else {
		connect_req->ConnectionFlags |=
			HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
	}
}

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)

/**
 * htt_dump_bundle_stats() - dump wlan stats
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_dump_bundle_stats(htt_pdev_handle pdev)
{
	htc_dump_bundle_stats(pdev->htc_pdev);
}

/**
 * htt_clear_bundle_stats() - clear wlan stats
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_clear_bundle_stats(htt_pdev_handle pdev)
{
	htc_clear_bundle_stats(pdev->htc_pdev);
}
#endif

#if defined(QCA_WIFI_3_0_ADRASTEA)
/**
 * htt_htc_attach_all() - Connect to HTC service for HTT
 * @pdev: pdev ptr
 *
 * Return: 0 for success or error code.
 */

#if defined(QCN7605_SUPPORT) && defined(IPA_OFFLOAD)

/* In the QCN7605 + IPA offload case, only two CEs are used for RFS,
 * so only the first two HTT data services are connected.
 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
		goto flush_endpoint;

	if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
		goto flush_endpoint;

	return 0;

flush_endpoint:
	htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL);

	return -EIO;
}

#else

static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
		goto flush_endpoint;

	if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
		goto flush_endpoint;

	if (htt_htc_attach(pdev, HTT_DATA3_MSG_SVC))
		goto flush_endpoint;

	return 0;

flush_endpoint:
	htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL);

	return -EIO;
}

#endif

#else
/**
 * htt_htc_attach_all() - Connect to HTC service for HTT
 * @pdev: pdev ptr
 *
 * Return: 0 for success or error code.
 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	return htt_htc_attach(pdev, HTT_DATA_MSG_SVC);
}
#endif

/**
 * htt_pdev_alloc() - allocate HTT pdev
 * @txrx_pdev: txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os device
 *
 * Return: HTT pdev handle
 */
htt_pdev_handle
htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
	   struct cdp_cfg *ctrl_pdev,
	   HTC_HANDLE htc_pdev, qdf_device_t osdev)
{
	struct htt_pdev_t *pdev;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	if (!osc)
		goto fail1;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail1;

	pdev->osdev = osdev;
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->txrx_pdev = txrx_pdev;
	pdev->htc_pdev = htc_pdev;

	pdev->htt_htc_pkt_freelist = NULL;
#ifdef ATH_11AC_TXCOMPACT
	pdev->htt_htc_pkt_misclist = NULL;
#endif

	/* for efficiency, store a local copy of the is_high_latency flag */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);
	/*
	 * Whether credit reporting through
	 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND is enabled.
	 */
	pdev->cfg.credit_update_enabled =
		ol_cfg_is_credit_update_enabled(pdev->ctrl_pdev);

	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
		cds_is_packet_log_enabled();

	pdev->cfg.default_tx_comp_req =
			!ol_cfg_tx_free_at_download(pdev->ctrl_pdev);

	pdev->cfg.is_full_reorder_offload =
			ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "full_reorder_offloaded %d",
		  (int)pdev->cfg.is_full_reorder_offload);

	pdev->cfg.ce_classify_enabled =
		ol_cfg_is_ce_classify_enabled(ctrl_pdev);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "ce_classify %d",
		  pdev->cfg.ce_classify_enabled);

	if (pdev->cfg.is_high_latency) {
		qdf_atomic_init(&pdev->htt_tx_credit.target_delta);
		qdf_atomic_init(&pdev->htt_tx_credit.bus_delta);
		qdf_atomic_add(HTT_MAX_BUS_CREDIT,
			       &pdev->htt_tx_credit.bus_delta);
	}

	pdev->targetdef = htc_get_targetdef(htc_pdev);
#if defined(HELIUMPLUS)
	HTT_SET_WIFI_IP(pdev, 2, 0);
#endif /* defined(HELIUMPLUS) */

	if (NO_HTT_NEEDED)
		goto success;
	/*
	 * Connect to HTC service.
	 * This has to be done before calling htt_rx_attach,
	 * since htt_rx_attach involves sending a rx ring configure
	 * message to the target.
	 */
	HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
	HTT_TX_MUTEX_INIT(&pdev->credit_mutex);
	if (htt_htc_attach_all(pdev))
		goto htt_htc_attach_fail;
	if (hif_ce_fastpath_cb_register(osc, htt_t2h_msg_handler_fast, pdev))
		qdf_print("failed to register fastpath callback\n");

success:
	return pdev;

htt_htc_attach_fail:
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
	qdf_mem_free(pdev);

fail1:
	return NULL;
}
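
/*
 * Lifecycle sketch (illustrative, not part of the driver): a typical
 * caller brings up an HTT instance in three steps and tears it down in
 * the reverse order. Error handling is elided; 'txrx_pdev', 'cfg_pdev',
 * 'htc' and 'osdev' are assumed to come from the surrounding datapath
 * init code.
 *
 *	htt_pdev_handle htt = htt_pdev_alloc(txrx_pdev, cfg_pdev, htc, osdev);
 *
 *	if (htt &&
 *	    htt_attach(htt, desc_pool_size) == 0 &&
 *	    htt_attach_target(htt) == QDF_STATUS_SUCCESS) {
 *		// ... datapath runs ...
 *		htt_detach_target(htt);
 *		htt_detach(htt);
 *		htt_pdev_free(htt);
 *	}
 */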

/**
 * htt_attach() - Allocate and setup HTT TX/RX descriptors
 * @pdev: pdev ptr
 * @desc_pool_size: size of tx descriptors
 *
 * Return: 0 for success or error code.
 */
int
htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
{
	int i;
	int ret = 0;

	pdev->is_ipa_uc_enabled = false;
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		pdev->is_ipa_uc_enabled = true;

	pdev->new_htt_format_enabled = false;
	if (ol_cfg_is_htt_new_format_enabled(pdev->ctrl_pdev))
		pdev->new_htt_format_enabled = true;

	htc_enable_hdr_length_check(pdev->htc_pdev,
				    pdev->new_htt_format_enabled);

	ret = htt_tx_attach(pdev, desc_pool_size);
	if (ret)
		goto fail1;

	ret = htt_rx_attach(pdev);
	if (ret)
		goto fail2;

	/* pre-allocate some HTC_PACKET objects */
	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
		struct htt_htc_pkt_union *pkt;

		pkt = qdf_mem_malloc(sizeof(*pkt));
		if (!pkt)
			break;
		htt_htc_pkt_free(pdev, &pkt->u.pkt);
	}

	if (pdev->cfg.is_high_latency) {
		/*
		 * HL - download the whole frame.
		 * Specify a download length greater than the max MSDU size,
		 * so the downloads will be limited by the actual frame sizes.
		 */
		pdev->download_len = 5000;

		if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev) &&
		    !pdev->cfg.request_tx_comp)
			pdev->tx_send_complete_part2 =
						ol_tx_download_done_hl_free;
		else
			pdev->tx_send_complete_part2 =
						ol_tx_download_done_hl_retain;

		/*
		 * CHECK THIS LATER: does the HL HTT version of
		 * htt_rx_mpdu_desc_list_next
		 * (which is not currently implemented) present the
		 * adf_nbuf_data(rx_ind_msg)
		 * as the abstract rx descriptor?
		 * If not, the rx_fw_desc_offset initialization
		 * here will have to be adjusted accordingly.
		 * NOTE: for HL, because the fw rx desc is in the ind msg
		 * rather than in the rx desc, the offset should be a
		 * negative value.
		 */
		pdev->rx_fw_desc_offset =
			HTT_ENDIAN_BYTE_IDX_SWAP(
					HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
					- HTT_RX_IND_HL_BYTES);

		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
		htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_hl;

		/* initialize the txrx credit count */
		ol_tx_target_credit_update(
				pdev->txrx_pdev, ol_cfg_target_tx_credit(
					pdev->ctrl_pdev));
		DPTRACE(qdf_dp_trace_credit_record(QDF_HTT_ATTACH,
			QDF_CREDIT_INC,
			ol_cfg_target_tx_credit(pdev->ctrl_pdev),
			qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit),
			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));

	} else {
		enum wlan_frm_fmt frm_type;

		/*
		 * LL - download just the initial portion of the frame.
		 * Download enough to cover the encapsulation headers checked
		 * by the target's tx classification descriptor engine.
		 *
		 * For LL, the FW rx desc is directly referenced at its
		 * location inside the rx indication message.
		 */

		/* account for the 802.3 or 802.11 header */
		frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);

		if (frm_type == wlan_frm_fmt_native_wifi) {
			pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
		} else if (frm_type == wlan_frm_fmt_802_3) {
			pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
		} else {
			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
				  "Unexpected frame type spec: %d", frm_type);
			HTT_ASSERT0(0);
		}

		/*
		 * Account for the optional L2 / ethernet header fields:
		 * 802.1Q, LLC/SNAP
		 */
		pdev->download_len +=
			HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;

		/*
		 * Account for the portion of the L3 (IP) payload that the
		 * target needs for its tx classification.
		 */
		pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);

		/*
		 * Account for the HTT tx descriptor, including the
		 * HTC header + alignment padding.
		 */
		pdev->download_len += sizeof(struct htt_host_tx_desc_t);

		/*
		 * The TXCOMPACT htt_tx_sched function uses pdev->download_len
		 * to apply for all requeued tx frames.  Thus,
		 * pdev->download_len has to be the largest download length of
		 * any tx frame that will be downloaded.
		 * This maximum download length is for management tx frames,
		 * which have an 802.11 header.
		 */
#ifdef ATH_11AC_TXCOMPACT
		pdev->download_len = sizeof(struct htt_host_tx_desc_t)
			+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
			+ HTT_TX_HDR_SIZE_802_1Q
			+ HTT_TX_HDR_SIZE_LLC_SNAP
			+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
#endif
		pdev->tx_send_complete_part2 = ol_tx_download_done_ll;

		/*
		 * For LL, the FW rx desc sits alongside the HW rx desc fields
		 * in the htt_host_rx_desc_base struct.
		 */
		pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;

		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
		htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_ll;
	}

	return 0;

fail2:
	htt_tx_detach(pdev);

fail1:
	return ret;
}
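
/*
 * Worked example (illustrative; the macro values are assumptions based
 * on standard header sizes, not taken from this file): for LL 802.3
 * frames, the download length built up above is
 *
 *	14 (ethernet header)
 *	+ 4 (802.1Q tag) + 8 (LLC/SNAP)
 *	+ ol_cfg_tx_download_size() bytes of L3 payload
 *	+ sizeof(struct htt_host_tx_desc_t)
 *
 * so with a configured tx download size of 16, the host would download
 * 42 bytes of frame data plus the HTT tx descriptor per MSDU.
 */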

QDF_STATUS htt_attach_target(htt_pdev_handle pdev)
{
	QDF_STATUS status;

	status = htt_h2t_ver_req_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_ver_req msg",
			  __func__, __LINE__);
		return status;
	}
#if defined(HELIUMPLUS)
	/*
	 * Send the frag_desc info to target.
	 */
	status = htt_h2t_frag_desc_bank_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_frag_desc_bank_cfg msg",
			  __func__, __LINE__);
		return status;
	}
#endif /* defined(HELIUMPLUS) */

	/*
	 * If applicable, send the rx ring config message to the target.
	 * The host could wait for the HTT version number confirmation message
	 * from the target before sending any further HTT messages, but it's
	 * reasonable to assume that the host and target HTT version numbers
	 * match, and proceed immediately with the remaining configuration
	 * handshaking.
	 */

	status = htt_h2t_rx_ring_rfs_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_rx_ring_rfs_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	status = htt_h2t_rx_ring_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_rx_ring_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	status = HTT_IPA_CONFIG(pdev, status);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_ipa_uc_rsc_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	return status;
}
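
/*
 * Summary of the host->target handshake performed above, in order:
 * 1. HTT version request (htt_h2t_ver_req_msg)
 * 2. Fragment descriptor bank config (HELIUMPLUS builds only)
 * 3. RX ring RFS config
 * 4. RX ring config
 * 5. IPA uC resource config (IPA_OFFLOAD builds only; a no-op otherwise)
 * Each step aborts the handshake on failure and returns its status.
 */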

void htt_detach(htt_pdev_handle pdev)
{
	htt_rx_detach(pdev);
	htt_tx_detach(pdev);
	htt_htc_pkt_pool_free(pdev);
#ifdef ATH_11AC_TXCOMPACT
	htt_htc_misc_pkt_pool_free(pdev);
#endif
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
}

/**
 * htt_pdev_free() - Free HTT pdev
 * @pdev: htt pdev
 *
 * Return: none
 */
void htt_pdev_free(htt_pdev_handle pdev)
{
	qdf_mem_free(pdev);
}

void htt_detach_target(htt_pdev_handle pdev)
{
}

static inline
int htt_update_endpoint(struct htt_pdev_t *pdev,
			uint16_t service_id, HTC_ENDPOINT_ID ep)
{
	struct hif_opaque_softc *hif_ctx;
	uint8_t ul = 0xff, dl = 0xff;
	int     ul_polled, dl_polled;
	int     tx_service = 0;
	int     rc = 0;

	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	if (qdf_unlikely(!hif_ctx)) {
		QDF_ASSERT(hif_ctx);
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: assuming non-tx service.",
			  __func__, __LINE__);
	} else {
		ul = dl = 0xff;
		if (QDF_STATUS_SUCCESS !=
		    hif_map_service_to_pipe(hif_ctx, service_id,
					    &ul, &dl,
					    &ul_polled, &dl_polled))
			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
				  "%s:%d: assuming non-tx srv.",
				  __func__, __LINE__);
		else
			tx_service = (ul != 0xff);
	}
	if (tx_service) {
		/* currently we have only one OUT htt tx service */
		QDF_BUG(service_id == HTT_DATA_MSG_SVC);

		pdev->htc_tx_endpoint = ep;
		hif_save_htc_htt_config_endpoint(hif_ctx, ep);
		rc = 1;
	}
	return rc;
}
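
/*
 * Note (illustrative): hif_map_service_to_pipe() reports the upload
 * (host->target) and download (target->host) pipe ids for a service;
 * here 0xff in 'ul' is treated as "no upload pipe mapped", so the
 * service carries no tx traffic and the HTT tx endpoint is left
 * unchanged.
 */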

int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = htt_tx_resume_handler;
	connect.EpCallbacks.ep_padding_credit_update =
					htt_tx_padding_credit_update_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	htt_htc_credit_flow_disable(pdev, &connect);

	/* connect to the requested HTT service */
	connect.service_id = service_id;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		if (cds_is_fw_down())
			return -EIO;

		if (status == QDF_STATUS_E_NOMEM ||
		    cds_is_self_recovery_enabled())
			return qdf_status_to_os_return(status);

		QDF_BUG(0);
	}

	htt_update_endpoint(pdev, service_id, response.Endpoint);

	/* Start TX HTT2 service if the target supports it. */
	htt_htc_tx_htt2_service_start(pdev, &connect, &response);

	return 0;               /* success */
}

void htt_log_rx_ring_info(htt_pdev_handle pdev)
{
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s: htt pdev is NULL", __func__);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Data Stall Detected with reason 4 (=FW_RX_REFILL_FAILED). "
		  "src htt rx ring: space for %d elements, filled with %d buffers, buffers in the ring %d, refill debt %d",
		  __func__, pdev->rx_ring.size, pdev->rx_ring.fill_level,
		  qdf_atomic_read(&pdev->rx_ring.fill_cnt),
		  qdf_atomic_read(&pdev->rx_ring.refill_debt));
}

void htt_rx_refill_failure(htt_pdev_handle pdev)
{
	QDF_BUG(qdf_atomic_read(&pdev->rx_ring.refill_debt));
}

#if HTT_DEBUG_LEVEL > 5
void htt_display(htt_pdev_handle pdev, int indent)
{
	qdf_print("%*s%s:\n", indent, " ", "HTT");
	qdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
		  indent + 4, " ",
		  pdev->tx_descs.pool_elems,
		  pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
	qdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
		  indent + 4, " ",
		  pdev->rx_ring.size, pdev->rx_ring.fill_level);
	qdf_print("%*sat %pK (%llx paddr)\n", indent + 8, " ",
		  pdev->rx_ring.buf.paddrs_ring,
		  (unsigned long long)pdev->rx_ring.base_paddr);
	qdf_print("%*snetbuf ring @ %pK\n", indent + 8, " ",
		  pdev->rx_ring.buf.netbufs_ring);
	qdf_print("%*sFW_IDX shadow register: vaddr = %pK, paddr = %llx\n",
		  indent + 8, " ",
		  pdev->rx_ring.alloc_idx.vaddr,
		  (unsigned long long)pdev->rx_ring.alloc_idx.paddr);
	qdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
		  indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
		  pdev->rx_ring.sw_rd_idx.msdu_desc,
		  pdev->rx_ring.sw_rd_idx.msdu_payld);
}
#endif

#ifdef IPA_OFFLOAD
/**
 * htt_ipa_uc_attach() - Allocate UC data path resources
 * @pdev: handle to the HTT instance
 *
 * Return: 0 for success,
 *         nonzero for failure
 */
int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
{
	int error;

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
		  __func__);

	/* TX resource attach */
	error = htt_tx_ipa_uc_attach(
		pdev,
		ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
		ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
		ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "HTT IPA UC TX attach fail code %d", error);
		HTT_ASSERT0(0);
		return error;
	}

	/* RX resource attach */
	error = htt_rx_ipa_uc_attach(
		pdev, qdf_get_pwr2(pdev->rx_ring.fill_level));
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "HTT IPA UC RX attach fail code %d", error);
		htt_tx_ipa_uc_detach(pdev);
		HTT_ASSERT0(0);
		return error;
	}

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
		__func__);
	return 0;               /* success */
}
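
/*
 * Sizing note (illustrative): qdf_get_pwr2() rounds the RX fill level up
 * to a power of two before sizing the IPA uC RX ring, e.g. a fill level
 * of 1000 would yield a ring of 1024 entries.
 */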

/**
 * htt_ipa_uc_detach() - Remove UC data path resources
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
		__func__);

	/* TX IPA micro controller detach */
	htt_tx_ipa_uc_detach(pdev);

	/* RX IPA micro controller detach */
	htt_rx_ipa_uc_detach(pdev);

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
		__func__);
}

int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
			qdf_shared_mem_t **ce_sr,
			qdf_shared_mem_t **tx_comp_ring,
			qdf_shared_mem_t **rx_rdy_ring,
			qdf_shared_mem_t **rx2_rdy_ring,
			qdf_shared_mem_t **rx_proc_done_idx,
			qdf_shared_mem_t **rx2_proc_done_idx,
			uint32_t *ce_sr_ring_size,
			qdf_dma_addr_t *ce_reg_paddr,
			uint32_t *tx_num_alloc_buffer)
{
	/* Release allocated resource to client */
	*tx_comp_ring = pdev->ipa_uc_tx_rsc.tx_comp_ring;
	*rx_rdy_ring = pdev->ipa_uc_rx_rsc.rx_ind_ring;
	*rx2_rdy_ring = pdev->ipa_uc_rx_rsc.rx2_ind_ring;
	*rx_proc_done_idx = pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx;
	*rx2_proc_done_idx = pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx;
	*tx_num_alloc_buffer = (uint32_t)pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	/* Get copy engine, bus resource */
	htc_ipa_get_ce_resource(pdev->htc_pdev, ce_sr,
				ce_sr_ring_size, ce_reg_paddr);

	return 0;
}

/**
 * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address
 * @pdev: handle to the HTT instance
 * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address
 * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address
 *
 * Return: 0 success
 */
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
			      qdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
			      qdf_dma_addr_t ipa_uc_rx_doorbell_paddr)
{
	pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
	pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
	return 0;
}
#endif /* IPA_OFFLOAD */

/**
 * htt_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @pdev: pointer to htt pdev
 * @value: 1 for enabled / 0 for disabled
 *
 * Return: None
 */
void htt_mark_first_wakeup_packet(htt_pdev_handle pdev,
			uint8_t value)
{
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s: htt pdev is NULL", __func__);
		return;
	}

	pdev->cfg.is_first_wakeup_packet = value;
}