1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef REO_QDESC_HISTORY
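/*
 * Ring buffer of recent REO queue descriptor events. The write index is
 * masked with (REO_QDESC_HISTORY_SIZE - 1), so the size must remain a
 * power of two for the wrap-around to work.
 */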
48 #define REO_QDESC_HISTORY_SIZE 512
49 uint64_t reo_qdesc_history_idx;
50 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
51 #endif
52 
53 #ifdef REO_QDESC_HISTORY
54 static inline void
55 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
56 			    enum reo_qdesc_event_type type)
57 {
58 	struct reo_qdesc_event *evt;
59 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
60 	uint32_t idx;
61 
62 	reo_qdesc_history_idx++;
63 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
64 
65 	evt = &reo_qdesc_history[idx];
66 
67 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
68 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
69 	evt->ts = qdf_get_log_timestamp();
70 	evt->type = type;
71 }
72 
73 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
74 static inline void
75 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
76 				 enum reo_qdesc_event_type type)
77 {
78 	struct reo_qdesc_event *evt;
79 	uint32_t idx;
80 
81 	reo_qdesc_history_idx++;
82 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
83 
84 	evt = &reo_qdesc_history[idx];
85 
86 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
87 	evt->qdesc_addr = desc->hw_qdesc_paddr;
88 	evt->ts = qdf_get_log_timestamp();
89 	evt->type = type;
90 }
91 
92 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
93 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
94 
95 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
96 	qdf_mem_copy((desc)->peer_mac, (freedesc)->peer_mac, QDF_MAC_ADDR_SIZE)
97 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
98 
99 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
100 	qdf_mem_copy((freedesc)->peer_mac, (peer)->mac_addr.raw, QDF_MAC_ADDR_SIZE)
101 
102 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
103 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
104 
105 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
106 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
107 
108 #else
109 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
110 
111 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
112 
113 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
114 
115 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
116 
117 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
118 #endif
119 
120 static inline void
121 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
122 		      uint8_t valid)
123 {
124 	params->u.upd_queue_params.update_svld = 1;
125 	params->u.upd_queue_params.svld = valid;
126 	dp_peer_debug("Setting SSN valid bit to %d",
127 		      valid);
128 }
129 
130 #ifdef IPA_OFFLOAD
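/*
 * The REO stats callback cookie packs the peer_id and tid into a single
 * unsigned long (decoded via DP_PEER_GET_REO_STATS_PEER_ID/TID), so no
 * separate context allocation is needed for this callback.
 */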
131 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
132 				       union hal_reo_status *reo_status)
133 {
134 	struct dp_peer *peer = NULL;
135 	struct dp_rx_tid *rx_tid = NULL;
136 	unsigned long comb_peer_id_tid;
137 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
138 	uint16_t tid;
139 	uint16_t peer_id;
140 
141 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
142 		dp_err("REO stats failure %d\n",
143 		       queue_status->header.status);
144 		return;
145 	}
146 	comb_peer_id_tid = (unsigned long)cb_ctxt;
147 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
148 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
149 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
150 	if (!peer)
151 		return;
152 	rx_tid  = &peer->rx_tid[tid];
153 
154 	if (!rx_tid) {
155 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
156 		return;
157 	}
158 
159 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
160 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
161 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
162 }
163 
164 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
165 #endif
166 
167 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
168 			union hal_reo_status *reo_status)
169 {
170 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
171 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
172 
173 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
174 		return;
175 
176 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
177 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
178 			       queue_status->header.status, rx_tid->tid);
179 		return;
180 	}
181 
182 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
183 		       "ssn: %d\n"
184 		       "curr_idx  : %d\n"
185 		       "pn_31_0   : %08x\n"
186 		       "pn_63_32  : %08x\n"
187 		       "pn_95_64  : %08x\n"
188 		       "pn_127_96 : %08x\n"
189 		       "last_rx_enq_tstamp : %08x\n"
190 		       "last_rx_deq_tstamp : %08x\n"
191 		       "rx_bitmap_31_0     : %08x\n"
192 		       "rx_bitmap_63_32    : %08x\n"
193 		       "rx_bitmap_95_64    : %08x\n"
194 		       "rx_bitmap_127_96   : %08x\n"
195 		       "rx_bitmap_159_128  : %08x\n"
196 		       "rx_bitmap_191_160  : %08x\n"
197 		       "rx_bitmap_223_192  : %08x\n"
198 		       "rx_bitmap_255_224  : %08x\n",
199 		       rx_tid->tid,
200 		       queue_status->ssn, queue_status->curr_idx,
201 		       queue_status->pn_31_0, queue_status->pn_63_32,
202 		       queue_status->pn_95_64, queue_status->pn_127_96,
203 		       queue_status->last_rx_enq_tstamp,
204 		       queue_status->last_rx_deq_tstamp,
205 		       queue_status->rx_bitmap_31_0,
206 		       queue_status->rx_bitmap_63_32,
207 		       queue_status->rx_bitmap_95_64,
208 		       queue_status->rx_bitmap_127_96,
209 		       queue_status->rx_bitmap_159_128,
210 		       queue_status->rx_bitmap_191_160,
211 		       queue_status->rx_bitmap_223_192,
212 		       queue_status->rx_bitmap_255_224);
213 
214 	DP_PRINT_STATS(
215 		       "curr_mpdu_cnt      : %d\n"
216 		       "curr_msdu_cnt      : %d\n"
217 		       "fwd_timeout_cnt    : %d\n"
218 		       "fwd_bar_cnt        : %d\n"
219 		       "dup_cnt            : %d\n"
220 		       "frms_in_order_cnt  : %d\n"
221 		       "bar_rcvd_cnt       : %d\n"
222 		       "mpdu_frms_cnt      : %d\n"
223 		       "msdu_frms_cnt      : %d\n"
224 		       "total_byte_cnt     : %d\n"
225 		       "late_recv_mpdu_cnt : %d\n"
226 		       "win_jump_2k        : %d\n"
227 		       "hole_cnt           : %d\n",
228 		       queue_status->curr_mpdu_cnt,
229 		       queue_status->curr_msdu_cnt,
230 		       queue_status->fwd_timeout_cnt,
231 		       queue_status->fwd_bar_cnt,
232 		       queue_status->dup_cnt,
233 		       queue_status->frms_in_order_cnt,
234 		       queue_status->bar_rcvd_cnt,
235 		       queue_status->mpdu_frms_cnt,
236 		       queue_status->msdu_frms_cnt,
237 		       queue_status->total_cnt,
238 		       queue_status->late_recv_mpdu_cnt,
239 		       queue_status->win_jump_2k,
240 		       queue_status->hole_cnt);
241 
242 	DP_PRINT_STATS("Addba Req          : %d\n"
243 			"Addba Resp         : %d\n"
244 			"Addba Resp success : %d\n"
245 			"Addba Resp failed  : %d\n"
246 			"Delba Req received : %d\n"
247 			"Delba Tx success   : %d\n"
248 			"Delba Tx Fail      : %d\n"
249 			"BA window size     : %d\n"
250 			"Pn size            : %d\n",
251 			rx_tid->num_of_addba_req,
252 			rx_tid->num_of_addba_resp,
253 			rx_tid->num_addba_rsp_success,
254 			rx_tid->num_addba_rsp_failed,
255 			rx_tid->num_of_delba_req,
256 			rx_tid->delba_tx_success_cnt,
257 			rx_tid->delba_tx_fail_cnt,
258 			rx_tid->ba_win_size,
259 			rx_tid->pn_size);
260 }
261 
262 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
263 				union hal_reo_status *reo_status)
264 {
265 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
266 
267 	if ((reo_status->rx_queue_status.header.status !=
268 		HAL_REO_CMD_SUCCESS) &&
269 		(reo_status->rx_queue_status.header.status !=
270 		HAL_REO_CMD_DRAIN)) {
271 		/* Should not happen normally. Just print error for now */
272 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
273 			    soc, reo_status->rx_queue_status.header.status,
274 			    rx_tid->tid);
275 	}
276 }
277 
278 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
279 {
280 	struct ol_if_ops *ol_ops = NULL;
281 	bool is_roaming = false;
282 	uint8_t vdev_id = -1;
283 	struct cdp_soc_t *soc;
284 
285 	if (!peer) {
286 		dp_peer_info("Peer is NULL. No roaming possible");
287 		return false;
288 	}
289 
290 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
291 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
292 
293 	if (ol_ops && ol_ops->is_roam_inprogress) {
294 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
295 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
296 	}
297 
298 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
299 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
300 
301 	return is_roaming;
302 }
303 
304 #ifdef WLAN_FEATURE_11BE_MLO
305 /**
306  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
307  *			     setup is necessary
308  * @peer: DP peer handle
309  *
310  * Return: true - allow, false - disallow
311  */
312 static inline
313 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
314 {
315 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
316 		return false;
317 
318 	return true;
319 }
320 
321 /**
322  * dp_rx_tid_update_allow() - check if rx_tid update needed
323  * @peer: DP peer handle
324  *
325  * Return: true - allow, false - disallow
326  */
327 static inline
328 bool dp_rx_tid_update_allow(struct dp_peer *peer)
329 {
330 	/* not as expected for MLO connection link peer */
331 	if (IS_MLO_DP_LINK_PEER(peer)) {
332 		QDF_BUG(0);
333 		return false;
334 	}
335 
336 	return true;
337 }
338 #else
339 static inline
340 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
341 {
342 	return true;
343 }
344 
345 static inline
346 bool dp_rx_tid_update_allow(struct dp_peer *peer)
347 {
348 	return true;
349 }
350 #endif
351 
352 QDF_STATUS
353 dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t ba_window_size,
354 		       uint32_t start_seq, bool bar_update)
355 {
356 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
357 	struct dp_soc *soc = peer->vdev->pdev->soc;
358 	struct hal_reo_cmd_params params;
359 
360 	if (!dp_rx_tid_update_allow(peer)) {
361 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
362 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
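	/* Build an UPDATE_RX_REO_QUEUE command carrying the new BA window
	 * size and, when the start sequence is valid, the new SSN;
	 * otherwise the SSN-valid bit is cleared.
	 */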
366 	qdf_mem_zero(&params, sizeof(params));
367 
368 	params.std.need_status = 1;
369 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
370 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
371 	params.u.upd_queue_params.update_ba_window_size = 1;
372 	params.u.upd_queue_params.ba_window_size = ba_window_size;
373 
374 	if (start_seq < IEEE80211_SEQ_MAX) {
375 		params.u.upd_queue_params.update_ssn = 1;
376 		params.u.upd_queue_params.ssn = start_seq;
377 	} else {
378 		dp_set_ssn_valid_flag(&params, 0);
379 	}
380 
381 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
382 			    dp_rx_tid_update_cb, rx_tid)) {
383 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
384 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
385 	}
386 
387 	rx_tid->ba_win_size = ba_window_size;
388 
389 	if (dp_get_peer_vdev_roaming_in_progress(peer))
390 		return QDF_STATUS_E_PERM;
391 
392 	if (!bar_update)
393 		dp_peer_rx_reorder_queue_setup(soc, peer,
394 					       tid, ba_window_size);
395 
396 	return QDF_STATUS_SUCCESS;
397 }
398 
399 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
400 /**
401  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
402  *                                    the deferred list
403  * @soc: Datapath soc handle
404  * @freedesc: REO DESC reference that needs to be freed
405  *
406  * Return: true if enqueued, else false
407  */
408 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
409 					   struct reo_desc_list_node *freedesc)
410 {
411 	struct reo_desc_deferred_freelist_node *desc;
412 
413 	if (!qdf_atomic_read(&soc->cmn_init_done))
414 		return false;
415 
416 	desc = qdf_mem_malloc(sizeof(*desc));
417 	if (!desc)
418 		return false;
419 
420 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
421 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
422 	desc->hw_qdesc_vaddr_unaligned =
423 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
424 	desc->free_ts = qdf_get_system_timestamp();
425 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
426 
427 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
428 	if (!soc->reo_desc_deferred_freelist_init) {
429 		qdf_mem_free(desc);
430 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
431 		return false;
432 	}
433 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
434 			     (qdf_list_node_t *)desc);
435 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
436 
437 	return true;
438 }
439 
440 /**
441  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
442  *                            based on time threshold
443  * @soc: Datapath soc handle
444  *
445  * Return: None
446  */
447 static void dp_reo_desc_defer_free(struct dp_soc *soc)
448 {
449 	struct reo_desc_deferred_freelist_node *desc;
450 	unsigned long curr_ts = qdf_get_system_timestamp();
451 
452 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
453 
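	/* Only entries that have aged past REO_DESC_DEFERRED_FREE_MS are
	 * freed; the list is FIFO-ordered by free_ts, so stop at the first
	 * entry that is still too young.
	 */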
454 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
455 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
456 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
457 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
458 				      (qdf_list_node_t **)&desc);
459 
460 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
461 
462 		qdf_mem_unmap_nbytes_single(soc->osdev,
463 					    desc->hw_qdesc_paddr,
464 					    QDF_DMA_BIDIRECTIONAL,
465 					    desc->hw_qdesc_alloc_size);
466 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
467 		qdf_mem_free(desc);
468 
469 		curr_ts = qdf_get_system_timestamp();
470 	}
471 
472 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
473 }
474 #else
475 static inline bool
476 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
477 			       struct reo_desc_list_node *freedesc)
478 {
479 	return false;
480 }
481 
482 static void dp_reo_desc_defer_free(struct dp_soc *soc)
483 {
484 }
485 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
486 
487 /**
488  * dp_reo_desc_free() - Callback to free REO descriptor memory after
489  * HW cache flush
490  *
491  * @soc: DP SOC handle
492  * @cb_ctxt: Callback context
493  * @reo_status: REO command status
494  */
495 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
496 			     union hal_reo_status *reo_status)
497 {
498 	struct reo_desc_list_node *freedesc =
499 		(struct reo_desc_list_node *)cb_ctxt;
500 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
501 	unsigned long curr_ts = qdf_get_system_timestamp();
502 
503 	if ((reo_status->fl_cache_status.header.status !=
504 		HAL_REO_CMD_SUCCESS) &&
505 		(reo_status->fl_cache_status.header.status !=
506 		HAL_REO_CMD_DRAIN)) {
507 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
508 			    soc, reo_status->fl_cache_status.header.status,
509 			    freedesc->rx_tid.tid);
510 	}
511 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
512 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
513 		     rx_tid->tid);
514 
515 	/* If the REO desc is enqueued to be freed at a later point
516 	 * in time, just free the freedesc alone and return
517 	 */
518 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
519 		goto out;
520 
521 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
522 
523 	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
524 	qdf_mem_unmap_nbytes_single(soc->osdev,
525 				    rx_tid->hw_qdesc_paddr,
526 				    QDF_DMA_BIDIRECTIONAL,
527 				    rx_tid->hw_qdesc_alloc_size);
528 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
529 out:
530 	qdf_mem_free(freedesc);
531 }
532 
533 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
534 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
535 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
536 {
537 	if (dma_addr < 0x50000000)
538 		return QDF_STATUS_E_FAILURE;
539 	else
540 		return QDF_STATUS_SUCCESS;
541 }
542 #else
543 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
544 {
545 	return QDF_STATUS_SUCCESS;
546 }
547 #endif
548 
549 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
550 				 uint32_t ba_window_size, uint32_t start_seq)
551 {
552 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
553 	struct dp_vdev *vdev = peer->vdev;
554 	struct dp_soc *soc = vdev->pdev->soc;
555 	uint32_t hw_qdesc_size;
556 	uint32_t hw_qdesc_align;
557 	int hal_pn_type;
558 	void *hw_qdesc_vaddr;
559 	uint32_t alloc_tries = 0;
560 	QDF_STATUS status = QDF_STATUS_SUCCESS;
561 	struct dp_txrx_peer *txrx_peer;
562 
563 	if (!qdf_atomic_read(&peer->is_default_route_set))
564 		return QDF_STATUS_E_FAILURE;
565 
566 	if (!dp_rx_tid_setup_allow(peer)) {
567 		dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT,
568 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
569 		goto send_wmi_reo_cmd;
570 	}
571 
572 	rx_tid->ba_win_size = ba_window_size;
573 	if (rx_tid->hw_qdesc_vaddr_unaligned)
574 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
575 			start_seq, false);
576 	rx_tid->delba_tx_status = 0;
577 	rx_tid->ppdu_id_2k = 0;
578 	rx_tid->num_of_addba_req = 0;
579 	rx_tid->num_of_delba_req = 0;
580 	rx_tid->num_of_addba_resp = 0;
581 	rx_tid->num_addba_rsp_failed = 0;
582 	rx_tid->num_addba_rsp_success = 0;
583 	rx_tid->delba_tx_success_cnt = 0;
584 	rx_tid->delba_tx_fail_cnt = 0;
585 	rx_tid->statuscode = 0;
586 
587 	/* TODO: Allocating HW queue descriptors based on max BA window size
588 	 * for all QOS TIDs so that same descriptor can be used later when
589 	 * ADDBA request is received. This should be changed to allocate HW
590 	 * queue descriptors based on BA window size being negotiated (0 for
591 	 * non BA cases), and reallocate when BA window size changes and also
592 	 * send WMI message to FW to change the REO queue descriptor in Rx
593 	 * peer entry as part of dp_rx_tid_update.
594 	 */
595 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
596 					       ba_window_size, tid);
597 
598 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
599 	/* To avoid unnecessary extra allocation for alignment, try allocating
600 	 * the exact size and see if we already have an aligned address.
601 	 */
602 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
603 
604 try_desc_alloc:
605 	rx_tid->hw_qdesc_vaddr_unaligned =
606 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
607 
608 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
609 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
610 			    soc, tid);
611 		return QDF_STATUS_E_NOMEM;
612 	}
613 
614 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
615 		hw_qdesc_align) {
616 		/* Address allocated above is not aligned. Allocate extra
617 		 * memory for alignment
618 		 */
619 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
620 		rx_tid->hw_qdesc_vaddr_unaligned =
621 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
622 					hw_qdesc_align - 1);
623 
624 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
625 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
626 				    soc, tid);
627 			return QDF_STATUS_E_NOMEM;
628 		}
629 
630 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
631 			rx_tid->hw_qdesc_vaddr_unaligned,
632 			hw_qdesc_align);
633 
634 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
635 			      soc, rx_tid->hw_qdesc_alloc_size,
636 			      hw_qdesc_vaddr);
637 
638 	} else {
639 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
640 	}
641 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
642 
643 	txrx_peer = dp_get_txrx_peer(peer);
644 
645 	/* TODO: Ensure that sec_type is set before ADDBA is received.
646 	 * Currently this is set based on htt indication
647 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
648 	 */
649 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
650 	case cdp_sec_type_tkip_nomic:
651 	case cdp_sec_type_aes_ccmp:
652 	case cdp_sec_type_aes_ccmp_256:
653 	case cdp_sec_type_aes_gcmp:
654 	case cdp_sec_type_aes_gcmp_256:
655 		hal_pn_type = HAL_PN_WPA;
656 		break;
657 	case cdp_sec_type_wapi:
658 		if (vdev->opmode == wlan_op_mode_ap)
659 			hal_pn_type = HAL_PN_WAPI_EVEN;
660 		else
661 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
662 		break;
663 	default:
664 		hal_pn_type = HAL_PN_NONE;
665 		break;
666 	}
667 
668 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
669 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
670 		vdev->vdev_stats_id);
671 
672 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
673 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
674 		&(rx_tid->hw_qdesc_paddr));
675 
676 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
677 			QDF_STATUS_SUCCESS) {
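		/* Retry the allocation a bounded number of times; on the
		 * emulation build the descriptor bus address must fall at or
		 * above 0x50000000 (see dp_reo_desc_addr_chk above).
		 */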
678 		if (alloc_tries++ < 10) {
679 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
680 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
681 			goto try_desc_alloc;
682 		} else {
683 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
684 				    soc, tid);
685 			status = QDF_STATUS_E_NOMEM;
686 			goto error;
687 		}
688 	}
689 
690 send_wmi_reo_cmd:
691 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
692 		status = QDF_STATUS_E_PERM;
693 		goto error;
694 	}
695 
696 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
697 						tid, ba_window_size);
698 	if (QDF_IS_STATUS_SUCCESS(status))
699 		return status;
700 
701 error:
702 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
703 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
704 		    QDF_STATUS_SUCCESS)
705 			qdf_mem_unmap_nbytes_single(
706 				soc->osdev,
707 				rx_tid->hw_qdesc_paddr,
708 				QDF_DMA_BIDIRECTIONAL,
709 				rx_tid->hw_qdesc_alloc_size);
710 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
711 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
712 		rx_tid->hw_qdesc_paddr = 0;
713 	}
714 	return status;
715 }
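
/*
 * Illustrative call sequence (see dp_peer_rx_tid_setup() below): during peer
 * setup the non-QoS TID and TID 0 are created with a BA window of 1 and a
 * start sequence of 0, e.g.
 *
 *   dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
 *
 * Larger windows are negotiated later through the ADDBA path, which ends up
 * in dp_rx_tid_update_wifi3() on the already allocated queue descriptor.
 */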
716 
717 #ifdef DP_UMAC_HW_RESET_SUPPORT
718 static
719 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
720 {
721 	int tid;
722 
723 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
724 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
725 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
726 
727 		if (vaddr)
728 			dp_reset_rx_reo_tid_queue(soc, vaddr,
729 						  rx_tid->hw_qdesc_alloc_size);
730 	}
731 }
732 
733 void dp_reset_tid_q_setup(struct dp_soc *soc)
734 {
735 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
736 }
737 #endif
738 #ifdef REO_DESC_DEFER_FREE
739 /**
740  * dp_reo_desc_clean_up() - If the command to flush the base desc fails,
741  * add the desc back to the freelist and defer the deletion
742  *
743  * @soc: DP SOC handle
744  * @desc: Base descriptor to be freed
745  * @reo_status: REO command status
746  */
747 static void dp_reo_desc_clean_up(struct dp_soc *soc,
748 				 struct reo_desc_list_node *desc,
749 				 union hal_reo_status *reo_status)
750 {
751 	desc->free_ts = qdf_get_system_timestamp();
752 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
753 	qdf_list_insert_back(&soc->reo_desc_freelist,
754 			     (qdf_list_node_t *)desc);
755 }
756 
757 /**
758  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
759  * to the cmd ring to avoid a REO hang
760  *
761  * @list_size: REO desc list size to be cleaned
762  */
763 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
764 {
765 	unsigned long curr_ts = qdf_get_system_timestamp();
766 
767 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
768 		dp_err_log("%lu:freedesc number %d in freelist",
769 			   curr_ts, *list_size);
770 		/* limit the batch queue size */
771 		*list_size = REO_DESC_FREELIST_SIZE;
772 	}
773 }
774 #else
775 /**
776  * dp_reo_desc_clean_up() - If sending the REO command to flush the
777  * cache fails, free the base REO desc anyway
778  *
779  * @soc: DP SOC handle
780  * @desc: Base descriptor to be freed
781  * @reo_status: REO command status
782  */
783 static void dp_reo_desc_clean_up(struct dp_soc *soc,
784 				 struct reo_desc_list_node *desc,
785 				 union hal_reo_status *reo_status)
786 {
787 	if (reo_status) {
788 		qdf_mem_zero(reo_status, sizeof(*reo_status));
789 		reo_status->fl_cache_status.header.status = 0;
790 		dp_reo_desc_free(soc, (void *)desc, reo_status);
791 	}
792 }
793 
794 /**
795  * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
796  * ring in avoid of REO hang
797  *
798  * @list_size: REO desc list size to be cleaned
799  */
800 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
801 {
802 }
803 #endif
804 
805 /**
806  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
807  * cmd and re-insert desc into free list if send fails.
808  *
809  * @soc: DP SOC handle
810  * @desc: desc with resend update cmd flag set
811  * @rx_tid: Desc RX tid associated with update cmd for resetting
812  * valid field to 0 in h/w
813  *
814  * Return: QDF status
815  */
816 static QDF_STATUS
817 dp_resend_update_reo_cmd(struct dp_soc *soc,
818 			 struct reo_desc_list_node *desc,
819 			 struct dp_rx_tid *rx_tid)
820 {
821 	struct hal_reo_cmd_params params;
822 
823 	qdf_mem_zero(&params, sizeof(params));
824 	params.std.need_status = 1;
825 	params.std.addr_lo =
826 		rx_tid->hw_qdesc_paddr & 0xffffffff;
827 	params.std.addr_hi =
828 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
829 	params.u.upd_queue_params.update_vld = 1;
830 	params.u.upd_queue_params.vld = 0;
831 	desc->resend_update_reo_cmd = false;
832 	/*
833 	 * If the cmd send fails then set resend_update_reo_cmd flag
834 	 * and insert the desc at the end of the free list to retry.
835 	 */
836 	if (dp_reo_send_cmd(soc,
837 			    CMD_UPDATE_RX_REO_QUEUE,
838 			    &params,
839 			    dp_rx_tid_delete_cb,
840 			    (void *)desc)
841 	    != QDF_STATUS_SUCCESS) {
842 		desc->resend_update_reo_cmd = true;
843 		desc->free_ts = qdf_get_system_timestamp();
844 		qdf_list_insert_back(&soc->reo_desc_freelist,
845 				     (qdf_list_node_t *)desc);
846 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
847 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
848 		return QDF_STATUS_E_FAILURE;
849 	}
850 
851 	return QDF_STATUS_SUCCESS;
852 }
853 
854 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
855 			 union hal_reo_status *reo_status)
856 {
857 	struct reo_desc_list_node *freedesc =
858 		(struct reo_desc_list_node *)cb_ctxt;
859 	uint32_t list_size;
860 	struct reo_desc_list_node *desc = NULL;
861 	unsigned long curr_ts = qdf_get_system_timestamp();
862 	uint32_t desc_size, tot_desc_size;
863 	struct hal_reo_cmd_params params;
864 	bool flush_failure = false;
865 
866 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
867 
868 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
869 		qdf_mem_zero(reo_status, sizeof(*reo_status));
870 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
871 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
872 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
873 		return;
874 	} else if (reo_status->rx_queue_status.header.status !=
875 		HAL_REO_CMD_SUCCESS) {
876 		/* Should not happen normally. Just print error for now */
877 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
878 			   reo_status->rx_queue_status.header.status,
879 			   freedesc->rx_tid.tid);
880 	}
881 
882 	dp_peer_info("%pK: rx_tid: %d status: %d",
883 		     soc, freedesc->rx_tid.tid,
884 		     reo_status->rx_queue_status.header.status);
885 
886 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
887 	freedesc->free_ts = curr_ts;
888 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
889 				  (qdf_list_node_t *)freedesc, &list_size);
890 
891 	/* On the MCL path the desc is added back to reo_desc_freelist when
892 	 * a REO FLUSH fails. This can make the number of pending REO queues
893 	 * in the free list grow beyond the REO_CMD_RING max size, flooding
894 	 * the REO CMD ring and leaving the REO HW in an unexpected state.
895 	 * So the number of REO commands issued per batch must be limited.
896 	 */
897 	dp_reo_limit_clean_batch_sz(&list_size);
898 
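	/* Drain the freelist when it has grown too large, when the oldest
	 * entry has aged past REO_DESC_FREE_DEFER_MS, or when an entry is
	 * pending a resend of the UPDATE command.
	 */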
899 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
900 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
901 		((list_size >= REO_DESC_FREELIST_SIZE) ||
902 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
903 		(desc->resend_update_reo_cmd && list_size))) {
904 		struct dp_rx_tid *rx_tid;
905 
906 		qdf_list_remove_front(&soc->reo_desc_freelist,
907 				      (qdf_list_node_t **)&desc);
908 		list_size--;
909 		rx_tid = &desc->rx_tid;
910 
911 		/* First process descs with resend_update_reo_cmd set */
912 		if (desc->resend_update_reo_cmd) {
913 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
914 			    QDF_STATUS_SUCCESS)
915 				break;
916 			else
917 				continue;
918 		}
919 
920 		/* Flush and invalidate REO descriptor from HW cache: Base and
921 		 * extension descriptors should be flushed separately
922 		 */
923 		if (desc->pending_ext_desc_size)
924 			tot_desc_size = desc->pending_ext_desc_size;
925 		else
926 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
927 		/* Get base descriptor size by passing non-qos TID */
928 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
929 						   DP_NON_QOS_TID);
930 
931 		/* Flush REO extension descriptors, from the end of the allocation down toward the base descriptor */
932 		while ((tot_desc_size -= desc_size) > 0) {
933 			qdf_mem_zero(&params, sizeof(params));
934 			params.std.addr_lo =
935 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
936 				tot_desc_size) & 0xffffffff;
937 			params.std.addr_hi =
938 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
939 
940 			if (QDF_STATUS_SUCCESS !=
941 			    dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params,
942 					    NULL, NULL)) {
943 				dp_info_rl("fail to send CMD_FLUSH_CACHE:"
944 					   "tid %d desc %pK", rx_tid->tid,
945 					   (void *)(rx_tid->hw_qdesc_paddr));
946 				desc->pending_ext_desc_size = tot_desc_size +
947 								      desc_size;
948 				dp_reo_desc_clean_up(soc, desc, reo_status);
949 				flush_failure = true;
950 				break;
951 			}
952 		}
953 
954 		if (flush_failure)
955 			break;
956 
957 		desc->pending_ext_desc_size = desc_size;
958 
959 		/* Flush base descriptor */
960 		qdf_mem_zero(&params, sizeof(params));
961 		params.std.need_status = 1;
962 		params.std.addr_lo =
963 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
964 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
965 		if (rx_tid->ba_win_size > 256)
966 			params.u.fl_cache_params.flush_q_1k_desc = 1;
967 		params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
968 
969 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
970 							  CMD_FLUSH_CACHE,
971 							  &params,
972 							  dp_reo_desc_free,
973 							  (void *)desc)) {
974 			union hal_reo_status reo_status;
975 			/*
976 			 * If dp_reo_send_cmd returns failure, the related TID queue
977 			 * desc should be unmapped, and the local reo_desc, together
978 			 * with the TID queue desc, also needs to be freed accordingly.
979 			 *
980 			 * Here invoke the desc_free function directly to clean up.
981 			 *
982 			 * In the MCL path, add the desc back to the free desc list
983 			 * and defer deletion.
984 			 */
985 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
986 				   rx_tid->tid);
987 			dp_reo_desc_clean_up(soc, desc, &reo_status);
988 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
989 			break;
990 		}
991 	}
992 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
993 
994 	dp_reo_desc_defer_free(soc);
995 }
996 
997 /**
998  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
999  * @peer: Datapath peer handle
1000  * @tid: TID
1001  *
1002  * Return: 0 on success, error code on failure
1003  */
1004 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1005 {
1006 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1007 	struct dp_soc *soc = peer->vdev->pdev->soc;
1008 	union hal_reo_status reo_status;
1009 	struct hal_reo_cmd_params params;
1010 	struct reo_desc_list_node *freedesc =
1011 		qdf_mem_malloc(sizeof(*freedesc));
1012 
1013 	if (!freedesc) {
1014 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
1015 			    soc, tid);
1016 		qdf_assert(0);
1017 		return -ENOMEM;
1018 	}
1019 
1020 	freedesc->rx_tid = *rx_tid;
1021 	freedesc->resend_update_reo_cmd = false;
1022 
1023 	qdf_mem_zero(&params, sizeof(params));
1024 
1025 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
1026 
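	/* Invoke the delete callback directly with a synthesized SUCCESS
	 * status; it places freedesc on the REO descriptor freelist and
	 * issues the cache flush commands from there.
	 */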
1027 	reo_status.rx_queue_status.header.status = HAL_REO_CMD_SUCCESS;
1028 	dp_rx_tid_delete_cb(soc, freedesc, &reo_status);
1029 
1030 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1031 	rx_tid->hw_qdesc_alloc_size = 0;
1032 	rx_tid->hw_qdesc_paddr = 0;
1033 
1034 	return 0;
1035 }
1036 
1037 #ifdef DP_LFR
1038 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1039 {
1040 	int tid;
1041 
1042 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1043 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1044 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
1045 			      tid, peer, peer->local_id);
1046 	}
1047 }
1048 #else
1049 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
1050 #endif
1051 
1052 #ifdef WLAN_FEATURE_11BE_MLO
1053 /**
1054  * dp_peer_rx_tids_init() - initialize each tids in peer
1055  * @peer: peer pointer
1056  *
1057  * Return: None
1058  */
1059 static void dp_peer_rx_tids_init(struct dp_peer *peer)
1060 {
1061 	int tid;
1062 	struct dp_rx_tid *rx_tid;
1063 	struct dp_rx_tid_defrag *rx_tid_defrag;
1064 
1065 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1066 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1067 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1068 
1069 			rx_tid_defrag->array = &rx_tid_defrag->base;
1070 			rx_tid_defrag->defrag_timeout_ms = 0;
1071 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1072 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1073 			rx_tid_defrag->base.head = NULL;
1074 			rx_tid_defrag->base.tail = NULL;
1075 			rx_tid_defrag->tid = tid;
1076 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
1077 		}
1078 	}
1079 
1080 	/* If this is not the first assoc link peer,
1081 	 * do not initialize rx_tids again.
1082 	 */
1083 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
1084 		return;
1085 
1086 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1087 		rx_tid = &peer->rx_tid[tid];
1088 		rx_tid->tid = tid;
1089 		rx_tid->ba_win_size = 0;
1090 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1091 	}
1092 }
1093 #else
1094 static void dp_peer_rx_tids_init(struct dp_peer *peer)
1095 {
1096 	int tid;
1097 	struct dp_rx_tid *rx_tid;
1098 	struct dp_rx_tid_defrag *rx_tid_defrag;
1099 
1100 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1101 		rx_tid = &peer->rx_tid[tid];
1102 
1103 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1104 		rx_tid->tid = tid;
1105 		rx_tid->ba_win_size = 0;
1106 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1107 
1108 		rx_tid_defrag->base.head = NULL;
1109 		rx_tid_defrag->base.tail = NULL;
1110 		rx_tid_defrag->tid = tid;
1111 		rx_tid_defrag->array = &rx_tid_defrag->base;
1112 		rx_tid_defrag->defrag_timeout_ms = 0;
1113 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1114 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1115 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
1116 	}
1117 }
1118 #endif
1119 
1120 void dp_peer_rx_tid_setup(struct dp_peer *peer)
1121 {
1122 	dp_peer_rx_tids_init(peer);
1123 
1124 	/* Setup default (non-qos) rx tid queue */
1125 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1126 
1127 	/* Set up the rx tid queue for TID 0 now.
1128 	 * Other queues will be set up on receiving the first packet, which
1129 	 * would otherwise cause a NULL REO queue error
1130 	 */
1131 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1132 
1133 	/*
1134 	 * Setup the rest of TID's to handle LFR
1135 	 */
1136 	dp_peer_setup_remaining_tids(peer);
1137 }
1138 
1139 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1140 {
1141 	int tid;
1142 	uint32_t tid_delete_mask = 0;
1143 
1144 	if (!peer->txrx_peer)
1145 		return;
1146 
1147 	dp_info("Remove tids for peer: %pK", peer);
1148 
1149 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1150 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1151 		struct dp_rx_tid_defrag *defrag_rx_tid =
1152 				&peer->txrx_peer->rx_tid[tid];
1153 
1154 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
1155 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
1156 			/* Cleanup defrag related resource */
1157 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
1158 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
1159 		}
1160 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
1161 
1162 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1163 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
1164 			dp_rx_tid_delete_wifi3(peer, tid);
1165 
1166 			tid_delete_mask |= (1 << tid);
1167 		}
1168 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1169 	}
1170 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1171 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1172 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
1173 			peer->vdev->pdev->pdev_id,
1174 			peer->vdev->vdev_id, peer->mac_addr.raw,
1175 			tid_delete_mask);
1176 	}
1177 #endif
1178 }
1179 
1180 /**
1181  * dp_teardown_256_ba_sessions() - Tear down sessions using a 256
1182  *                                window size when a request with
1183  *                                a 64 window size is received.
1184  *                                This is done as a WAR since HW can
1185  *                                have only one setting per peer (64 or 256).
1186  *                                For HKv2, we use the per-tid buffersize
1187  *                                setting for TIDs 0 to per_tid_basize_max_tid.
1188  *                                For TIDs greater than per_tid_basize_max_tid
1189  *                                we use the HKv1 method.
1190  * @peer: Datapath peer
1191  *
1192  * Return: void
1193  */
1194 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1195 {
1196 	uint8_t delba_rcode = 0;
1197 	int tid;
1198 	struct dp_rx_tid *rx_tid = NULL;
1199 
1200 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
1201 	for (; tid < DP_MAX_TIDS; tid++) {
1202 		rx_tid = &peer->rx_tid[tid];
1203 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1204 
1205 		if (rx_tid->ba_win_size <= 64) {
1206 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1207 			continue;
1208 		} else {
1209 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1210 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1211 				/* send delba */
1212 				if (!rx_tid->delba_tx_status) {
1213 					rx_tid->delba_tx_retry++;
1214 					rx_tid->delba_tx_status = 1;
1215 					rx_tid->delba_rcode =
1216 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1217 					delba_rcode = rx_tid->delba_rcode;
1218 
1219 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1220 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1221 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1222 							peer->vdev->pdev->soc->ctrl_psoc,
1223 							peer->vdev->vdev_id,
1224 							peer->mac_addr.raw,
1225 							tid, delba_rcode,
1226 							CDP_DELBA_REASON_NONE);
1227 				} else {
1228 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1229 				}
1230 			} else {
1231 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1232 			}
1233 		}
1234 	}
1235 }
1236 
1237 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1238 				      uint8_t *peer_mac,
1239 				      uint16_t vdev_id,
1240 				      uint8_t tid, int status)
1241 {
1242 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1243 					(struct dp_soc *)cdp_soc,
1244 					peer_mac, 0, vdev_id,
1245 					DP_MOD_ID_CDP);
1246 	struct dp_rx_tid *rx_tid = NULL;
1247 
1248 	if (!peer) {
1249 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
1250 		goto fail;
1251 	}
1252 	rx_tid = &peer->rx_tid[tid];
1253 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1254 	if (status) {
1255 		rx_tid->num_addba_rsp_failed++;
1256 		if (rx_tid->hw_qdesc_vaddr_unaligned)
1257 			dp_rx_tid_update_wifi3(peer, tid, 1,
1258 					       IEEE80211_SEQ_MAX, false);
1259 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1260 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1261 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
1262 
1263 		goto success;
1264 	}
1265 
1266 	rx_tid->num_addba_rsp_success++;
1267 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1268 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1269 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1270 			    cdp_soc, tid);
1271 		goto fail;
1272 	}
1273 
1274 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
1275 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1276 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
1277 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1278 		goto fail;
1279 	}
1280 
1281 	if (dp_rx_tid_update_wifi3(peer, tid,
1282 				   rx_tid->ba_win_size,
1283 				   rx_tid->startseqnum,
1284 				   false)) {
1285 		dp_err("Failed update REO SSN");
1286 	}
1287 
1288 	dp_info("tid %u window_size %u start_seq_num %u",
1289 		tid, rx_tid->ba_win_size,
1290 		rx_tid->startseqnum);
1291 
1292 	/* First Session */
1293 	if (peer->active_ba_session_cnt == 0) {
1294 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
1295 			peer->hw_buffer_size = 256;
1296 		else if (rx_tid->ba_win_size <= 1024 &&
1297 			 rx_tid->ba_win_size > 256)
1298 			peer->hw_buffer_size = 1024;
1299 		else
1300 			peer->hw_buffer_size = 64;
1301 	}
1302 
1303 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1304 
1305 	peer->active_ba_session_cnt++;
1306 
1307 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1308 
1309 	/* Kill any session having 256 buffer size
1310 	 * when 64 buffer size request is received.
1311 	 * Also, latch on to 64 as new buffer size.
1312 	 */
1313 	if (peer->kill_256_sessions) {
1314 		dp_teardown_256_ba_sessions(peer);
1315 		peer->kill_256_sessions = 0;
1316 	}
1317 
1318 success:
1319 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1320 	return QDF_STATUS_SUCCESS;
1321 
1322 fail:
1323 	if (peer)
1324 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1325 
1326 	return QDF_STATUS_E_FAILURE;
1327 }
1328 
1329 QDF_STATUS
1330 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1331 			     uint16_t vdev_id, uint8_t tid,
1332 			     uint8_t *dialogtoken, uint16_t *statuscode,
1333 			     uint16_t *buffersize, uint16_t *batimeout)
1334 {
1335 	struct dp_rx_tid *rx_tid = NULL;
1336 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1337 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
1338 						       peer_mac, 0, vdev_id,
1339 						       DP_MOD_ID_CDP);
1340 
1341 	if (!peer) {
1342 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
1343 		return QDF_STATUS_E_FAILURE;
1344 	}
1345 	rx_tid = &peer->rx_tid[tid];
1346 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1347 	rx_tid->num_of_addba_resp++;
1348 	/* setup ADDBA response parameters */
1349 	*dialogtoken = rx_tid->dialogtoken;
1350 	*statuscode = rx_tid->statuscode;
1351 	*buffersize = rx_tid->ba_win_size;
1352 	*batimeout  = 0;
1353 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1354 
1355 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1356 
1357 	return status;
1358 }
1359 
1360 /**
1361  * dp_check_ba_buffersize() - Check buffer size in request
1362  *                            and latch onto this size based on
1363  *                            size used in first active session.
1364  * @peer: Datapath peer
1365  * @tid: Tid
1366  * @buffersize: Block ack window size
1367  *
1368  * Return: void
1369  */
1370 static void dp_check_ba_buffersize(struct dp_peer *peer,
1371 				   uint16_t tid,
1372 				   uint16_t buffersize)
1373 {
1374 	struct dp_rx_tid *rx_tid = NULL;
1375 	struct dp_soc *soc = peer->vdev->pdev->soc;
1376 	uint16_t max_ba_window;
1377 
1378 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
1379 	dp_info("Input buffersize %d, max dp allowed %d",
1380 		buffersize, max_ba_window);
1381 	/* Adjust BA window size, restrict it to max DP allowed */
1382 	buffersize = QDF_MIN(buffersize, max_ba_window);
1383 
1384 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
1385 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1386 		soc->per_tid_basize_max_tid, tid, buffersize,
1387 		peer->hw_buffer_size);
1388 
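	/* The per-peer HW buffer size is latched on the first active
	 * session: once 64 or 256 is chosen, later requests are clamped
	 * (or existing 256 sessions are torn down via kill_256_sessions),
	 * except on targets with per-TID buffer size support
	 * (tid < per_tid_basize_max_tid).
	 */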
1389 	rx_tid = &peer->rx_tid[tid];
1390 	if (soc->per_tid_basize_max_tid &&
1391 	    tid < soc->per_tid_basize_max_tid) {
1392 		rx_tid->ba_win_size = buffersize;
1393 		goto out;
1394 	} else {
1395 		if (peer->active_ba_session_cnt == 0) {
1396 			rx_tid->ba_win_size = buffersize;
1397 		} else {
1398 			if (peer->hw_buffer_size == 64) {
1399 				if (buffersize <= 64)
1400 					rx_tid->ba_win_size = buffersize;
1401 				else
1402 					rx_tid->ba_win_size = peer->hw_buffer_size;
1403 			} else if (peer->hw_buffer_size == 256) {
1404 				if (buffersize > 64) {
1405 					rx_tid->ba_win_size = buffersize;
1406 				} else {
1407 					rx_tid->ba_win_size = buffersize;
1408 					peer->hw_buffer_size = 64;
1409 					peer->kill_256_sessions = 1;
1410 				}
1411 			} else if (buffersize <= 1024) {
1412 				/*
1413 				 * Above checks are only for HK V2
1414 				 * Set incoming buffer size for others
1415 				 */
1416 				rx_tid->ba_win_size = buffersize;
1417 			} else {
1418 				dp_err("Invalid buffer size %d", buffersize);
1419 				qdf_assert_always(0);
1420 			}
1421 		}
1422 	}
1423 
1424 out:
1425 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
1426 		rx_tid->ba_win_size,
1427 		peer->hw_buffer_size,
1428 		peer->kill_256_sessions);
1429 }
1430 
1431 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
1432 					uint8_t *peer_mac, uint16_t vdev_id,
1433 					uint8_t tid, uint16_t buffersize)
1434 {
1435 	struct dp_rx_tid *rx_tid = NULL;
1436 	struct dp_peer *peer;
1437 
1438 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1439 					      peer_mac, 0, vdev_id,
1440 					      DP_MOD_ID_CDP);
1441 	if (!peer) {
1442 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
1443 		return QDF_STATUS_E_FAILURE;
1444 	}
1445 
1446 	rx_tid = &peer->rx_tid[tid];
1447 
1448 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1449 	rx_tid->ba_win_size = buffersize;
1450 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1451 
1452 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
1453 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
1454 
1455 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1456 
1457 	return QDF_STATUS_SUCCESS;
1458 }
1459 
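/*
 * Sentinel for rx_ba_win_size_override: when set to this value the ADDBA
 * request is effectively refused (the window is forced to 1 and the
 * response status is set to IEEE80211_STATUS_REFUSED).
 */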
1460 #define DP_RX_BA_SESSION_DISABLE  1
1461 
1462 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1463 				  uint8_t *peer_mac,
1464 				  uint16_t vdev_id,
1465 				  uint8_t dialogtoken,
1466 				  uint16_t tid, uint16_t batimeout,
1467 				  uint16_t buffersize,
1468 				  uint16_t startseqnum)
1469 {
1470 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1471 	struct dp_rx_tid *rx_tid = NULL;
1472 	struct dp_peer *peer;
1473 
1474 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1475 					      peer_mac,
1476 					      0, vdev_id,
1477 					      DP_MOD_ID_CDP);
1478 
1479 	if (!peer) {
1480 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
1481 		return QDF_STATUS_E_FAILURE;
1482 	}
1483 	rx_tid = &peer->rx_tid[tid];
1484 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1485 	rx_tid->num_of_addba_req++;
1486 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
1487 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
1488 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1489 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1490 		peer->active_ba_session_cnt--;
1491 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
1492 			      cdp_soc, tid);
1493 	}
1494 
1495 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1496 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1497 		status = QDF_STATUS_E_FAILURE;
1498 		goto fail;
1499 	}
1500 
1501 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
1502 		dp_peer_info("%pK: disable BA session",
1503 			     cdp_soc);
1504 
1505 		buffersize = 1;
1506 	} else if (rx_tid->rx_ba_win_size_override) {
1507 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
1508 			     rx_tid->rx_ba_win_size_override);
1509 
1510 		buffersize = rx_tid->rx_ba_win_size_override;
1511 	} else {
1512 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
1513 			     buffersize);
1514 	}
1515 
1516 	dp_check_ba_buffersize(peer, tid, buffersize);
1517 
1518 	if (dp_rx_tid_setup_wifi3(peer, tid,
1519 	    rx_tid->ba_win_size, startseqnum)) {
1520 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1521 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1522 		status = QDF_STATUS_E_FAILURE;
1523 		goto fail;
1524 	}
1525 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
1526 
1527 	rx_tid->dialogtoken = dialogtoken;
1528 	rx_tid->startseqnum = startseqnum;
1529 
1530 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1531 		rx_tid->statuscode = rx_tid->userstatuscode;
1532 	else
1533 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1534 
1535 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
1536 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
1537 
1538 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1539 
1540 fail:
1541 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1542 
1543 	return status;
1544 }
1545 
1546 QDF_STATUS
1547 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1548 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
1549 {
1550 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1551 					(struct dp_soc *)cdp_soc,
1552 					peer_mac, 0, vdev_id,
1553 					DP_MOD_ID_CDP);
1554 	struct dp_rx_tid *rx_tid;
1555 
1556 	if (!peer) {
1557 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
1558 		return QDF_STATUS_E_FAILURE;
1559 	}
1560 
1561 	rx_tid = &peer->rx_tid[tid];
1562 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1563 	rx_tid->userstatuscode = statuscode;
1564 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1565 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1566 
1567 	return QDF_STATUS_SUCCESS;
1568 }
1569 
1570 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1571 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
1572 {
1573 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1574 	struct dp_rx_tid *rx_tid;
1575 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1576 					(struct dp_soc *)cdp_soc,
1577 					peer_mac, 0, vdev_id,
1578 					DP_MOD_ID_CDP);
1579 
1580 	if (!peer) {
1581 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
1582 		return QDF_STATUS_E_FAILURE;
1583 	}
1584 	rx_tid = &peer->rx_tid[tid];
1585 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1586 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
1587 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1588 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1589 		status = QDF_STATUS_E_FAILURE;
1590 		goto fail;
1591 	}
1592 	/* TODO: See if we can delete the existing REO queue descriptor and
1593 	 * replace with a new one without queue extension descript to save
1594 	 * memory
1595 	 */
1596 	rx_tid->delba_rcode = reasoncode;
1597 	rx_tid->num_of_delba_req++;
1598 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1599 
1600 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1601 	peer->active_ba_session_cnt--;
1602 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1603 fail:
1604 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1605 
1606 	return status;
1607 }
1608 
1609 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1610 				 uint16_t vdev_id,
1611 				 uint8_t tid, int status)
1612 {
1613 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
1614 	struct dp_rx_tid *rx_tid = NULL;
1615 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1616 					(struct dp_soc *)cdp_soc,
1617 					peer_mac, 0, vdev_id,
1618 					DP_MOD_ID_CDP);
1619 
1620 	if (!peer) {
1621 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1622 		return QDF_STATUS_E_FAILURE;
1623 	}
1624 	rx_tid = &peer->rx_tid[tid];
1625 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1626 	if (status) {
1627 		rx_tid->delba_tx_fail_cnt++;
1628 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
1629 			rx_tid->delba_tx_retry = 0;
1630 			rx_tid->delba_tx_status = 0;
1631 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1632 		} else {
1633 			rx_tid->delba_tx_retry++;
1634 			rx_tid->delba_tx_status = 1;
1635 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1636 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1637 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1638 					peer->vdev->pdev->soc->ctrl_psoc,
1639 					peer->vdev->vdev_id,
1640 					peer->mac_addr.raw, tid,
1641 					rx_tid->delba_rcode,
1642 					CDP_DELBA_REASON_NONE);
1643 		}
1644 		goto end;
1645 	} else {
1646 		rx_tid->delba_tx_success_cnt++;
1647 		rx_tid->delba_tx_retry = 0;
1648 		rx_tid->delba_tx_status = 0;
1649 	}
1650 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
1651 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1652 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1653 		peer->active_ba_session_cnt--;
1654 	}
1655 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1656 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1657 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1658 	}
1659 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1660 
1661 end:
1662 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1663 
1664 	return ret;
1665 }
1666 
1667 QDF_STATUS
1668 dp_set_pn_check_wifi3(struct cdp_soc_t *soc_t, uint8_t vdev_id,
1669 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1670 		      uint32_t *rx_pn)
1671 {
1672 	struct dp_pdev *pdev;
1673 	int i;
1674 	uint8_t pn_size;
1675 	struct hal_reo_cmd_params params;
1676 	struct dp_peer *peer = NULL;
1677 	struct dp_vdev *vdev = NULL;
1678 	struct dp_soc *soc = NULL;
1679 
1680 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc_t,
1681 					      peer_mac, 0, vdev_id,
1682 					      DP_MOD_ID_CDP);
1683 
1684 	if (!peer) {
1685 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
1686 		return QDF_STATUS_E_FAILURE;
1687 	}
1688 
1689 	vdev = peer->vdev;
1690 
1691 	if (!vdev) {
1692 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
1693 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1694 		return QDF_STATUS_E_FAILURE;
1695 	}
1696 
1697 	pdev = vdev->pdev;
1698 	soc = pdev->soc;
1699 	qdf_mem_zero(&params, sizeof(params));
1700 
1701 	params.std.need_status = 1;
1702 	params.u.upd_queue_params.update_pn_valid = 1;
1703 	params.u.upd_queue_params.update_pn_size = 1;
1704 	params.u.upd_queue_params.update_pn = 1;
1705 	params.u.upd_queue_params.update_pn_check_needed = 1;
1706 	params.u.upd_queue_params.update_svld = 1;
1707 	params.u.upd_queue_params.svld = 0;
1708 
1709 	switch (sec_type) {
1710 	case cdp_sec_type_tkip_nomic:
1711 	case cdp_sec_type_aes_ccmp:
1712 	case cdp_sec_type_aes_ccmp_256:
1713 	case cdp_sec_type_aes_gcmp:
1714 	case cdp_sec_type_aes_gcmp_256:
1715 		params.u.upd_queue_params.pn_check_needed = 1;
1716 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
1717 		pn_size = 48;
1718 		break;
1719 	case cdp_sec_type_wapi:
1720 		params.u.upd_queue_params.pn_check_needed = 1;
1721 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
1722 		pn_size = 128;
1723 		if (vdev->opmode == wlan_op_mode_ap) {
1724 			params.u.upd_queue_params.pn_even = 1;
1725 			params.u.upd_queue_params.update_pn_even = 1;
1726 		} else {
1727 			params.u.upd_queue_params.pn_uneven = 1;
1728 			params.u.upd_queue_params.update_pn_uneven = 1;
1729 		}
1730 		break;
1731 	default:
1732 		params.u.upd_queue_params.pn_check_needed = 0;
1733 		pn_size = 0;
1734 		break;
1735 	}
1736 
1737 	for (i = 0; i < DP_MAX_TIDS; i++) {
1738 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1739 
1740 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1741 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1742 			params.std.addr_lo =
1743 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1744 			params.std.addr_hi =
1745 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1746 
1747 			if (pn_size) {
1748 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
1749 					     soc, i, rx_pn[3], rx_pn[2],
1750 					     rx_pn[1], rx_pn[0]);
1751 				params.u.upd_queue_params.update_pn_valid = 1;
1752 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1753 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1754 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1755 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1756 			}
1757 			rx_tid->pn_size = pn_size;
1758 			if (dp_reo_send_cmd(soc,
1759 					    CMD_UPDATE_RX_REO_QUEUE,
1760 					    &params, dp_rx_tid_update_cb,
1761 					    rx_tid)) {
1762 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
1763 					   " tid %d desc %pK", rx_tid->tid,
1764 					   (void *)(rx_tid->hw_qdesc_paddr));
1765 				DP_STATS_INC(soc,
1766 					     rx.err.reo_cmd_send_fail, 1);
1767 			}
1768 		} else {
1769 			dp_peer_info("%pK: PN check not set up for TID:%d", soc, i);
1770 		}
1771 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1772 	}
1773 
1774 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1775 
1776 	return QDF_STATUS_SUCCESS;
1777 }
1778 
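/*
 * dp_rx_delba_ind_handler() - Handle a DELBA indication from the target for
 * the given peer_id/TID.
 *
 * If a REO queue exists and no DELBA is already pending, request the control
 * path to send a DELBA with reason QOS_SETUP_REQUIRED so the session can be
 * re-established with the (capped) window size carried in @win_sz.
 */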
1779 QDF_STATUS
1780 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
1781 			uint8_t tid, uint16_t win_sz)
1782 {
1783 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1784 	struct dp_peer *peer;
1785 	struct dp_rx_tid *rx_tid;
1786 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1787 
1788 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1789 
1790 	if (!peer) {
1791 		dp_peer_err("%pK: Couldn't find peer from ID %d",
1792 			    soc, peer_id);
1793 		return QDF_STATUS_E_FAILURE;
1794 	}
1795 
1796 	qdf_assert_always(tid < DP_MAX_TIDS);
1797 
1798 	rx_tid = &peer->rx_tid[tid];
1799 
1800 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1801 		if (!rx_tid->delba_tx_status) {
1802 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
1803 				     soc, peer_id, tid, win_sz);
1804 
1805 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1806 
1807 			rx_tid->delba_tx_status = 1;
1808 
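			/*
			 * Limit the window size advertised when the BA
			 * session is set up again to at most 63.
			 */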
1809 			rx_tid->rx_ba_win_size_override =
1810 			    qdf_min((uint16_t)63, win_sz);
1811 
1812 			rx_tid->delba_rcode =
1813 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
1814 
1815 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1816 
1817 			if (soc->cdp_soc.ol_ops->send_delba)
1818 				soc->cdp_soc.ol_ops->send_delba(
1819 					peer->vdev->pdev->soc->ctrl_psoc,
1820 					peer->vdev->vdev_id,
1821 					peer->mac_addr.raw,
1822 					tid,
1823 					rx_tid->delba_rcode,
1824 					CDP_DELBA_REASON_NONE);
1825 		}
1826 	} else {
1827 		dp_peer_err("%pK: BA session is not set up for TID:%d",
1828 			    soc, tid);
1829 		status = QDF_STATUS_E_FAILURE;
1830 	}
1831 
1832 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1833 
1834 	return status;
1835 }
1836 
1837 #ifdef IPA_OFFLOAD
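/*
 * dp_peer_get_rxtid_stats_ipa() - Issue CMD_GET_QUEUE_STATS for every data
 * TID (and the non-QoS TID) of the peer on behalf of the IPA path.
 *
 * The peer_id and TID are packed into the callback cookie; each stats
 * command is followed by a no-invalidate cache flush so the descriptor in
 * DDR reflects current HW state. Returns the number of stats commands sent.
 */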
1838 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
1839 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
1840 {
1841 	struct dp_soc *soc = peer->vdev->pdev->soc;
1842 	struct hal_reo_cmd_params params;
1843 	int i;
1844 	int stats_cmd_sent_cnt = 0;
1845 	QDF_STATUS status;
1846 	uint16_t peer_id = peer->peer_id;
1847 	unsigned long comb_peer_id_tid;
1848 	struct dp_rx_tid *rx_tid;
1849 
1850 	if (!dp_stats_cmd_cb)
1851 		return stats_cmd_sent_cnt;
1852 
1853 	qdf_mem_zero(&params, sizeof(params));
1854 	for (i = 0; i < DP_MAX_TIDS; i++) {
1855 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
1856 			continue;
1857 
1858 		rx_tid = &peer->rx_tid[i];
1859 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1860 			params.std.need_status = 1;
1861 			params.std.addr_lo =
1862 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1863 			params.std.addr_hi =
1864 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1865 			params.u.stats_params.clear = 1;
1866 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
1867 					    | peer_id);
1868 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
1869 						 &params, dp_stats_cmd_cb,
1870 						 (void *)comb_peer_id_tid);
1871 			if (QDF_IS_STATUS_SUCCESS(status))
1872 				stats_cmd_sent_cnt++;
1873 
1874 			/* Flush REO descriptor from HW cache to update stats
1875 			 * in descriptor memory. This is to help debugging
1876 			 */
1877 			qdf_mem_zero(&params, sizeof(params));
1878 			params.std.need_status = 0;
1879 			params.std.addr_lo =
1880 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1881 			params.std.addr_hi =
1882 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1883 			params.u.fl_cache_params.flush_no_inval = 1;
1884 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
1885 					NULL);
1886 		}
1887 	}
1888 
1889 	return stats_cmd_sent_cnt;
1890 }
1891 
1892 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
1893 
1894 #endif /* IPA_OFFLOAD */
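
/*
 * dp_peer_rxtid_stats() - Issue CMD_GET_QUEUE_STATS for every data TID (and
 * the non-QoS TID) of the peer.
 *
 * The caller-supplied @cb_ctxt is used as the callback context when
 * provided, otherwise the rx_tid itself is passed; each stats command is
 * followed by a no-invalidate cache flush. Returns the number of stats
 * commands sent.
 */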
1895 int dp_peer_rxtid_stats(struct dp_peer *peer,
1896 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
1897 			void *cb_ctxt)
1898 {
1899 	struct dp_soc *soc = peer->vdev->pdev->soc;
1900 	struct hal_reo_cmd_params params;
1901 	int i;
1902 	int stats_cmd_sent_cnt = 0;
1903 	QDF_STATUS status;
1904 	struct dp_rx_tid *rx_tid;
1905 
1906 	if (!dp_stats_cmd_cb)
1907 		return stats_cmd_sent_cnt;
1908 
1909 	qdf_mem_zero(&params, sizeof(params));
1910 	for (i = 0; i < DP_MAX_TIDS; i++) {
1911 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
1912 			continue;
1913 
1914 		rx_tid = &peer->rx_tid[i];
1915 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1916 			params.std.need_status = 1;
1917 			params.std.addr_lo =
1918 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1919 			params.std.addr_hi =
1920 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1921 
1922 			if (cb_ctxt) {
1923 				status = dp_reo_send_cmd(
1924 						soc, CMD_GET_QUEUE_STATS,
1925 						&params, dp_stats_cmd_cb,
1926 						cb_ctxt);
1927 			} else {
1928 				status = dp_reo_send_cmd(
1929 						soc, CMD_GET_QUEUE_STATS,
1930 						&params, dp_stats_cmd_cb,
1931 						rx_tid);
1932 			}
1933 
1934 			if (QDF_IS_STATUS_SUCCESS(status))
1935 				stats_cmd_sent_cnt++;
1936 
1937 			/* Flush REO descriptor from HW cache to update stats
1938 			 * in descriptor memory. This is to help debugging
1939 			 */
1940 			qdf_mem_zero(&params, sizeof(params));
1941 			params.std.need_status = 0;
1942 			params.std.addr_lo =
1943 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1944 			params.std.addr_hi =
1945 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1946 			params.u.fl_cache_params.flush_no_inval = 1;
1947 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
1948 					NULL);
1949 		}
1950 	}
1951 
1952 	return stats_cmd_sent_cnt;
1953 }
1954 
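/*
 * dp_peer_rx_tids_create() - Allocate and zero-initialize the per-TID
 * rx_tid array of the peer and create the per-TID locks. Allocation is
 * skipped for MLD peers.
 */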
1955 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
1956 {
1957 	uint8_t i;
1958 
1959 	if (IS_MLO_DP_MLD_PEER(peer)) {
1960 		dp_peer_info("skip rx_tid alloc for mld peer");
1961 		return QDF_STATUS_SUCCESS;
1962 	}
1963 
1964 	if (peer->rx_tid) {
1965 		QDF_BUG(0);
1966 		dp_peer_err("peer rx_tid mem already exists");
1967 		return QDF_STATUS_E_FAILURE;
1968 	}
1969 
1970 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
1971 			sizeof(struct dp_rx_tid));
1972 
1973 	if (!peer->rx_tid) {
1974 		dp_err("fail to alloc rx_tid for peer " QDF_MAC_ADDR_FMT,
1975 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1976 		return QDF_STATUS_E_NOMEM;
1977 	}
1978 
1979 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
1980 	for (i = 0; i < DP_MAX_TIDS; i++)
1981 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
1982 
1983 	return QDF_STATUS_SUCCESS;
1984 }
1985 
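/*
 * dp_peer_rx_tids_destroy() - Destroy the per-TID locks and free the rx_tid
 * array. For MLO link peers only the rx_tid reference is cleared here.
 */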
1986 void dp_peer_rx_tids_destroy(struct dp_peer *peer)
1987 {
1988 	uint8_t i;
1989 
1990 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1991 		for (i = 0; i < DP_MAX_TIDS; i++)
1992 			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
1993 
1994 		qdf_mem_free(peer->rx_tid);
1995 	}
1996 
1997 	peer->rx_tid = NULL;
1998 }
1999 
2000 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
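/*
 * dp_dump_rx_reo_queue_info() - REO flush-cache completion callback that
 * dumps the RX REO queue descriptor from DDR once the flush succeeds.
 */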
2001 void dp_dump_rx_reo_queue_info(
2002 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
2003 {
2004 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2005 
2006 	if (!rx_tid)
2007 		return;
2008 
2009 	if (reo_status->fl_cache_status.header.status !=
2010 		HAL_REO_CMD_SUCCESS) {
2011 		dp_err_rl("Rx tid REO HW desc flush failed (%d)",
2012 			  reo_status->fl_cache_status.header.status);
2013 		return;
2014 	}
2015 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2016 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
2017 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2018 }
2019 
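/*
 * dp_send_cache_flush_for_rx_tid() - Send CMD_FLUSH_CACHE for every TID of
 * the peer so the REO queue descriptors are written back to DDR (queues
 * with a BA window larger than 256 need the 1k-descriptor flush) before
 * they are dumped by dp_dump_rx_reo_queue_info().
 */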
2020 void dp_send_cache_flush_for_rx_tid(
2021 	struct dp_soc *soc, struct dp_peer *peer)
2022 {
2023 	int i;
2024 	struct dp_rx_tid *rx_tid;
2025 	struct hal_reo_cmd_params params;
2026 
2027 	if (!peer) {
2028 		dp_err_rl("Peer is NULL");
2029 		return;
2030 	}
2031 
2032 	for (i = 0; i < DP_MAX_TIDS; i++) {
2033 		rx_tid = &peer->rx_tid[i];
2034 		if (!rx_tid)
2035 			continue;
2036 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2037 		if (rx_tid->hw_qdesc_vaddr_aligned) {
2038 			qdf_mem_zero(&params, sizeof(params));
2039 			params.std.need_status = 1;
2040 			params.std.addr_lo =
2041 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2042 			params.std.addr_hi =
2043 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2044 			params.u.fl_cache_params.flush_no_inval = 0;
2045 
2046 			if (rx_tid->ba_win_size > 256)
2047 				params.u.fl_cache_params.flush_q_1k_desc = 1;
2048 			params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
2049 
2050 			if (QDF_STATUS_SUCCESS !=
2051 				dp_reo_send_cmd(
2052 					soc, CMD_FLUSH_CACHE,
2053 					&params, dp_dump_rx_reo_queue_info,
2054 					(void *)rx_tid)) {
2055 				dp_err_rl("cache flush send failed tid %d",
2056 					  rx_tid->tid);
2057 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2058 				break;
2059 			}
2060 		}
2061 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2062 	}
2063 }
2064 
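/*
 * dp_get_rx_reo_queue_info() - Look up the vdev and its BSS peer and trigger
 * the REO queue cache flush/dump sequence for all of the peer's TIDs.
 */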
2065 void dp_get_rx_reo_queue_info(
2066 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2067 {
2068 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
2069 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2070 						     DP_MOD_ID_GENERIC_STATS);
2071 	struct dp_peer *peer = NULL;
2072 
2073 	if (!vdev) {
2074 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
2075 		goto failed;
2076 	}
2077 
2078 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2079 
2080 	if (!peer) {
2081 		dp_err_rl("Peer is NULL");
2082 		goto failed;
2083 	}
2084 	dp_send_cache_flush_for_rx_tid(soc, peer);
2085 failed:
2086 	if (peer)
2087 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2088 	if (vdev)
2089 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2090 }
2091 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2092 
2093