xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_tid.c (revision 3b7d2086205cc4b82a36a180614a8914e54e8fed)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef REO_QDESC_HISTORY
48 #define REO_QDESC_HISTORY_SIZE 512
49 uint64_t reo_qdesc_history_idx;
50 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
51 #endif
52 
53 #ifdef REO_QDESC_HISTORY
54 static inline void
55 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
56 			    enum reo_qdesc_event_type type)
57 {
58 	struct reo_qdesc_event *evt;
59 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
60 	uint32_t idx;
61 
62 	reo_qdesc_history_idx++;
63 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
64 
65 	evt = &reo_qdesc_history[idx];
66 
67 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
68 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
69 	evt->ts = qdf_get_log_timestamp();
70 	evt->type = type;
71 }
72 
73 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
74 static inline void
75 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
76 				 enum reo_qdesc_event_type type)
77 {
78 	struct reo_qdesc_event *evt;
79 	uint32_t idx;
80 
81 	reo_qdesc_history_idx++;
82 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
83 
84 	evt = &reo_qdesc_history[idx];
85 
86 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
87 	evt->qdesc_addr = desc->hw_qdesc_paddr;
88 	evt->ts = qdf_get_log_timestamp();
89 	evt->type = type;
90 }
91 
92 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
93 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
94 
95 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
96 	qdf_mem_copy((desc)->peer_mac, (freedesc)->peer_mac, QDF_MAC_ADDR_SIZE)
97 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
98 
99 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
100 	qdf_mem_copy((freedesc)->peer_mac, (peer)->mac_addr.raw, QDF_MAC_ADDR_SIZE)
101 
102 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
103 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
104 
105 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
106 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
107 
108 #else
109 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
110 
111 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
112 
113 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
114 
115 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
116 
117 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
118 #endif
119 
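/**
 * dp_set_ssn_valid_flag() - set the SSN valid (svld) field in a REO queue
 *                           update command
 * @params: REO command parameters to populate
 * @valid: value to be programmed into the svld field
 */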
120 static inline void
121 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
122 		      uint8_t valid)
123 {
124 	params->u.upd_queue_params.update_svld = 1;
125 	params->u.upd_queue_params.svld = valid;
126 	dp_peer_debug("Setting SSN valid bit to %d",
127 		      valid);
128 }
129 
130 #ifdef IPA_OFFLOAD
131 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
132 				       union hal_reo_status *reo_status)
133 {
134 	struct dp_peer *peer = NULL;
135 	struct dp_rx_tid *rx_tid = NULL;
136 	unsigned long comb_peer_id_tid;
137 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
138 	uint16_t tid;
139 	uint16_t peer_id;
140 
141 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
142 		dp_err("REO stats failure %d",
143 		       queue_status->header.status);
144 		return;
145 	}
146 	comb_peer_id_tid = (unsigned long)cb_ctxt;
147 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
148 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
149 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
150 	if (!peer)
151 		return;
152 	rx_tid  = &peer->rx_tid[tid];
153 
154 	if (!rx_tid) {
155 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
156 		return;
157 	}
158 
159 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
160 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
161 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
162 }
163 
164 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
165 #endif
166 
167 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
168 			union hal_reo_status *reo_status)
169 {
170 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
171 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
172 
173 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
174 		return;
175 
176 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
177 		DP_PRINT_STATS("REO stats failure %d for TID %d",
178 			       queue_status->header.status, rx_tid->tid);
179 		return;
180 	}
181 
182 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
183 		       "ssn: %d\n"
184 		       "curr_idx  : %d\n"
185 		       "pn_31_0   : %08x\n"
186 		       "pn_63_32  : %08x\n"
187 		       "pn_95_64  : %08x\n"
188 		       "pn_127_96 : %08x\n"
189 		       "last_rx_enq_tstamp : %08x\n"
190 		       "last_rx_deq_tstamp : %08x\n"
191 		       "rx_bitmap_31_0     : %08x\n"
192 		       "rx_bitmap_63_32    : %08x\n"
193 		       "rx_bitmap_95_64    : %08x\n"
194 		       "rx_bitmap_127_96   : %08x\n"
195 		       "rx_bitmap_159_128  : %08x\n"
196 		       "rx_bitmap_191_160  : %08x\n"
197 		       "rx_bitmap_223_192  : %08x\n"
198 		       "rx_bitmap_255_224  : %08x\n",
199 		       rx_tid->tid,
200 		       queue_status->ssn, queue_status->curr_idx,
201 		       queue_status->pn_31_0, queue_status->pn_63_32,
202 		       queue_status->pn_95_64, queue_status->pn_127_96,
203 		       queue_status->last_rx_enq_tstamp,
204 		       queue_status->last_rx_deq_tstamp,
205 		       queue_status->rx_bitmap_31_0,
206 		       queue_status->rx_bitmap_63_32,
207 		       queue_status->rx_bitmap_95_64,
208 		       queue_status->rx_bitmap_127_96,
209 		       queue_status->rx_bitmap_159_128,
210 		       queue_status->rx_bitmap_191_160,
211 		       queue_status->rx_bitmap_223_192,
212 		       queue_status->rx_bitmap_255_224);
213 
214 	DP_PRINT_STATS(
215 		       "curr_mpdu_cnt      : %d\n"
216 		       "curr_msdu_cnt      : %d\n"
217 		       "fwd_timeout_cnt    : %d\n"
218 		       "fwd_bar_cnt        : %d\n"
219 		       "dup_cnt            : %d\n"
220 		       "frms_in_order_cnt  : %d\n"
221 		       "bar_rcvd_cnt       : %d\n"
222 		       "mpdu_frms_cnt      : %d\n"
223 		       "msdu_frms_cnt      : %d\n"
224 		       "total_byte_cnt     : %d\n"
225 		       "late_recv_mpdu_cnt : %d\n"
226 		       "win_jump_2k        : %d\n"
227 		       "hole_cnt           : %d\n",
228 		       queue_status->curr_mpdu_cnt,
229 		       queue_status->curr_msdu_cnt,
230 		       queue_status->fwd_timeout_cnt,
231 		       queue_status->fwd_bar_cnt,
232 		       queue_status->dup_cnt,
233 		       queue_status->frms_in_order_cnt,
234 		       queue_status->bar_rcvd_cnt,
235 		       queue_status->mpdu_frms_cnt,
236 		       queue_status->msdu_frms_cnt,
237 		       queue_status->total_cnt,
238 		       queue_status->late_recv_mpdu_cnt,
239 		       queue_status->win_jump_2k,
240 		       queue_status->hole_cnt);
241 
242 	DP_PRINT_STATS("Addba Req          : %d\n"
243 			"Addba Resp         : %d\n"
244 			"Addba Resp success : %d\n"
245 			"Addba Resp failed  : %d\n"
246 			"Delba Req received : %d\n"
247 			"Delba Tx success   : %d\n"
248 			"Delba Tx Fail      : %d\n"
249 			"BA window size     : %d\n"
250 			"Pn size            : %d\n",
251 			rx_tid->num_of_addba_req,
252 			rx_tid->num_of_addba_resp,
253 			rx_tid->num_addba_rsp_success,
254 			rx_tid->num_addba_rsp_failed,
255 			rx_tid->num_of_delba_req,
256 			rx_tid->delba_tx_success_cnt,
257 			rx_tid->delba_tx_fail_cnt,
258 			rx_tid->ba_win_size,
259 			rx_tid->pn_size);
260 }
261 
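/**
 * dp_rx_tid_update_cb() - completion callback for CMD_UPDATE_RX_REO_QUEUE;
 *                         logs an error if the HW descriptor update failed
 * @soc: DP SOC handle
 * @cb_ctxt: rx_tid for which the update command was issued
 * @reo_status: REO command status
 */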
262 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
263 				union hal_reo_status *reo_status)
264 {
265 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
266 
267 	if ((reo_status->rx_queue_status.header.status !=
268 		HAL_REO_CMD_SUCCESS) &&
269 		(reo_status->rx_queue_status.header.status !=
270 		HAL_REO_CMD_DRAIN)) {
271 		/* Should not happen normally. Just print error for now */
272 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
273 			    soc, reo_status->rx_queue_status.header.status,
274 			    rx_tid->tid);
275 	}
276 }
277 
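/**
 * dp_get_peer_vdev_roaming_in_progress() - check whether the vdev of the
 *                                          given peer is currently roaming
 * @peer: DP peer handle
 *
 * Return: true if the ol_ops is_roam_inprogress callback reports a roam in
 *         progress for the peer's vdev, false otherwise
 */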
278 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
279 {
280 	struct ol_if_ops *ol_ops = NULL;
281 	bool is_roaming = false;
282 	uint8_t vdev_id = -1;
283 	struct cdp_soc_t *soc;
284 
285 	if (!peer) {
286 		dp_peer_info("Peer is NULL. No roaming possible");
287 		return false;
288 	}
289 
290 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
291 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
292 
293 	if (ol_ops && ol_ops->is_roam_inprogress) {
294 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
295 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
296 	}
297 
298 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
299 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
300 
301 	return is_roaming;
302 }
303 
304 #ifdef WLAN_FEATURE_11BE_MLO
305 /**
306  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
307  *			     setup is necessary
308  * @peer: DP peer handle
309  *
310  * Return: true - allow, false - disallow
311  */
312 static inline
313 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
314 {
315 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
316 		return false;
317 
318 	return true;
319 }
320 
321 /**
322  * dp_rx_tid_update_allow() - check if an rx_tid update is needed
323  * @peer: DP peer handle
324  *
325  * Return: true - allow, false - disallow
326  */
327 static inline
328 bool dp_rx_tid_update_allow(struct dp_peer *peer)
329 {
330 	/* rx_tid update is not expected for an MLO connection link peer */
331 	if (IS_MLO_DP_LINK_PEER(peer)) {
332 		QDF_BUG(0);
333 		return false;
334 	}
335 
336 	return true;
337 }
338 #else
339 static inline
340 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
341 {
342 	return true;
343 }
344 
345 static inline
346 bool dp_rx_tid_update_allow(struct dp_peer *peer)
347 {
348 	return true;
349 }
350 #endif
351 
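/**
 * dp_rx_tid_update_wifi3() - update the REO queue descriptor for a TID with
 *                            a new BA window size and start sequence number
 * @peer: DP peer handle
 * @tid: TID number
 * @ba_window_size: BlockAck window size to program
 * @start_seq: start sequence number (>= IEEE80211_SEQ_MAX marks SSN invalid)
 * @bar_update: true when the update is triggered by a BAR frame
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */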
352 QDF_STATUS
353 dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t ba_window_size,
354 		       uint32_t start_seq, bool bar_update)
355 {
356 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
357 	struct dp_soc *soc = peer->vdev->pdev->soc;
358 	struct hal_reo_cmd_params params;
359 
360 	if (!dp_rx_tid_update_allow(peer)) {
361 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
362 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	qdf_mem_zero(&params, sizeof(params));
367 
368 	params.std.need_status = 1;
369 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
370 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
371 	params.u.upd_queue_params.update_ba_window_size = 1;
372 	params.u.upd_queue_params.ba_window_size = ba_window_size;
373 
374 	if (start_seq < IEEE80211_SEQ_MAX) {
375 		params.u.upd_queue_params.update_ssn = 1;
376 		params.u.upd_queue_params.ssn = start_seq;
377 	} else {
378 		dp_set_ssn_valid_flag(&params, 0);
379 	}
380 
381 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
382 			    dp_rx_tid_update_cb, rx_tid)) {
383 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
384 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
385 	}
386 
387 	rx_tid->ba_win_size = ba_window_size;
388 
389 	if (dp_get_peer_vdev_roaming_in_progress(peer))
390 		return QDF_STATUS_E_PERM;
391 
392 	if (!bar_update)
393 		dp_peer_rx_reorder_queue_setup(soc, peer,
394 					       tid, ba_window_size);
395 
396 	return QDF_STATUS_SUCCESS;
397 }
398 
399 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
400 /**
401  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
402  *                                    the deferred list
403  * @soc: Datapath soc handle
404  * @freedesc: REO DESC reference that needs to be freed
405  *
406  * Return: true if enqueued, else false
407  */
408 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
409 					   struct reo_desc_list_node *freedesc)
410 {
411 	struct reo_desc_deferred_freelist_node *desc;
412 
413 	if (!qdf_atomic_read(&soc->cmn_init_done))
414 		return false;
415 
416 	desc = qdf_mem_malloc(sizeof(*desc));
417 	if (!desc)
418 		return false;
419 
420 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
421 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
422 	desc->hw_qdesc_vaddr_unaligned =
423 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
424 	desc->free_ts = qdf_get_system_timestamp();
425 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
426 
427 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
428 	if (!soc->reo_desc_deferred_freelist_init) {
429 		qdf_mem_free(desc);
430 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
431 		return false;
432 	}
433 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
434 			     (qdf_list_node_t *)desc);
435 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
436 
437 	return true;
438 }
439 
440 /**
441  * dp_reo_desc_defer_free() - free REO QDESCs on the deferred list whose
442  *                            defer timeout has expired
443  * @soc: Datapath soc handle
444  *
445  * Return: None
446  */
447 static void dp_reo_desc_defer_free(struct dp_soc *soc)
448 {
449 	struct reo_desc_deferred_freelist_node *desc;
450 	unsigned long curr_ts = qdf_get_system_timestamp();
451 
452 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
453 
454 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
455 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
456 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
457 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
458 				      (qdf_list_node_t **)&desc);
459 
460 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
461 
462 		qdf_mem_unmap_nbytes_single(soc->osdev,
463 					    desc->hw_qdesc_paddr,
464 					    QDF_DMA_BIDIRECTIONAL,
465 					    desc->hw_qdesc_alloc_size);
466 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
467 		qdf_mem_free(desc);
468 
469 		curr_ts = qdf_get_system_timestamp();
470 	}
471 
472 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
473 }
474 #else
475 static inline bool
476 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
477 			       struct reo_desc_list_node *freedesc)
478 {
479 	return false;
480 }
481 
482 static void dp_reo_desc_defer_free(struct dp_soc *soc)
483 {
484 }
485 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
486 
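/**
 * check_free_list_for_invalid_flush() - scan the freed qdesc tracking list
 * and record the time at which HW wrote back its flush pattern into each
 * freed REO queue descriptor
 * @soc: DP SOC handle
 */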
487 void check_free_list_for_invalid_flush(struct dp_soc *soc)
488 {
489 	uint32_t i;
490 	uint32_t *addr_deref_val;
491 	unsigned long curr_ts = qdf_get_system_timestamp();
492 	uint32_t max_list_size;
493 
494 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
495 
496 	if (max_list_size == 0)
497 		return;
498 
499 	for (i = 0; i < soc->free_addr_list_idx; i++) {
500 		addr_deref_val = (uint32_t *)
501 			    soc->list_qdesc_addr_free[i].hw_qdesc_vaddr_unalign;
502 
503 		if (*addr_deref_val == 0xDDBEEF84 ||
504 		    *addr_deref_val == 0xADBEEF84 ||
505 		    *addr_deref_val == 0xBDBEEF84 ||
506 		    *addr_deref_val == 0xCDBEEF84) {
507 			if (soc->list_qdesc_addr_free[i].ts_hw_flush_back == 0)
508 				soc->list_qdesc_addr_free[i].ts_hw_flush_back =
509 									curr_ts;
510 		}
511 	}
512 }
513 
514 /**
515  * dp_reo_desc_free() - Callback free reo descriptor memory after
516  * HW cache flush
517  *
518  * @soc: DP SOC handle
519  * @cb_ctxt: Callback context
520  * @reo_status: REO command status
521  */
522 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
523 			     union hal_reo_status *reo_status)
524 {
525 	struct reo_desc_list_node *freedesc =
526 		(struct reo_desc_list_node *)cb_ctxt;
527 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
528 	unsigned long curr_ts = qdf_get_system_timestamp();
529 
530 	if ((reo_status->fl_cache_status.header.status !=
531 		HAL_REO_CMD_SUCCESS) &&
532 		(reo_status->fl_cache_status.header.status !=
533 		HAL_REO_CMD_DRAIN)) {
534 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
535 			    soc, reo_status->fl_cache_status.header.status,
536 			    freedesc->rx_tid.tid);
537 	}
538 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
539 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
540 		     rx_tid->tid);
541 
542 	/* If the REO desc was enqueued to be freed at a later point
543 	 * in time, just free the freedesc alone and return
544 	 */
545 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
546 		goto out;
547 
548 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
549 	add_entry_free_list(soc, rx_tid);
550 
551 	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
552 	qdf_mem_unmap_nbytes_single(soc->osdev,
553 				    rx_tid->hw_qdesc_paddr,
554 				    QDF_DMA_BIDIRECTIONAL,
555 				    rx_tid->hw_qdesc_alloc_size);
556 	check_free_list_for_invalid_flush(soc);
557 
558 	*(uint32_t *)rx_tid->hw_qdesc_vaddr_unaligned = 0;
559 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
560 out:
561 	qdf_mem_free(freedesc);
562 }
563 
564 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
565 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
566 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
567 {
568 	if (dma_addr < 0x50000000)
569 		return QDF_STATUS_E_FAILURE;
570 	else
571 		return QDF_STATUS_SUCCESS;
572 }
573 #else
574 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
575 {
576 	return QDF_STATUS_SUCCESS;
577 }
578 #endif
579 
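/**
 * dp_rx_tid_setup_wifi3() - allocate and initialize the REO queue descriptor
 *                           for a peer/TID and register it with the target
 * @peer: DP peer handle
 * @tid: TID number
 * @ba_window_size: BlockAck window size
 * @start_seq: start sequence number
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */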
580 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
581 				 uint32_t ba_window_size, uint32_t start_seq)
582 {
583 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
584 	struct dp_vdev *vdev = peer->vdev;
585 	struct dp_soc *soc = vdev->pdev->soc;
586 	uint32_t hw_qdesc_size;
587 	uint32_t hw_qdesc_align;
588 	int hal_pn_type;
589 	void *hw_qdesc_vaddr;
590 	uint32_t alloc_tries = 0;
591 	QDF_STATUS status = QDF_STATUS_SUCCESS;
592 	struct dp_txrx_peer *txrx_peer;
593 
594 	if (!qdf_atomic_read(&peer->is_default_route_set))
595 		return QDF_STATUS_E_FAILURE;
596 
597 	if (!dp_rx_tid_setup_allow(peer)) {
598 		dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT,
599 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
600 		goto send_wmi_reo_cmd;
601 	}
602 
603 	rx_tid->ba_win_size = ba_window_size;
604 	if (rx_tid->hw_qdesc_vaddr_unaligned)
605 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
606 			start_seq, false);
607 	rx_tid->delba_tx_status = 0;
608 	rx_tid->ppdu_id_2k = 0;
609 	rx_tid->num_of_addba_req = 0;
610 	rx_tid->num_of_delba_req = 0;
611 	rx_tid->num_of_addba_resp = 0;
612 	rx_tid->num_addba_rsp_failed = 0;
613 	rx_tid->num_addba_rsp_success = 0;
614 	rx_tid->delba_tx_success_cnt = 0;
615 	rx_tid->delba_tx_fail_cnt = 0;
616 	rx_tid->statuscode = 0;
617 
618 	/* TODO: Allocating HW queue descriptors based on max BA window size
619 	 * for all QOS TIDs so that same descriptor can be used later when
620 	 * ADDBA request is received. This should be changed to allocate HW
621 	 * queue descriptors based on BA window size being negotiated (0 for
622 	 * non BA cases), and reallocate when BA window size changes and also
623 	 * send WMI message to FW to change the REO queue descriptor in Rx
624 	 * peer entry as part of dp_rx_tid_update.
625 	 */
626 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
627 					       ba_window_size, tid);
628 
629 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
630 	/* To avoid unnecessary extra allocation for alignment, try allocating
631 	 * the exact size and see if we already have an aligned address.
632 	 */
633 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
634 
635 try_desc_alloc:
636 	rx_tid->hw_qdesc_vaddr_unaligned =
637 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
638 
639 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
640 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
641 			    soc, tid);
642 		return QDF_STATUS_E_NOMEM;
643 	}
644 
645 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
646 		hw_qdesc_align) {
647 		/* Address allocated above is not aligned. Allocate extra
648 		 * memory for alignment
649 		 */
650 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
651 		rx_tid->hw_qdesc_vaddr_unaligned =
652 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
653 					hw_qdesc_align - 1);
654 
655 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
656 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
657 				    soc, tid);
658 			return QDF_STATUS_E_NOMEM;
659 		}
660 
661 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
662 			rx_tid->hw_qdesc_vaddr_unaligned,
663 			hw_qdesc_align);
664 
665 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
666 			      soc, rx_tid->hw_qdesc_alloc_size,
667 			      hw_qdesc_vaddr);
668 
669 	} else {
670 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
671 	}
672 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
673 
674 	txrx_peer = dp_get_txrx_peer(peer);
675 
676 	/* TODO: Ensure that sec_type is set before ADDBA is received.
677 	 * Currently this is set based on htt indication
678 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
679 	 */
680 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
681 	case cdp_sec_type_tkip_nomic:
682 	case cdp_sec_type_aes_ccmp:
683 	case cdp_sec_type_aes_ccmp_256:
684 	case cdp_sec_type_aes_gcmp:
685 	case cdp_sec_type_aes_gcmp_256:
686 		hal_pn_type = HAL_PN_WPA;
687 		break;
688 	case cdp_sec_type_wapi:
689 		if (vdev->opmode == wlan_op_mode_ap)
690 			hal_pn_type = HAL_PN_WAPI_EVEN;
691 		else
692 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
693 		break;
694 	default:
695 		hal_pn_type = HAL_PN_NONE;
696 		break;
697 	}
698 
699 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
700 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
701 		vdev->vdev_stats_id);
702 
703 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
704 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
705 		&(rx_tid->hw_qdesc_paddr));
706 
707 	add_entry_alloc_list(soc, rx_tid, peer, hw_qdesc_vaddr);
708 
709 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
710 			QDF_STATUS_SUCCESS) {
711 		if (alloc_tries++ < 10) {
712 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
713 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
714 			goto try_desc_alloc;
715 		} else {
716 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
717 				    soc, tid);
718 			status = QDF_STATUS_E_NOMEM;
719 			goto error;
720 		}
721 	}
722 
723 send_wmi_reo_cmd:
724 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
725 		status = QDF_STATUS_E_PERM;
726 		goto error;
727 	}
728 
729 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
730 						tid, ba_window_size);
731 	if (QDF_IS_STATUS_SUCCESS(status))
732 		return status;
733 
734 error:
735 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
736 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
737 		    QDF_STATUS_SUCCESS)
738 			qdf_mem_unmap_nbytes_single(
739 				soc->osdev,
740 				rx_tid->hw_qdesc_paddr,
741 				QDF_DMA_BIDIRECTIONAL,
742 				rx_tid->hw_qdesc_alloc_size);
743 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
744 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
745 		rx_tid->hw_qdesc_paddr = 0;
746 	}
747 	return status;
748 }
749 
750 #ifdef DP_UMAC_HW_RESET_SUPPORT
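/**
 * dp_peer_rst_tids() - peer iterator that resets the REO rx queue of every
 *                      TID that has a HW queue descriptor allocated
 * @soc: DP SOC handle
 * @peer: DP peer handle
 * @arg: iterator argument (unused)
 */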
751 static
752 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
753 {
754 	int tid;
755 
756 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
757 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
758 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
759 
760 		if (vaddr)
761 			dp_reset_rx_reo_tid_queue(soc, vaddr,
762 						  rx_tid->hw_qdesc_alloc_size);
763 	}
764 }
765 
766 void dp_reset_tid_q_setup(struct dp_soc *soc)
767 {
768 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
769 }
770 #endif
771 #ifdef REO_DESC_DEFER_FREE
772 /**
773  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add the
774  * desc back to the freelist and defer the deletion
775  *
776  * @soc: DP SOC handle
777  * @desc: Base descriptor to be freed
778  * @reo_status: REO command status
779  */
780 static void dp_reo_desc_clean_up(struct dp_soc *soc,
781 				 struct reo_desc_list_node *desc,
782 				 union hal_reo_status *reo_status)
783 {
784 	desc->free_ts = qdf_get_system_timestamp();
785 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
786 	qdf_list_insert_back(&soc->reo_desc_freelist,
787 			     (qdf_list_node_t *)desc);
788 }
789 
790 /**
791  * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to the
792  * cmd ring to avoid a REO hang
793  *
794  * @list_size: REO desc list size to be cleaned
795  */
796 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
797 {
798 	unsigned long curr_ts = qdf_get_system_timestamp();
799 
800 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
801 		dp_err_log("%lu:freedesc number %d in freelist",
802 			   curr_ts, *list_size);
803 		/* limit the batch queue size */
804 		*list_size = REO_DESC_FREELIST_SIZE;
805 	}
806 }
807 #else
808 /**
809  * dp_reo_desc_clean_up() - If sending the cmd to flush the REO
810  * cache fails, free the base REO desc anyway
811  *
812  * @soc: DP SOC handle
813  * @desc: Base descriptor to be freed
814  * @reo_status: REO command status
815  */
816 static void dp_reo_desc_clean_up(struct dp_soc *soc,
817 				 struct reo_desc_list_node *desc,
818 				 union hal_reo_status *reo_status)
819 {
820 	if (reo_status) {
821 		qdf_mem_zero(reo_status, sizeof(*reo_status));
822 		reo_status->fl_cache_status.header.status = 0;
823 		dp_reo_desc_free(soc, (void *)desc, reo_status);
824 	}
825 }
826 
827 /**
828  * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to the
829  * cmd ring to avoid a REO hang
830  *
831  * @list_size: REO desc list size to be cleaned
832  */
833 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
834 {
835 }
836 #endif
837 
838 /**
839  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
840  * cmd and re-insert desc into free list if send fails.
841  *
842  * @soc: DP SOC handle
843  * @desc: desc with resend update cmd flag set
844  * @rx_tid: RX tid of the desc; the update cmd resets its
845  * valid field to 0 in h/w
846  *
847  * Return: QDF status
848  */
849 static QDF_STATUS
850 dp_resend_update_reo_cmd(struct dp_soc *soc,
851 			 struct reo_desc_list_node *desc,
852 			 struct dp_rx_tid *rx_tid)
853 {
854 	struct hal_reo_cmd_params params;
855 
856 	qdf_mem_zero(&params, sizeof(params));
857 	params.std.need_status = 1;
858 	params.std.addr_lo =
859 		rx_tid->hw_qdesc_paddr & 0xffffffff;
860 	params.std.addr_hi =
861 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
862 	params.u.upd_queue_params.update_vld = 1;
863 	params.u.upd_queue_params.vld = 0;
864 	desc->resend_update_reo_cmd = false;
865 	/*
866 	 * If the cmd send fails then set resend_update_reo_cmd flag
867 	 * and insert the desc at the end of the free list to retry.
868 	 */
869 	if (dp_reo_send_cmd(soc,
870 			    CMD_UPDATE_RX_REO_QUEUE,
871 			    &params,
872 			    dp_rx_tid_delete_cb,
873 			    (void *)desc)
874 	    != QDF_STATUS_SUCCESS) {
875 		desc->resend_update_reo_cmd = true;
876 		desc->free_ts = qdf_get_system_timestamp();
877 		qdf_list_insert_back(&soc->reo_desc_freelist,
878 				     (qdf_list_node_t *)desc);
879 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
880 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
881 		return QDF_STATUS_E_FAILURE;
882 	}
883 
884 	return QDF_STATUS_SUCCESS;
885 }
886 
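/**
 * dp_rx_tid_delete_cb() - callback for the REO queue-invalidate update cmd;
 * adds the descriptor to the free list and flushes aged-out or excess
 * entries from the HW cache in a rate-limited batch
 * @soc: DP SOC handle
 * @cb_ctxt: reo_desc_list_node of the TID queue being deleted
 * @reo_status: REO command status
 */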
887 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
888 			 union hal_reo_status *reo_status)
889 {
890 	struct reo_desc_list_node *freedesc =
891 		(struct reo_desc_list_node *)cb_ctxt;
892 	uint32_t list_size;
893 	struct reo_desc_list_node *desc = NULL;
894 	unsigned long curr_ts = qdf_get_system_timestamp();
895 	uint32_t desc_size, tot_desc_size;
896 	struct hal_reo_cmd_params params;
897 	bool flush_failure = false;
898 
899 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
900 
901 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
902 		qdf_mem_zero(reo_status, sizeof(*reo_status));
903 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
904 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
905 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
906 		return;
907 	} else if (reo_status->rx_queue_status.header.status !=
908 		HAL_REO_CMD_SUCCESS) {
909 		/* Should not happen normally. Just print error for now */
910 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
911 			   reo_status->rx_queue_status.header.status,
912 			   freedesc->rx_tid.tid);
913 	}
914 
915 	dp_peer_info("%pK: rx_tid: %d status: %d",
916 		     soc, freedesc->rx_tid.tid,
917 		     reo_status->rx_queue_status.header.status);
918 
919 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
920 	freedesc->free_ts = curr_ts;
921 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
922 				  (qdf_list_node_t *)freedesc, &list_size);
923 
924 	/* On the MCL path the desc is added back to reo_desc_freelist when a
925 	 * REO FLUSH fails. This may cause the number of REO queues pending in
926 	 * the free list to grow beyond the REO_CMD_RING max size, flooding the
927 	 * REO CMD ring and leaving the REO HW in an unexpected condition. So
928 	 * the number of REO cmds issued in a batch operation needs to be limited.
929 	 */
930 	dp_reo_limit_clean_batch_sz(&list_size);
931 
932 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
933 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
934 		((list_size >= REO_DESC_FREELIST_SIZE) ||
935 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
936 		(desc->resend_update_reo_cmd && list_size))) {
937 		struct dp_rx_tid *rx_tid;
938 
939 		qdf_list_remove_front(&soc->reo_desc_freelist,
940 				      (qdf_list_node_t **)&desc);
941 		list_size--;
942 		rx_tid = &desc->rx_tid;
943 
944 		/* First process descs with resend_update_reo_cmd set */
945 		if (desc->resend_update_reo_cmd) {
946 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
947 			    QDF_STATUS_SUCCESS)
948 				break;
949 			else
950 				continue;
951 		}
952 
953 		/* Flush and invalidate REO descriptor from HW cache: Base and
954 		 * extension descriptors should be flushed separately
955 		 */
956 		if (desc->pending_ext_desc_size)
957 			tot_desc_size = desc->pending_ext_desc_size;
958 		else
959 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
960 		/* Get base descriptor size by passing non-qos TID */
961 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
962 						   DP_NON_QOS_TID);
963 
964 		/* Flush reo extension descriptors */
965 		while ((tot_desc_size -= desc_size) > 0) {
966 			qdf_mem_zero(&params, sizeof(params));
967 			params.std.addr_lo =
968 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
969 				tot_desc_size) & 0xffffffff;
970 			params.std.addr_hi =
971 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
972 
973 			if (QDF_STATUS_SUCCESS !=
974 			    dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params,
975 					    NULL, NULL)) {
976 				dp_info_rl("fail to send CMD_CACHE_FLUSH:"
977 					   " tid %d desc %pK", rx_tid->tid,
978 					   (void *)(rx_tid->hw_qdesc_paddr));
979 				desc->pending_ext_desc_size = tot_desc_size +
980 								      desc_size;
981 				dp_reo_desc_clean_up(soc, desc, reo_status);
982 				flush_failure = true;
983 				break;
984 			}
985 		}
986 
987 		if (flush_failure)
988 			break;
989 
990 		desc->pending_ext_desc_size = desc_size;
991 
992 		/* Flush base descriptor */
993 		qdf_mem_zero(&params, sizeof(params));
994 		params.std.need_status = 1;
995 		params.std.addr_lo =
996 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
997 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
998 		if (rx_tid->ba_win_size > 256)
999 			params.u.fl_cache_params.flush_q_1k_desc = 1;
1000 		params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
1001 
1002 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1003 							  CMD_FLUSH_CACHE,
1004 							  &params,
1005 							  dp_reo_desc_free,
1006 							  (void *)desc)) {
1007 			union hal_reo_status reo_status;
1008 			/*
1009 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
1010 			 * should be unmapped. The local reo_desc, together with the
1011 			 * TID queue desc, also needs to be freed accordingly.
1012 			 *
1013 			 * Here the desc_free function is invoked directly to do the clean up.
1014 			 *
1015 			 * On the MCL path the desc is instead added back to the free
1016 			 * desc list and its deletion deferred.
1017 			 */
1018 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
1019 				   rx_tid->tid);
1020 			dp_reo_desc_clean_up(soc, desc, &reo_status);
1021 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1022 			break;
1023 		}
1024 	}
1025 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1026 
1027 	dp_reo_desc_defer_free(soc);
1028 }
1029 
1030 /**
1031  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
1032  * @peer: Datapath peer handle
1033  * @tid: TID
1034  *
1035  * Return: 0 on success, error code on failure
1036  */
1037 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1038 {
1039 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1040 	struct dp_soc *soc = peer->vdev->pdev->soc;
1041 	union hal_reo_status reo_status;
1042 	struct hal_reo_cmd_params params;
1043 	struct reo_desc_list_node *freedesc =
1044 		qdf_mem_malloc(sizeof(*freedesc));
1045 
1046 	if (!freedesc) {
1047 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
1048 			    soc, tid);
1049 		qdf_assert(0);
1050 		return -ENOMEM;
1051 	}
1052 
1053 	freedesc->rx_tid = *rx_tid;
1054 	freedesc->resend_update_reo_cmd = false;
1055 
1056 	qdf_mem_zero(&params, sizeof(params));
1057 
1058 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
1059 
1060 	reo_status.rx_queue_status.header.status = HAL_REO_CMD_SUCCESS;
1061 	dp_rx_tid_delete_cb(soc, freedesc, &reo_status);
1062 
1063 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1064 	rx_tid->hw_qdesc_alloc_size = 0;
1065 	rx_tid->hw_qdesc_paddr = 0;
1066 
1067 	return 0;
1068 }
1069 
1070 #ifdef DP_LFR
1071 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1072 {
1073 	int tid;
1074 
1075 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1076 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1077 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
1078 			      tid, peer, peer->local_id);
1079 	}
1080 }
1081 #else
1082 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
1083 #endif
1084 
1085 #ifdef WLAN_FEATURE_11BE_MLO
1086 /**
1087  * dp_peer_rx_tids_init() - initialize each tids in peer
1088  * @peer: peer pointer
1089  *
1090  * Return: None
1091  */
1092 static void dp_peer_rx_tids_init(struct dp_peer *peer)
1093 {
1094 	int tid;
1095 	struct dp_rx_tid *rx_tid;
1096 	struct dp_rx_tid_defrag *rx_tid_defrag;
1097 
1098 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1099 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1100 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1101 
1102 			rx_tid_defrag->array = &rx_tid_defrag->base;
1103 			rx_tid_defrag->defrag_timeout_ms = 0;
1104 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1105 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1106 			rx_tid_defrag->base.head = NULL;
1107 			rx_tid_defrag->base.tail = NULL;
1108 			rx_tid_defrag->tid = tid;
1109 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
1110 		}
1111 	}
1112 
1113 	/* if this is not the first assoc link peer,
1114 	 * do not initialize the rx_tids again.
1115 	 */
1116 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
1117 		return;
1118 
1119 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1120 		rx_tid = &peer->rx_tid[tid];
1121 		rx_tid->tid = tid;
1122 		rx_tid->ba_win_size = 0;
1123 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1124 	}
1125 }
1126 #else
1127 static void dp_peer_rx_tids_init(struct dp_peer *peer)
1128 {
1129 	int tid;
1130 	struct dp_rx_tid *rx_tid;
1131 	struct dp_rx_tid_defrag *rx_tid_defrag;
1132 
1133 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1134 		rx_tid = &peer->rx_tid[tid];
1135 
1136 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1137 		rx_tid->tid = tid;
1138 		rx_tid->ba_win_size = 0;
1139 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1140 
1141 		rx_tid_defrag->base.head = NULL;
1142 		rx_tid_defrag->base.tail = NULL;
1143 		rx_tid_defrag->tid = tid;
1144 		rx_tid_defrag->array = &rx_tid_defrag->base;
1145 		rx_tid_defrag->defrag_timeout_ms = 0;
1146 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1147 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1148 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
1149 	}
1150 }
1151 #endif
1152 
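/**
 * dp_peer_rx_tid_setup() - initialize per-TID rx state for a peer and set up
 *                          the default (non-QoS) and TID 0 REO queues
 * @peer: DP peer handle
 */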
1153 void dp_peer_rx_tid_setup(struct dp_peer *peer)
1154 {
1155 	struct dp_soc *soc = peer->vdev->pdev->soc;
1156 	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
1157 	struct dp_vdev *vdev = peer->vdev;
1158 
1159 	dp_peer_rx_tids_init(peer);
1160 
1161 	/* Setup default (non-qos) rx tid queue */
1162 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1163 
1164 	/* Setup the rx tid queue for TID 0.
1165 	 * Other queues will be set up on receiving the first packet, which will
1166 	 * cause a NULL REO queue error. For a mesh peer, if the mesh peer on one
1167 	 * mesh AP is not deleted, the new addition of the mesh peer on another
1168 	 * mesh AP does not do BA negotiation, leading to a mismatch in BA
1169 	 * windows. To avoid this, send the max BA window during init.
1170 	 */
1171 	if (qdf_unlikely(vdev->mesh_vdev) ||
1172 	    qdf_unlikely(txrx_peer->nawds_enabled))
1173 		dp_rx_tid_setup_wifi3(
1174 				peer, 0,
1175 				hal_get_rx_max_ba_window(soc->hal_soc, 0),
1176 				0);
1177 	else
1178 		dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1179 
1180 	/*
1181 	 * Setup the rest of TID's to handle LFR
1182 	 */
1183 	dp_peer_setup_remaining_tids(peer);
1184 }
1185 
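/**
 * dp_peer_rx_cleanup() - flush defragmentation state and delete the REO
 *                        queue descriptors for all TIDs of a peer
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 */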
1186 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1187 {
1188 	int tid;
1189 	uint32_t tid_delete_mask = 0;
1190 
1191 	if (!peer->txrx_peer)
1192 		return;
1193 
1194 	dp_info("Remove tids for peer: %pK", peer);
1195 
1196 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1197 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1198 		struct dp_rx_tid_defrag *defrag_rx_tid =
1199 				&peer->txrx_peer->rx_tid[tid];
1200 
1201 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
1202 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
1203 			/* Cleanup defrag related resource */
1204 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
1205 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
1206 		}
1207 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
1208 
1209 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1210 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
1211 			dp_rx_tid_delete_wifi3(peer, tid);
1212 
1213 			tid_delete_mask |= (1 << tid);
1214 		}
1215 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1216 	}
1217 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1218 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1219 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
1220 			peer->vdev->pdev->pdev_id,
1221 			peer->vdev->vdev_id, peer->mac_addr.raw,
1222 			tid_delete_mask);
1223 	}
1224 #endif
1225 }
1226 
1227 /**
1228  * dp_teardown_256_ba_sessions() - Teardown sessions using 256
1229  *                                window size when a request with
1230  *                                64 window size is received.
1231  *                                This is done as a WAR since HW can
1232  *                                have only one setting per peer (64 or 256).
1233  *                                For HKv2, we use the per-tid buffersize
1234  *                                setting for tids 0 to per_tid_basize_max_tid.
1235  *                                For tids greater than per_tid_basize_max_tid
1236  *                                we use the HKv1 method.
1237  * @peer: Datapath peer
1238  *
1239  * Return: void
1240  */
1241 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1242 {
1243 	uint8_t delba_rcode = 0;
1244 	int tid;
1245 	struct dp_rx_tid *rx_tid = NULL;
1246 
1247 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
1248 	for (; tid < DP_MAX_TIDS; tid++) {
1249 		rx_tid = &peer->rx_tid[tid];
1250 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1251 
1252 		if (rx_tid->ba_win_size <= 64) {
1253 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1254 			continue;
1255 		} else {
1256 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1257 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1258 				/* send delba */
1259 				if (!rx_tid->delba_tx_status) {
1260 					rx_tid->delba_tx_retry++;
1261 					rx_tid->delba_tx_status = 1;
1262 					rx_tid->delba_rcode =
1263 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1264 					delba_rcode = rx_tid->delba_rcode;
1265 
1266 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1267 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1268 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1269 							peer->vdev->pdev->soc->ctrl_psoc,
1270 							peer->vdev->vdev_id,
1271 							peer->mac_addr.raw,
1272 							tid, delba_rcode,
1273 							CDP_DELBA_REASON_NONE);
1274 				} else {
1275 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1276 				}
1277 			} else {
1278 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1279 			}
1280 		}
1281 	}
1282 }
1283 
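/**
 * dp_addba_resp_tx_completion_wifi3() - process the tx completion of an
 * ADDBA response frame and activate the BA session on success or roll the
 * TID state back to inactive on failure
 * @cdp_soc: CDP SOC handle
 * @peer_mac: peer MAC address
 * @vdev_id: vdev id
 * @tid: TID number
 * @status: tx completion status of the ADDBA response frame
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */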
1284 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1285 				      uint8_t *peer_mac,
1286 				      uint16_t vdev_id,
1287 				      uint8_t tid, int status)
1288 {
1289 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1290 					(struct dp_soc *)cdp_soc,
1291 					peer_mac, 0, vdev_id,
1292 					DP_MOD_ID_CDP);
1293 	struct dp_rx_tid *rx_tid = NULL;
1294 
1295 	if (!peer) {
1296 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1297 		goto fail;
1298 	}
1299 	rx_tid = &peer->rx_tid[tid];
1300 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1301 	if (status) {
1302 		rx_tid->num_addba_rsp_failed++;
1303 		if (rx_tid->hw_qdesc_vaddr_unaligned)
1304 			dp_rx_tid_update_wifi3(peer, tid, 1,
1305 					       IEEE80211_SEQ_MAX, false);
1306 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1307 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1308 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
1309 
1310 		goto success;
1311 	}
1312 
1313 	rx_tid->num_addba_rsp_success++;
1314 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1315 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1316 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1317 			    cdp_soc, tid);
1318 		goto fail;
1319 	}
1320 
1321 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
1322 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1323 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
1324 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1325 		goto fail;
1326 	}
1327 
1328 	if (dp_rx_tid_update_wifi3(peer, tid,
1329 				   rx_tid->ba_win_size,
1330 				   rx_tid->startseqnum,
1331 				   false)) {
1332 		dp_err("Failed update REO SSN");
1333 	}
1334 
1335 	dp_info("tid %u window_size %u start_seq_num %u",
1336 		tid, rx_tid->ba_win_size,
1337 		rx_tid->startseqnum);
1338 
1339 	/* First Session */
1340 	if (peer->active_ba_session_cnt == 0) {
1341 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
1342 			peer->hw_buffer_size = 256;
1343 		else if (rx_tid->ba_win_size <= 1024 &&
1344 			 rx_tid->ba_win_size > 256)
1345 			peer->hw_buffer_size = 1024;
1346 		else
1347 			peer->hw_buffer_size = 64;
1348 	}
1349 
1350 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1351 
1352 	peer->active_ba_session_cnt++;
1353 
1354 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1355 
1356 	/* Kill any session having a 256 buffer size
1357 	 * when a 64 buffer size request is received.
1358 	 * Also, latch on to 64 as the new buffer size.
1359 	 */
1360 	if (peer->kill_256_sessions) {
1361 		dp_teardown_256_ba_sessions(peer);
1362 		peer->kill_256_sessions = 0;
1363 	}
1364 
1365 success:
1366 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1367 	return QDF_STATUS_SUCCESS;
1368 
1369 fail:
1370 	if (peer)
1371 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1372 
1373 	return QDF_STATUS_E_FAILURE;
1374 }
1375 
1376 QDF_STATUS
1377 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1378 			     uint16_t vdev_id, uint8_t tid,
1379 			     uint8_t *dialogtoken, uint16_t *statuscode,
1380 			     uint16_t *buffersize, uint16_t *batimeout)
1381 {
1382 	struct dp_rx_tid *rx_tid = NULL;
1383 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1384 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
1385 						       peer_mac, 0, vdev_id,
1386 						       DP_MOD_ID_CDP);
1387 
1388 	if (!peer) {
1389 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1390 		return QDF_STATUS_E_FAILURE;
1391 	}
1392 	rx_tid = &peer->rx_tid[tid];
1393 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1394 	rx_tid->num_of_addba_resp++;
1395 	/* setup ADDBA response parameters */
1396 	*dialogtoken = rx_tid->dialogtoken;
1397 	*statuscode = rx_tid->statuscode;
1398 	*buffersize = rx_tid->ba_win_size;
1399 	*batimeout  = 0;
1400 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1401 
1402 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1403 
1404 	return status;
1405 }
1406 
1407 /**
1408  * dp_check_ba_buffersize() - Check buffer size in request
1409  *                            and latch onto this size based on
1410  *                            size used in first active session.
1411  * @peer: Datapath peer
1412  * @tid: Tid
1413  * @buffersize: Block ack window size
1414  *
1415  * Return: void
1416  */
1417 static void dp_check_ba_buffersize(struct dp_peer *peer,
1418 				   uint16_t tid,
1419 				   uint16_t buffersize)
1420 {
1421 	struct dp_rx_tid *rx_tid = NULL;
1422 	struct dp_soc *soc = peer->vdev->pdev->soc;
1423 	uint16_t max_ba_window;
1424 
1425 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
1426 	dp_info("Input buffersize %d, max dp allowed %d",
1427 		buffersize, max_ba_window);
1428 	/* Adjust BA window size, restrict it to max DP allowed */
1429 	buffersize = QDF_MIN(buffersize, max_ba_window);
1430 
1431 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
1432 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1433 		soc->per_tid_basize_max_tid, tid, buffersize,
1434 		peer->hw_buffer_size);
1435 
1436 	rx_tid = &peer->rx_tid[tid];
1437 	if (soc->per_tid_basize_max_tid &&
1438 	    tid < soc->per_tid_basize_max_tid) {
1439 		rx_tid->ba_win_size = buffersize;
1440 		goto out;
1441 	} else {
1442 		if (peer->active_ba_session_cnt == 0) {
1443 			rx_tid->ba_win_size = buffersize;
1444 		} else {
1445 			if (peer->hw_buffer_size == 64) {
1446 				if (buffersize <= 64)
1447 					rx_tid->ba_win_size = buffersize;
1448 				else
1449 					rx_tid->ba_win_size = peer->hw_buffer_size;
1450 			} else if (peer->hw_buffer_size == 256) {
1451 				if (buffersize > 64) {
1452 					rx_tid->ba_win_size = buffersize;
1453 				} else {
1454 					rx_tid->ba_win_size = buffersize;
1455 					peer->hw_buffer_size = 64;
1456 					peer->kill_256_sessions = 1;
1457 				}
1458 			} else if (buffersize <= 1024) {
1459 				/*
1460 				 * The above checks are only for HK V2;
1461 				 * set the incoming buffer size for others.
1462 				 */
1463 				rx_tid->ba_win_size = buffersize;
1464 			} else {
1465 				dp_err("Invalid buffer size %d", buffersize);
1466 				qdf_assert_always(0);
1467 			}
1468 		}
1469 	}
1470 
1471 out:
1472 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
1473 		rx_tid->ba_win_size,
1474 		peer->hw_buffer_size,
1475 		peer->kill_256_sessions);
1476 }
1477 
1478 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
1479 					uint8_t *peer_mac, uint16_t vdev_id,
1480 					uint8_t tid, uint16_t buffersize)
1481 {
1482 	struct dp_rx_tid *rx_tid = NULL;
1483 	struct dp_peer *peer;
1484 
1485 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1486 					      peer_mac, 0, vdev_id,
1487 					      DP_MOD_ID_CDP);
1488 	if (!peer) {
1489 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1490 		return QDF_STATUS_E_FAILURE;
1491 	}
1492 
1493 	rx_tid = &peer->rx_tid[tid];
1494 
1495 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1496 	rx_tid->ba_win_size = buffersize;
1497 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1498 
1499 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
1500 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
1501 
1502 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1503 
1504 	return QDF_STATUS_SUCCESS;
1505 }
1506 
1507 #define DP_RX_BA_SESSION_DISABLE  1
1508 
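/**
 * dp_addba_requestprocess_wifi3() - process an incoming ADDBA request: apply
 * any BA window size override, validate and latch the window size, and set
 * up the REO queue for the TID
 * @cdp_soc: CDP SOC handle
 * @peer_mac: peer MAC address
 * @vdev_id: vdev id
 * @dialogtoken: dialog token from the ADDBA request
 * @tid: TID number
 * @batimeout: BA timeout from the ADDBA request
 * @buffersize: BA window size requested
 * @startseqnum: start sequence number from the ADDBA request
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */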
1509 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1510 				  uint8_t *peer_mac,
1511 				  uint16_t vdev_id,
1512 				  uint8_t dialogtoken,
1513 				  uint16_t tid, uint16_t batimeout,
1514 				  uint16_t buffersize,
1515 				  uint16_t startseqnum)
1516 {
1517 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1518 	struct dp_rx_tid *rx_tid = NULL;
1519 	struct dp_peer *peer;
1520 
1521 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1522 					      peer_mac,
1523 					      0, vdev_id,
1524 					      DP_MOD_ID_CDP);
1525 
1526 	if (!peer) {
1527 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1528 		return QDF_STATUS_E_FAILURE;
1529 	}
1530 	rx_tid = &peer->rx_tid[tid];
1531 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1532 	rx_tid->num_of_addba_req++;
1533 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
1534 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
1535 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1536 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1537 		peer->active_ba_session_cnt--;
1538 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
1539 			      cdp_soc, tid);
1540 	}
1541 
1542 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1543 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1544 		status = QDF_STATUS_E_FAILURE;
1545 		goto fail;
1546 	}
1547 
1548 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
1549 		dp_peer_info("%pK: disable BA session",
1550 			     cdp_soc);
1551 
1552 		buffersize = 1;
1553 	} else if (rx_tid->rx_ba_win_size_override) {
1554 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
1555 			     rx_tid->rx_ba_win_size_override);
1556 
1557 		buffersize = rx_tid->rx_ba_win_size_override;
1558 	} else {
1559 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
1560 			     buffersize);
1561 	}
1562 
1563 	dp_check_ba_buffersize(peer, tid, buffersize);
1564 
1565 	if (dp_rx_tid_setup_wifi3(peer, tid,
1566 	    rx_tid->ba_win_size, startseqnum)) {
1567 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1568 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1569 		status = QDF_STATUS_E_FAILURE;
1570 		goto fail;
1571 	}
1572 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
1573 
1574 	rx_tid->dialogtoken = dialogtoken;
1575 	rx_tid->startseqnum = startseqnum;
1576 
1577 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1578 		rx_tid->statuscode = rx_tid->userstatuscode;
1579 	else
1580 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1581 
1582 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
1583 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
1584 
1585 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1586 
1587 fail:
1588 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1589 
1590 	return status;
1591 }
1592 
1593 QDF_STATUS
1594 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1595 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
1596 {
1597 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1598 					(struct dp_soc *)cdp_soc,
1599 					peer_mac, 0, vdev_id,
1600 					DP_MOD_ID_CDP);
1601 	struct dp_rx_tid *rx_tid;
1602 
1603 	if (!peer) {
1604 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1605 		return QDF_STATUS_E_FAILURE;
1606 	}
1607 
1608 	rx_tid = &peer->rx_tid[tid];
1609 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1610 	rx_tid->userstatuscode = statuscode;
1611 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1612 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1613 
1614 	return QDF_STATUS_SUCCESS;
1615 }
1616 
1617 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1618 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
1619 {
1620 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1621 	struct dp_rx_tid *rx_tid;
1622 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1623 					(struct dp_soc *)cdp_soc,
1624 					peer_mac, 0, vdev_id,
1625 					DP_MOD_ID_CDP);
1626 
1627 	if (!peer) {
1628 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1629 		return QDF_STATUS_E_FAILURE;
1630 	}
1631 	rx_tid = &peer->rx_tid[tid];
1632 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1633 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
1634 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1635 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1636 		status = QDF_STATUS_E_FAILURE;
1637 		goto fail;
1638 	}
1639 	/* TODO: See if we can delete the existing REO queue descriptor and
1640 	 * replace it with a new one without the queue extension descriptor to save
1641 	 * memory
1642 	 */
1643 	rx_tid->delba_rcode = reasoncode;
1644 	rx_tid->num_of_delba_req++;
1645 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1646 
1647 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1648 	peer->active_ba_session_cnt--;
1649 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1650 fail:
1651 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1652 
1653 	return status;
1654 }
1655 
1656 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1657 				 uint16_t vdev_id,
1658 				 uint8_t tid, int status)
1659 {
1660 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
1661 	struct dp_rx_tid *rx_tid = NULL;
1662 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1663 					(struct dp_soc *)cdp_soc,
1664 					peer_mac, 0, vdev_id,
1665 					DP_MOD_ID_CDP);
1666 
1667 	if (!peer) {
1668 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1669 		return QDF_STATUS_E_FAILURE;
1670 	}
1671 	rx_tid = &peer->rx_tid[tid];
1672 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1673 	if (status) {
1674 		rx_tid->delba_tx_fail_cnt++;
1675 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
1676 			rx_tid->delba_tx_retry = 0;
1677 			rx_tid->delba_tx_status = 0;
1678 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1679 		} else {
1680 			rx_tid->delba_tx_retry++;
1681 			rx_tid->delba_tx_status = 1;
1682 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1683 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1684 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1685 					peer->vdev->pdev->soc->ctrl_psoc,
1686 					peer->vdev->vdev_id,
1687 					peer->mac_addr.raw, tid,
1688 					rx_tid->delba_rcode,
1689 					CDP_DELBA_REASON_NONE);
1690 		}
1691 		goto end;
1692 	} else {
1693 		rx_tid->delba_tx_success_cnt++;
1694 		rx_tid->delba_tx_retry = 0;
1695 		rx_tid->delba_tx_status = 0;
1696 	}
1697 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
1698 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1699 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1700 		peer->active_ba_session_cnt--;
1701 	}
1702 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1703 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1704 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1705 	}
1706 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1707 
1708 end:
1709 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1710 
1711 	return ret;
1712 }
1713 
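/*
 * dp_set_pn_check_wifi3() - program PN (replay) checking into the REO queues
 *
 * Summary of the flow below: the cipher selects the PN width - the
 * TKIP/CCMP/GCMP variants use a 48-bit PN, WAPI uses a 128-bit PN (pn_even
 * for AP mode, pn_uneven otherwise) and any other cipher disables the check.
 * Every TID that already has a hardware queue descriptor is then updated
 * through CMD_UPDATE_RX_REO_QUEUE.
 *
 * Illustrative call (a sketch only; the variable names below are made up and
 * not part of this driver): the PN is passed as four 32-bit words, with
 * rx_pn[0] holding PN bits 31..0 and rx_pn[3] holding bits 127..96, matching
 * the pn_31_0 .. pn_127_96 assignments in the loop below.
 *
 *	uint32_t example_pn[4] = { 0x00000001, 0x0, 0x0, 0x0 };
 *
 *	dp_set_pn_check_wifi3(soc_hdl, vdev_id, peer_mac,
 *			      cdp_sec_type_aes_ccmp, example_pn);
 */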
1714 QDF_STATUS
1715 dp_set_pn_check_wifi3(struct cdp_soc_t *soc_t, uint8_t vdev_id,
1716 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1717 		      uint32_t *rx_pn)
1718 {
1719 	struct dp_pdev *pdev;
1720 	int i;
1721 	uint8_t pn_size;
1722 	struct hal_reo_cmd_params params;
1723 	struct dp_peer *peer = NULL;
1724 	struct dp_vdev *vdev = NULL;
1725 	struct dp_soc *soc = NULL;
1726 
1727 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc_t,
1728 					      peer_mac, 0, vdev_id,
1729 					      DP_MOD_ID_CDP);
1730 
1731 	if (!peer) {
1732 		dp_peer_debug("%pK: Peer is NULL!", soc_t);
1733 		return QDF_STATUS_E_FAILURE;
1734 	}
1735 
1736 	vdev = peer->vdev;
1737 
1738 	if (!vdev) {
1739 		dp_peer_debug("%pK: VDEV is NULL!", soc_t);
1740 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1741 		return QDF_STATUS_E_FAILURE;
1742 	}
1743 
1744 	pdev = vdev->pdev;
1745 	soc = pdev->soc;
1746 	qdf_mem_zero(&params, sizeof(params));
1747 
1748 	params.std.need_status = 1;
1749 	params.u.upd_queue_params.update_pn_valid = 1;
1750 	params.u.upd_queue_params.update_pn_size = 1;
1751 	params.u.upd_queue_params.update_pn = 1;
1752 	params.u.upd_queue_params.update_pn_check_needed = 1;
1753 	params.u.upd_queue_params.update_svld = 1;
1754 	params.u.upd_queue_params.svld = 0;
1755 
1756 	switch (sec_type) {
1757 	case cdp_sec_type_tkip_nomic:
1758 	case cdp_sec_type_aes_ccmp:
1759 	case cdp_sec_type_aes_ccmp_256:
1760 	case cdp_sec_type_aes_gcmp:
1761 	case cdp_sec_type_aes_gcmp_256:
1762 		params.u.upd_queue_params.pn_check_needed = 1;
1763 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
1764 		pn_size = 48;
1765 		break;
1766 	case cdp_sec_type_wapi:
1767 		params.u.upd_queue_params.pn_check_needed = 1;
1768 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
1769 		pn_size = 128;
1770 		if (vdev->opmode == wlan_op_mode_ap) {
1771 			params.u.upd_queue_params.pn_even = 1;
1772 			params.u.upd_queue_params.update_pn_even = 1;
1773 		} else {
1774 			params.u.upd_queue_params.pn_uneven = 1;
1775 			params.u.upd_queue_params.update_pn_uneven = 1;
1776 		}
1777 		break;
1778 	default:
1779 		params.u.upd_queue_params.pn_check_needed = 0;
1780 		pn_size = 0;
1781 		break;
1782 	}
1783 
1784 	for (i = 0; i < DP_MAX_TIDS; i++) {
1785 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1786 
1787 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1788 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1789 			params.std.addr_lo =
1790 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1791 			params.std.addr_hi =
1792 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1793 
1794 			if (pn_size) {
1795 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
1796 					     soc, i, rx_pn[3], rx_pn[2],
1797 					     rx_pn[1], rx_pn[0]);
1798 				params.u.upd_queue_params.update_pn_valid = 1;
1799 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1800 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1801 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1802 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1803 			}
1804 			rx_tid->pn_size = pn_size;
1805 			if (dp_reo_send_cmd(soc,
1806 					    CMD_UPDATE_RX_REO_QUEUE,
1807 					    &params, dp_rx_tid_update_cb,
1808 					    rx_tid)) {
1809 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
1810 					   " tid %d desc %pK", rx_tid->tid,
1811 					   (void *)(rx_tid->hw_qdesc_paddr));
1812 				DP_STATS_INC(soc,
1813 					     rx.err.reo_cmd_send_fail, 1);
1814 			}
1815 		} else {
1816 			dp_peer_info("%pK: PN check not setup for TID:%d", soc, i);
1817 		}
1818 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1819 	}
1820 
1821 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1822 
1823 	return QDF_STATUS_SUCCESS;
1824 }
1825 
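/*
 * dp_rx_delba_ind_handler() - handle a DELBA indication from the target
 *
 * Summary of the flow below: if the TID has a REO queue and no DELBA is
 * already outstanding (delba_tx_status == 0), the requested window size is
 * recorded as an override capped at 63, the reason code is set to
 * IEEE80211_REASON_QOS_SETUP_REQUIRED and the control path is asked to
 * transmit a DELBA, presumably so the session can be renegotiated with the
 * smaller window.
 */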
1826 QDF_STATUS
1827 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
1828 			uint8_t tid, uint16_t win_sz)
1829 {
1830 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1831 	struct dp_peer *peer;
1832 	struct dp_rx_tid *rx_tid;
1833 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1834 
1835 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1836 
1837 	if (!peer) {
1838 		dp_peer_err("%pK: Couldn't find peer from ID %d",
1839 			    soc, peer_id);
1840 		return QDF_STATUS_E_FAILURE;
1841 	}
1842 
1843 	qdf_assert_always(tid < DP_MAX_TIDS);
1844 
1845 	rx_tid = &peer->rx_tid[tid];
1846 
1847 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1848 		if (!rx_tid->delba_tx_status) {
1849 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
1850 				     soc, peer_id, tid, win_sz);
1851 
1852 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1853 
1854 			rx_tid->delba_tx_status = 1;
1855 
1856 			rx_tid->rx_ba_win_size_override =
1857 			    qdf_min((uint16_t)63, win_sz);
1858 
1859 			rx_tid->delba_rcode =
1860 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
1861 
1862 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1863 
1864 			if (soc->cdp_soc.ol_ops->send_delba)
1865 				soc->cdp_soc.ol_ops->send_delba(
1866 					peer->vdev->pdev->soc->ctrl_psoc,
1867 					peer->vdev->vdev_id,
1868 					peer->mac_addr.raw,
1869 					tid,
1870 					rx_tid->delba_rcode,
1871 					CDP_DELBA_REASON_NONE);
1872 		}
1873 	} else {
1874 		dp_peer_err("%pK: BA session is not setup for TID:%d ",
1875 			    soc, tid);
1876 		status = QDF_STATUS_E_FAILURE;
1877 	}
1878 
1879 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1880 
1881 	return status;
1882 }
1883 
1884 #ifdef IPA_OFFLOAD
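/*
 * dp_peer_get_rxtid_stats_ipa() - collect REO queue stats for the IPA path
 *
 * Summary of the flow below: for every data TID (plus the non-QoS TID) that
 * has a hardware queue descriptor, a CMD_GET_QUEUE_STATS is issued with the
 * TID and peer id packed into the callback context as
 * (tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id, followed by a
 * CMD_FLUSH_CACHE with flush_no_inval set so the descriptor contents in
 * memory stay current for debugging.  The return value is the number of
 * stats commands that were sent successfully.
 */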
1885 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
1886 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
1887 {
1888 	struct dp_soc *soc = peer->vdev->pdev->soc;
1889 	struct hal_reo_cmd_params params;
1890 	int i;
1891 	int stats_cmd_sent_cnt = 0;
1892 	QDF_STATUS status;
1893 	uint16_t peer_id = peer->peer_id;
1894 	unsigned long comb_peer_id_tid;
1895 	struct dp_rx_tid *rx_tid;
1896 
1897 	if (!dp_stats_cmd_cb)
1898 		return stats_cmd_sent_cnt;
1899 
1900 	qdf_mem_zero(&params, sizeof(params));
1901 	for (i = 0; i < DP_MAX_TIDS; i++) {
1902 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
1903 			continue;
1904 
1905 		rx_tid = &peer->rx_tid[i];
1906 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1907 			params.std.need_status = 1;
1908 			params.std.addr_lo =
1909 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1910 			params.std.addr_hi =
1911 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1912 			params.u.stats_params.clear = 1;
1913 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
1914 					    | peer_id);
1915 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
1916 						 &params, dp_stats_cmd_cb,
1917 						 (void *)comb_peer_id_tid);
1918 			if (QDF_IS_STATUS_SUCCESS(status))
1919 				stats_cmd_sent_cnt++;
1920 
1921 			/* Flush REO descriptor from HW cache to update stats
1922 			 * in descriptor memory. This is to help debugging
1923 			 */
1924 			qdf_mem_zero(&params, sizeof(params));
1925 			params.std.need_status = 0;
1926 			params.std.addr_lo =
1927 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1928 			params.std.addr_hi =
1929 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1930 			params.u.fl_cache_params.flush_no_inval = 1;
1931 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
1932 					NULL);
1933 		}
1934 	}
1935 
1936 	return stats_cmd_sent_cnt;
1937 }
1938 
1939 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
1940 
1941 #endif
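/*
 * dp_peer_rxtid_stats() - collect REO queue stats for every data TID
 *
 * Same flow as the IPA variant above, except the stats callback is invoked
 * with the caller supplied cb_ctxt when one is given, or with the rx_tid
 * otherwise.  Each stats command is followed by a non-invalidating cache
 * flush so the descriptor in memory reflects the latest hardware state.
 */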
1942 int dp_peer_rxtid_stats(struct dp_peer *peer,
1943 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
1944 			void *cb_ctxt)
1945 {
1946 	struct dp_soc *soc = peer->vdev->pdev->soc;
1947 	struct hal_reo_cmd_params params;
1948 	int i;
1949 	int stats_cmd_sent_cnt = 0;
1950 	QDF_STATUS status;
1951 	struct dp_rx_tid *rx_tid;
1952 
1953 	if (!dp_stats_cmd_cb)
1954 		return stats_cmd_sent_cnt;
1955 
1956 	qdf_mem_zero(&params, sizeof(params));
1957 	for (i = 0; i < DP_MAX_TIDS; i++) {
1958 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
1959 			continue;
1960 
1961 		rx_tid = &peer->rx_tid[i];
1962 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1963 			params.std.need_status = 1;
1964 			params.std.addr_lo =
1965 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1966 			params.std.addr_hi =
1967 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1968 
1969 			if (cb_ctxt) {
1970 				status = dp_reo_send_cmd(
1971 						soc, CMD_GET_QUEUE_STATS,
1972 						&params, dp_stats_cmd_cb,
1973 						cb_ctxt);
1974 			} else {
1975 				status = dp_reo_send_cmd(
1976 						soc, CMD_GET_QUEUE_STATS,
1977 						&params, dp_stats_cmd_cb,
1978 						rx_tid);
1979 			}
1980 
1981 			if (QDF_IS_STATUS_SUCCESS(status))
1982 				stats_cmd_sent_cnt++;
1983 
1984 			/* Flush REO descriptor from HW cache to update stats
1985 			 * in descriptor memory. This is to help debugging
1986 			 */
1987 			qdf_mem_zero(&params, sizeof(params));
1988 			params.std.need_status = 0;
1989 			params.std.addr_lo =
1990 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1991 			params.std.addr_hi =
1992 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1993 			params.u.fl_cache_params.flush_no_inval = 1;
1994 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
1995 					NULL);
1996 		}
1997 	}
1998 
1999 	return stats_cmd_sent_cnt;
2000 }
2001 
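/*
 * dp_peer_rx_tids_create() - allocate the per-TID RX state for a peer
 *
 * Summary of the flow below: MLD peers are skipped; for every other peer a
 * zeroed array of DP_MAX_TIDS dp_rx_tid entries is allocated and a spinlock
 * is created for each TID.  Allocation is refused if the array already
 * exists.
 */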
2002 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
2003 {
2004 	uint8_t i;
2005 
2006 	if (IS_MLO_DP_MLD_PEER(peer)) {
2007 		dp_peer_info("skip for mld peer");
2008 		return QDF_STATUS_SUCCESS;
2009 	}
2010 
2011 	if (peer->rx_tid) {
2012 		QDF_BUG(0);
2013 		dp_peer_err("peer rx_tid mem already exists");
2014 		return QDF_STATUS_E_FAILURE;
2015 	}
2016 
2017 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
2018 			sizeof(struct dp_rx_tid));
2019 
2020 	if (!peer->rx_tid) {
2021 		dp_err("fail to alloc tid for peer " QDF_MAC_ADDR_FMT,
2022 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2023 		return QDF_STATUS_E_NOMEM;
2024 	}
2025 
2026 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
2027 	for (i = 0; i < DP_MAX_TIDS; i++)
2028 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
2029 
2030 	return QDF_STATUS_SUCCESS;
2031 }
2032 
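/*
 * dp_peer_rx_tids_destroy() - release the per-TID RX state of a peer
 *
 * For peers other than MLO link peers the per-TID spinlocks are destroyed
 * and the rx_tid array is freed; the rx_tid pointer is cleared in every
 * case.
 */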
2033 void dp_peer_rx_tids_destroy(struct dp_peer *peer)
2034 {
2035 	uint8_t i;
2036 
2037 	if (!IS_MLO_DP_LINK_PEER(peer)) {
2038 		for (i = 0; i < DP_MAX_TIDS; i++)
2039 			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
2040 
2041 		qdf_mem_free(peer->rx_tid);
2042 	}
2043 
2044 	peer->rx_tid = NULL;
2045 }
2046 
2047 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
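/*
 * dp_dump_rx_reo_queue_info() - REO flush-cache completion callback
 *
 * Invoked once the CMD_FLUSH_CACHE issued by
 * dp_send_cache_flush_for_rx_tid() completes; on success it dumps the REO
 * queue descriptor of the TID from memory under the TID lock.
 */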
2048 void dp_dump_rx_reo_queue_info(
2049 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
2050 {
2051 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2052 
2053 	if (!rx_tid)
2054 		return;
2055 
2056 	if (reo_status->fl_cache_status.header.status !=
2057 		HAL_REO_CMD_SUCCESS) {
2058 		dp_err_rl("Rx tid REO HW desc flush failed (%d)",
2059 			  reo_status->fl_cache_status.header.status);
2060 		return;
2061 	}
2062 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2063 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
2064 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2065 }
2066 
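/*
 * dp_send_cache_flush_for_rx_tid() - flush every REO queue of a peer to DDR
 *
 * For each TID that has a hardware queue descriptor a CMD_FLUSH_CACHE is
 * sent with fwd_mpdus_in_queue set (and flush_q_1k_desc for BA windows
 * larger than 256) so that dp_dump_rx_reo_queue_info() can dump the
 * descriptor once the flush completes.  The loop stops at the first send
 * failure.
 */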
2067 void dp_send_cache_flush_for_rx_tid(
2068 	struct dp_soc *soc, struct dp_peer *peer)
2069 {
2070 	int i;
2071 	struct dp_rx_tid *rx_tid;
2072 	struct hal_reo_cmd_params params;
2073 
2074 	if (!peer) {
2075 		dp_err_rl("Peer is NULL");
2076 		return;
2077 	}
2078 
2079 	for (i = 0; i < DP_MAX_TIDS; i++) {
2080 		rx_tid = &peer->rx_tid[i];
2081 		if (!rx_tid)
2082 			continue;
2083 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2084 		if (rx_tid->hw_qdesc_vaddr_aligned) {
2085 			qdf_mem_zero(&params, sizeof(params));
2086 			params.std.need_status = 1;
2087 			params.std.addr_lo =
2088 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2089 			params.std.addr_hi =
2090 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2091 			params.u.fl_cache_params.flush_no_inval = 0;
2092 
2093 			if (rx_tid->ba_win_size > 256)
2094 				params.u.fl_cache_params.flush_q_1k_desc = 1;
2095 			params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
2096 
2097 			if (QDF_STATUS_SUCCESS !=
2098 				dp_reo_send_cmd(
2099 					soc, CMD_FLUSH_CACHE,
2100 					&params, dp_dump_rx_reo_queue_info,
2101 					(void *)rx_tid)) {
2102 				dp_err_rl("cache flush send failed tid %d",
2103 					  rx_tid->tid);
2104 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2105 				break;
2106 			}
2107 		}
2108 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2109 	}
2110 }
2111 
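/*
 * dp_get_rx_reo_queue_info() - debug entry point to dump REO queues in DDR
 *
 * Resolves the vdev and its BSS peer, then triggers
 * dp_send_cache_flush_for_rx_tid() for that peer.  References taken on the
 * peer and vdev are released before returning.
 *
 * Hedged usage sketch (handle and id names below are illustrative only):
 *
 *	dp_get_rx_reo_queue_info((struct cdp_soc_t *)dp_soc_handle, vdev_id);
 */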
2112 void dp_get_rx_reo_queue_info(
2113 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2114 {
2115 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
2116 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2117 						     DP_MOD_ID_GENERIC_STATS);
2118 	struct dp_peer *peer = NULL;
2119 
2120 	if (!vdev) {
2121 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
2122 		goto failed;
2123 	}
2124 
2125 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2126 
2127 	if (!peer) {
2128 		dp_err_rl("Peer is NULL");
2129 		goto failed;
2130 	}
2131 	dp_send_cache_flush_for_rx_tid(soc, peer);
2132 failed:
2133 	if (peer)
2134 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2135 	if (vdev)
2136 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2137 }
2138 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2139 
2140