xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/mlo/dp_mlo.c (revision 7dab70881237c23ddd8f7bd28f5eaaab27f15464)
1 /*
2  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 #include <wlan_utility.h>
17 #include <dp_internal.h>
18 #include <dp_htt.h>
19 #include <hal_be_api.h>
20 #include "dp_mlo.h"
21 #include <dp_be.h>
22 #include <dp_be_rx.h>
23 #include <dp_htt.h>
24 #include <dp_internal.h>
25 #include <wlan_cfg.h>
26 #include <wlan_mlo_mgr_cmn.h>
27 #include "dp_umac_reset.h"
28 
29 #ifdef DP_UMAC_HW_RESET_SUPPORT
30 /**
31  * dp_umac_reset_update_partner_map() - Update Umac reset partner map
32  * @mlo_ctx: mlo soc context
33  * @chip_id: chip id
34  * @set: flag indicating whether to set or clear the bit
35  *
36  * Return: void
37  */
38 static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
39 					     int chip_id, bool set);
40 #endif
41 /**
42  * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
43  * @ctrl_ctxt: CDP control context
44  *
45  * Return: DP MLO context handle on success, NULL on failure
46  */
47 static struct cdp_mlo_ctxt *
48 dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
49 {
50 	struct dp_mlo_ctxt *mlo_ctxt =
51 		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));
52 
53 	if (!mlo_ctxt) {
54 		dp_err("Failed to allocate DP MLO Context");
55 		return NULL;
56 	}
57 
58 	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;
59 
60 	if (dp_mlo_peer_find_hash_attach_be
61 			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
62 		dp_err("Failed to allocate peer hash");
63 		qdf_mem_free(mlo_ctxt);
64 		return NULL;
65 	}
66 
67 	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
68 			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
69 			      LRO_IPV4_SEED_ARR_SZ));
70 	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
71 			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
72 			      LRO_IPV6_SEED_ARR_SZ));
73 
74 	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
75 	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
76 	return dp_mlo_ctx_to_cdp(mlo_ctxt);
77 }
78 
79 /**
80  * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
81  * @cdp_ml_ctxt: pointer to CDP DP MLO context
82  *
83  * Return: void
84  */
85 static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
86 {
87 	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
88 
89 	if (!cdp_ml_ctxt)
90 		return;
91 
92 	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
93 	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
94 	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
95 	qdf_mem_free(mlo_ctxt);
96 }
97 
98 /**
99  * dp_mlo_set_soc_by_chip_id() - Add DP soc to ML context soc list
100  * @ml_ctxt: DP ML context handle
101  * @soc: DP soc handle
102  * @chip_id: MLO chip id
103  *
104  * Return: void
105  */
106 static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
107 				      struct dp_soc *soc,
108 				      uint8_t chip_id)
109 {
110 	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
111 	ml_ctxt->ml_soc_list[chip_id] = soc;
112 
113 	/* The same API is called during soc_attach and soc_detach
114 	 * soc parameter is non-null or null accordingly.
115 	 */
116 	if (soc)
117 		ml_ctxt->ml_soc_cnt++;
118 	else
119 		ml_ctxt->ml_soc_cnt--;
120 
121 	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);
122 
123 	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
124 }
125 
126 struct dp_soc*
127 dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
128 			      uint8_t chip_id)
129 {
130 	struct dp_soc *soc = NULL;
131 
132 	if (!ml_ctxt) {
133 		dp_warn("MLO context not created, MLO not enabled");
134 		return NULL;
135 	}
136 
137 	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
138 	soc = ml_ctxt->ml_soc_list[chip_id];
139 
140 	if (!soc) {
141 		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
142 		return NULL;
143 	}
144 
145 	qdf_atomic_inc(&soc->ref_count);
146 	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
147 
148 	return soc;
149 }
150 
/**
 * dp_partner_soc_rx_hw_cc_init() - initialize HW cookie conversion on
 * @be_soc for the Rx descriptor pools of every MLO partner soc
 * @mlo_ctxt: DP MLO context holding the partner soc list
 * @be_soc: BE soc on which the partners' Rx CC contexts are initialized
 *
 * NOTE(review): the soc reference taken by
 * dp_mlo_get_soc_ref_by_chip_id() is not released on either exit path,
 * matching the release-TODOs elsewhere in this file - confirm whether a
 * matching unref is required here.
 *
 * Return: QDF_STATUS_SUCCESS when all pools of all partners initialize,
 * otherwise the status of the first failing pool (remaining pools and
 * partners are skipped)
 */
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			/* Chip id not (yet) registered in the ML soc list */
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		/* Set up cookie conversion on this soc for each Rx
		 * descriptor pool of the partner
		 */
		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}
183 
/**
 * dp_mlo_soc_drain_rx_buf() - drain Rx buffers pending in this soc's rings
 * @soc: DP soc whose Rx rings are reaped
 * @arg: partner soc iterator argument (unused here)
 * @chip_id: MLO chip id of @soc (unused here)
 *
 * Masks off the Rx-related interrupt ring masks, busy-waits until no
 * instance of dp_service_srngs is running on any CPU, processes the REO
 * destination, REO exception and Rx WBM release rings in-line, and
 * finally restores the saved ring masks.
 *
 * Context: spins (no sleep) while waiting for in-flight ring processing
 * to complete.
 *
 * Return: void
 */
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt mask and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;

		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* make sure dp_service_srngs not running on any of the CPU */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	/* With interrupts masked and no service context running, reap
	 * each Rx ring this context is responsible for directly.
	 */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* iterate through each reo ring and process the buf */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* restore the interrupt mask */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}
273 
274 static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
275 			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
276 {
277 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
278 	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
279 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
280 	uint8_t pdev_id;
281 
282 	if (!cdp_ml_ctxt)
283 		return;
284 
285 	be_soc->ml_ctxt = mlo_ctxt;
286 
287 	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
288 		if (soc->pdev_list[pdev_id])
289 			dp_mlo_update_link_to_pdev_map(soc,
290 						       soc->pdev_list[pdev_id]);
291 	}
292 
293 	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
294 }
295 
296 static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
297 				struct cdp_mlo_ctxt *cdp_ml_ctxt,
298 				bool is_force_down)
299 {
300 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
301 	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
302 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
303 
304 	if (!cdp_ml_ctxt)
305 		return;
306 
307 	/* During the teardown drain the Rx buffers if any exist in the ring */
308 	dp_mlo_iter_ptnr_soc(be_soc,
309 			     dp_mlo_soc_drain_rx_buf,
310 			     NULL);
311 
312 	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
313 	be_soc->ml_ctxt = NULL;
314 }
315 
316 static QDF_STATUS dp_mlo_add_ptnr_vdev(struct dp_vdev *vdev1,
317 				       struct dp_vdev *vdev2,
318 				       struct dp_soc *soc, uint8_t pdev_id)
319 {
320 	struct dp_soc_be *soc_be = dp_get_be_soc_from_dp_soc(soc);
321 	struct dp_vdev_be *vdev2_be = dp_get_be_vdev_from_dp_vdev(vdev2);
322 
323 	/* return when valid entry  exists */
324 	if (vdev1->is_bridge_vdev) {
325 		if (vdev2_be->bridge_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
326 							CDP_INVALID_VDEV_ID)
327 			return QDF_STATUS_SUCCESS;
328 
329 		vdev2_be->bridge_vdev_list[soc_be->mlo_chip_id][pdev_id] =
330 						vdev1->vdev_id;
331 	} else {
332 		if (vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
333 							CDP_INVALID_VDEV_ID)
334 			return QDF_STATUS_SUCCESS;
335 
336 		vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] =
337 						vdev1->vdev_id;
338 	}
339 
340 	mlo_debug("Add vdev%d to vdev%d list, mlo_chip_id = %d pdev_id = %d\n",
341 		  vdev1->vdev_id, vdev2->vdev_id, soc_be->mlo_chip_id, pdev_id);
342 
343 	return QDF_STATUS_SUCCESS;
344 }
345 
346 QDF_STATUS dp_update_mlo_ptnr_list(struct cdp_soc_t *soc_hdl,
347 				   int8_t partner_vdev_ids[], uint8_t num_vdevs,
348 				   uint8_t self_vdev_id)
349 {
350 	int i, j;
351 	struct dp_soc *self_soc = cdp_soc_t_to_dp_soc(soc_hdl);
352 	struct dp_vdev *self_vdev;
353 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
354 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(self_soc);
355 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
356 
357 	if (!dp_mlo)
358 		return QDF_STATUS_E_FAILURE;
359 
360 	self_vdev = dp_vdev_get_ref_by_id(self_soc, self_vdev_id, DP_MOD_ID_RX);
361 	if (!self_vdev)
362 		return QDF_STATUS_E_FAILURE;
363 
364 	/* go through the input vdev id list and if there are partner vdevs,
365 	 * - then add the current vdev's id to partner vdev's list using pdev_id and
366 	 * increase the reference
367 	 * - add partner vdev to self list  and increase  the reference
368 	 */
369 	for (i = 0; i < num_vdevs; i++) {
370 		if (partner_vdev_ids[i] == CDP_INVALID_VDEV_ID)
371 			continue;
372 
373 		for (j = 0; j < WLAN_MAX_MLO_CHIPS; j++) {
374 			struct dp_soc *soc =
375 				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, j);
376 			if (soc) {
377 				struct dp_vdev *vdev;
378 
379 				vdev = dp_vdev_get_ref_by_id(soc,
380 					partner_vdev_ids[i], DP_MOD_ID_RX);
381 				if (vdev) {
382 					if (vdev == self_vdev) {
383 						dp_vdev_unref_delete(soc,
384 							vdev, DP_MOD_ID_RX);
385 						/*dp_soc_unref_delete(soc); */
386 						continue;
387 					}
388 					if (qdf_is_macaddr_equal(
389 						(struct qdf_mac_addr *)self_vdev->mld_mac_addr.raw,
390 						(struct qdf_mac_addr *)vdev->mld_mac_addr.raw)) {
391 						if (dp_mlo_add_ptnr_vdev(self_vdev,
392 							vdev, self_soc,
393 							self_vdev->pdev->pdev_id) !=
394 							QDF_STATUS_SUCCESS) {
395 							dp_err("Unable to add self to partner vdev's list");
396 							dp_vdev_unref_delete(soc,
397 								vdev, DP_MOD_ID_RX);
398 							/* TODO - release soc ref here */
399 							/* dp_soc_unref_delete(soc);*/
400 							ret = QDF_STATUS_E_FAILURE;
401 							goto exit;
402 						}
403 						/* add to self list */
404 						if (dp_mlo_add_ptnr_vdev(vdev, self_vdev, soc,
405 							vdev->pdev->pdev_id) !=
406 							QDF_STATUS_SUCCESS) {
407 							dp_err("Unable to add vdev to self vdev's list");
408 							dp_vdev_unref_delete(self_soc,
409 								vdev, DP_MOD_ID_RX);
410 							/* TODO - release soc ref here */
411 							/* dp_soc_unref_delete(soc);*/
412 							ret = QDF_STATUS_E_FAILURE;
413 							goto exit;
414 						}
415 					}
416 					dp_vdev_unref_delete(soc, vdev,
417 							     DP_MOD_ID_RX);
418 				} /* vdev */
419 				/* TODO - release soc ref here */
420 				/* dp_soc_unref_delete(soc); */
421 			} /* soc */
422 		} /* for */
423 	} /* for */
424 
425 exit:
426 	dp_vdev_unref_delete(self_soc, self_vdev, DP_MOD_ID_RX);
427 	return ret;
428 }
429 
430 void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev)
431 {
432 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
433 	struct dp_vdev_be *vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
434 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
435 	uint8_t soc_id = be_soc->mlo_chip_id;
436 	uint8_t pdev_id = vdev->pdev->pdev_id;
437 	int i, j;
438 
439 	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
440 		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
441 			struct dp_vdev *pr_vdev;
442 			struct dp_soc *pr_soc;
443 			struct dp_soc_be *pr_soc_be;
444 			struct dp_pdev *pr_pdev;
445 			struct dp_vdev_be *pr_vdev_be;
446 
447 			if (vdev_be->partner_vdev_list[i][j] ==
448 			    CDP_INVALID_VDEV_ID)
449 				continue;
450 
451 			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
452 			if (!pr_soc)
453 				continue;
454 			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
455 			pr_vdev = dp_vdev_get_ref_by_id(pr_soc,
456 						vdev_be->partner_vdev_list[i][j],
457 						DP_MOD_ID_RX);
458 			if (!pr_vdev)
459 				continue;
460 
461 			/* remove self vdev from partner list */
462 			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
463 			if (vdev->is_bridge_vdev)
464 				pr_vdev_be->bridge_vdev_list[soc_id][pdev_id] =
465 					CDP_INVALID_VDEV_ID;
466 			else
467 				pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
468 					CDP_INVALID_VDEV_ID;
469 
470 			/* remove partner vdev from self list */
471 			pr_pdev = pr_vdev->pdev;
472 			vdev_be->partner_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
473 				CDP_INVALID_VDEV_ID;
474 
475 			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
476 		}
477 	}
478 
479 	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
480 		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
481 			struct dp_vdev *pr_vdev = NULL;
482 			struct dp_soc *pr_soc = NULL;
483 			struct dp_soc_be *pr_soc_be = NULL;
484 			struct dp_pdev *pr_pdev = NULL;
485 			struct dp_vdev_be *pr_vdev_be = NULL;
486 
487 			if (vdev_be->bridge_vdev_list[i][j] ==
488 			    CDP_INVALID_VDEV_ID)
489 				continue;
490 
491 			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
492 			if (!pr_soc)
493 				continue;
494 			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
495 			pr_vdev = dp_vdev_get_ref_by_id(
496 						pr_soc,
497 						vdev_be->bridge_vdev_list[i][j],
498 						DP_MOD_ID_RX);
499 			if (!pr_vdev)
500 				continue;
501 
502 			/* remove self vdev from partner list */
503 			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
504 			if (vdev->is_bridge_vdev)
505 				pr_vdev_be->bridge_vdev_list[soc_id][pdev_id] =
506 					CDP_INVALID_VDEV_ID;
507 			else
508 				pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
509 					CDP_INVALID_VDEV_ID;
510 
511 			/* remove partner vdev from self list */
512 			pr_pdev = pr_vdev->pdev;
513 			vdev_be->bridge_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
514 					CDP_INVALID_VDEV_ID;
515 
516 			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
517 		}
518 	}
519 }
520 
521 static QDF_STATUS
522 dp_clear_mlo_ptnr_list(struct cdp_soc_t *soc_hdl, uint8_t self_vdev_id)
523 {
524 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
525 	struct dp_vdev *vdev;
526 
527 	vdev = dp_vdev_get_ref_by_id(soc, self_vdev_id, DP_MOD_ID_RX);
528 	if (!vdev)
529 		return QDF_STATUS_E_FAILURE;
530 
531 	dp_clr_mlo_ptnr_list(soc, vdev);
532 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
533 	return QDF_STATUS_SUCCESS;
534 }
535 
536 static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
537 {
538 	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
539 	int i;
540 	struct dp_soc *soc;
541 	struct dp_soc_be *be_soc;
542 	QDF_STATUS qdf_status;
543 
544 	if (!cdp_ml_ctxt)
545 		return;
546 
547 	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
548 		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
549 
550 		if (!soc)
551 			continue;
552 		be_soc = dp_get_be_soc_from_dp_soc(soc);
553 
554 		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);
555 
556 		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
557 			dp_alert("MLO partner SOC Rx desc CC init failed");
558 			qdf_assert_always(0);
559 		}
560 	}
561 }
562 
563 static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
564 				     uint8_t pdev_id, uint64_t delta_tsf2)
565 {
566 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
567 	struct dp_pdev *pdev;
568 	struct dp_pdev_be *be_pdev;
569 
570 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
571 						  pdev_id);
572 	if (!pdev) {
573 		dp_err("pdev is NULL for pdev_id %u", pdev_id);
574 		return;
575 	}
576 
577 	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
578 
579 	be_pdev->delta_tsf2 = delta_tsf2;
580 }
581 
582 static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
583 				    uint64_t delta_tqm)
584 {
585 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
586 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
587 
588 	be_soc->delta_tqm = delta_tqm;
589 }
590 
591 static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
592 					uint64_t offset)
593 {
594 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
595 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
596 
597 	be_soc->mlo_tstamp_offset = offset;
598 }
599 
600 #ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer the stats are folded into
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline void
dp_aggregate_vdev_basic_stats(struct cdp_vdev_stats *tgt_vdev_stats,
			      struct cdp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}
615 
/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer the stats are folded into
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline void
dp_aggregate_vdev_ingress_stats(struct cdp_vdev_stats *tgt_vdev_stats,
				struct cdp_vdev_stats *src_vdev_stats)
{
	/* Fold the ingress counters of src into tgt */
	DP_UPDATE_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats);
}
631 
/**
 * dp_aggregate_vdev_stats_for_unmapped_peers() - aggregate the stats of
 * peers that have already been unmapped
 * @tgt_vdev_stats: target vdev buffer the stats are folded into
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline void
dp_aggregate_vdev_stats_for_unmapped_peers(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Fold the unmapped-peer counters of src into tgt */
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(tgt_vdev_stats, src_vdev_stats);
}
647 
/**
 * dp_aggregate_all_vdev_stats() - aggregate both vdev ingress and
 * unmapped-peer stats
 * @tgt_vdev_stats: target vdev buffer the stats are folded into
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline void
dp_aggregate_all_vdev_stats(struct cdp_vdev_stats *tgt_vdev_stats,
			    struct cdp_vdev_stats *src_vdev_stats)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}
664 
665 /**
666  * dp_mlo_vdev_stats_aggr_bridge_vap() - aggregate bridge vdev stats
667  * @be_vdev: Dp Vdev handle
668  * @bridge_vdev: Dp vdev handle for bridge vdev
669  * @arg: buffer for target vdev stats
670  *
671  * return: void
672  */
673 static
674 void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
675 				       struct dp_vdev *bridge_vdev,
676 				       void *arg)
677 {
678 	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
679 	struct dp_vdev_be *bridge_be_vdev = NULL;
680 
681 	bridge_be_vdev = dp_get_be_vdev_from_dp_vdev(bridge_vdev);
682 	if (!bridge_be_vdev)
683 		return;
684 
685 	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats);
686 	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_be_vdev->mlo_stats);
687 	dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
688 			     DP_MOD_ID_GENERIC_STATS);
689 }
690 
/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * VDEV level based on peer type connected to vdev
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO Link or Legacy peer
 *
 * For legacy peers the full vdev stats (ingress + unmapped peers) are
 * folded in; for MLO link peers only the ingress stats of the vdev plus
 * the BE vdev's MLO unmapped-peer stats are used, and - on the mcast
 * primary vdev - the stats of the partner bridge vdevs are included.
 * Finally the stats of connected peers of @peer_type are accumulated.
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats);
	} else {
		/* Bridge vdev stats are only pulled in via the mcast
		 * primary vdev to avoid double counting
		 */
		if (be_vdev->mcast_primary) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_mlo_vdev_stats_aggr_bridge_vap,
					      (void *)vdev_stats,
					      DP_MOD_ID_GENERIC_STATS,
					      DP_BRIDGE_VDEV_ITER);
		}
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats);
		dp_aggregate_vdev_stats_for_unmapped_peers(
						tgt_vdev_stats,
						&be_vdev->mlo_stats);
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}
744 
745 /**
746  * dp_aggregate_interface_stats() - aggregate stats at VDEV level
747  * @vdev: DP VDEV handle
748  * @vdev_stats: target vdev stats pointer
749  *
750  * return: void
751  */
752 static
753 void dp_aggregate_interface_stats(struct dp_vdev *vdev,
754 				  struct cdp_vdev_stats *vdev_stats)
755 {
756 	struct dp_vdev_be *be_vdev = NULL;
757 	struct dp_soc_be *be_soc = NULL;
758 
759 	if (!vdev || !vdev->pdev)
760 		return;
761 
762 	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
763 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
764 	if (!be_vdev)
765 		return;
766 
767 	if (be_vdev->mcast_primary) {
768 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
769 				      dp_mlo_vdev_stats_aggr_bridge_vap,
770 				      (void *)vdev_stats, DP_MOD_ID_GENERIC_STATS,
771 				      DP_BRIDGE_VDEV_ITER);
772 	}
773 
774 	dp_aggregate_all_vdev_stats(vdev_stats, &be_vdev->mlo_stats);
775 	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats);
776 
777 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
778 			     DP_MOD_ID_GENERIC_STATS);
779 
780 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
781 }
782 
/**
 * dp_mlo_aggr_ptnr_iface_stats() - partner-vdev iterator callback that
 * aggregates a partner vdev's interface stats
 * @be_vdev: BE handle of the vdev being aggregated for
 * @ptnr_vdev: partner vdev to aggregate from
 * @arg: struct cdp_vdev_stats * target buffer
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	dp_aggregate_interface_stats(ptnr_vdev,
				     (struct cdp_vdev_stats *)arg);
}
800 
801 /**
802  * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate mlo partner vdev stats
803  * based on peer type
804  * @be_vdev: vdev handle
805  * @ptnr_vdev: partner vdev handle
806  * @arg: target buffer for aggregation
807  *
808  * return: void
809  */
810 static
811 void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
812 					struct dp_vdev_be *be_vdev,
813 					struct dp_vdev *ptnr_vdev,
814 					void *arg)
815 {
816 	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
817 
818 	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
819 							tgt_vdev_stats,
820 							DP_PEER_TYPE_MLO_LINK);
821 }
822 
/**
 * dp_aggregate_sta_interface_stats() - for sta mode aggregate vdev stats
 * from all link peers
 * @soc: soc handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation (struct cdp_vdev_stats *)
 *
 * Resolves the MLD peer via the vdev's BSS peer and folds the stats of
 * every link peer (and the ingress/basic stats of each link peer's
 * vdev) into @buf.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the BSS peer
 * or its MLD peer cannot be resolved
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	/* Takes a reference on each link peer; released below */
	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
						&link_peer->vdev->stats);
		dp_aggregate_vdev_basic_stats(
					(struct cdp_vdev_stats *)buf,
					&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
	return ret;
}
873 
/**
 * dp_mlo_get_mld_vdev_stats() - aggregate MLD-level vdev stats
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev to aggregate for
 * @buf: target buffer (struct cdp_vdev_stats *)
 * @link_vdev_only: when true, skip aggregating the partner vdevs
 *
 * STA vdevs are aggregated from the MLD link peers; AP vdevs in hybrid
 * non-bonded MLD mode use MLO-link-peer based aggregation, all others
 * use the full interface aggregation. Unless @link_vdev_only is set,
 * stats of the MLO partner vdevs are folded in as well.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the vdev
 * cannot be referenced
 */
static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	/* STA mode: aggregate from the MLD link peers */
	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER);
	} else {
		dp_aggregate_interface_stats(vdev, buf);

		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER);
	}

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return ret;
}
930 
931 QDF_STATUS
932 dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
933 			  uint8_t vdev_id,
934 			  void *buf,
935 			  bool is_aggregate)
936 {
937 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
938 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
939 						     DP_MOD_ID_GENERIC_STATS);
940 	if (!vdev)
941 		return QDF_STATUS_E_FAILURE;
942 
943 	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
944 	    vdev->opmode == wlan_op_mode_ap) {
945 		dp_aggregate_interface_stats_based_on_peer_type(
946 						vdev, buf,
947 						DP_PEER_TYPE_LEGACY);
948 	} else {
949 		dp_aggregate_interface_stats(vdev, buf);
950 	}
951 
952 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
953 
954 	return QDF_STATUS_SUCCESS;
955 }
956 #endif
957 
/* CDP MLO ops table registered with the soc in dp_soc_mlo_fill_params()
 * when MLO is enabled
 */
static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.update_mlo_ptnr_list = dp_update_mlo_ptnr_list,
	.clear_mlo_ptnr_list = dp_clear_mlo_ptnr_list,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};
973 
974 void dp_soc_mlo_fill_params(struct dp_soc *soc,
975 			    struct cdp_soc_attach_params *params)
976 {
977 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
978 
979 	if (!params->mlo_enabled) {
980 		dp_warn("MLO not enabled on SOC");
981 		return;
982 	}
983 
984 	be_soc->mlo_chip_id = params->mlo_chip_id;
985 	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
986 	be_soc->mlo_enabled = 1;
987 	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
988 }
989 
990 void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
991 {
992 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
993 	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
994 	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
995 	uint8_t link_id;
996 
997 	if (!be_soc->mlo_enabled)
998 		return;
999 
1000 	if (!ml_ctxt)
1001 		return;
1002 
1003 	link_id = be_pdev->mlo_link_id;
1004 
1005 	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
1006 		if (!ml_ctxt->link_to_pdev_map[link_id])
1007 			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
1008 		else
1009 			dp_alert("Attempt to update existing map for link %u",
1010 				 link_id);
1011 	}
1012 }
1013 
1014 void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
1015 {
1016 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1017 	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
1018 	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
1019 	uint8_t link_id;
1020 
1021 	if (!be_soc->mlo_enabled)
1022 		return;
1023 
1024 	if (!ml_ctxt)
1025 		return;
1026 
1027 	link_id = be_pdev->mlo_link_id;
1028 
1029 	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
1030 		ml_ctxt->link_to_pdev_map[link_id] = NULL;
1031 }
1032 
1033 static struct dp_pdev_be *
1034 dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
1035 {
1036 	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
1037 		return ml_ctxt->link_to_pdev_map[link_id];
1038 	return NULL;
1039 }
1040 
1041 void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
1042 			     struct cdp_pdev_attach_params *params)
1043 {
1044 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
1045 	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
1046 
1047 	if (!be_soc->mlo_enabled) {
1048 		dp_info("MLO not enabled on SOC");
1049 		return;
1050 	}
1051 
1052 	be_pdev->mlo_link_id = params->mlo_link_id;
1053 }
1054 
1055 void dp_mlo_partner_chips_map(struct dp_soc *soc,
1056 			      struct dp_peer *peer,
1057 			      uint16_t peer_id)
1058 {
1059 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1060 	struct dp_mlo_ctxt *mlo_ctxt = NULL;
1061 	bool is_ml_peer_id =
1062 		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
1063 	uint8_t chip_id;
1064 	struct dp_soc *temp_soc;
1065 
1066 	/* for non ML peer dont map on partner chips*/
1067 	if (!is_ml_peer_id)
1068 		return;
1069 
1070 	mlo_ctxt = be_soc->ml_ctxt;
1071 	if (!mlo_ctxt)
1072 		return;
1073 
1074 	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
1075 	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
1076 		temp_soc = mlo_ctxt->ml_soc_list[chip_id];
1077 
1078 		if (!temp_soc)
1079 			continue;
1080 
1081 		/* skip if this is current soc */
1082 		if (temp_soc == soc)
1083 			continue;
1084 
1085 		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
1086 	}
1087 	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
1088 }
1089 
1090 qdf_export_symbol(dp_mlo_partner_chips_map);
1091 
1092 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
1093 				uint16_t peer_id)
1094 {
1095 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1096 	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
1097 	bool is_ml_peer_id =
1098 		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
1099 	uint8_t chip_id;
1100 	struct dp_soc *temp_soc;
1101 
1102 	if (!is_ml_peer_id)
1103 		return;
1104 
1105 	if (!mlo_ctxt)
1106 		return;
1107 
1108 	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
1109 	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
1110 		temp_soc = mlo_ctxt->ml_soc_list[chip_id];
1111 
1112 		if (!temp_soc)
1113 			continue;
1114 
1115 		/* skip if this is current soc */
1116 		if (temp_soc == soc)
1117 			continue;
1118 
1119 		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
1120 	}
1121 	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
1122 }
1123 
1124 qdf_export_symbol(dp_mlo_partner_chips_unmap);
1125 
1126 uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
1127 {
1128 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1129 
1130 	return be_soc->mlo_chip_id;
1131 }
1132 
1133 qdf_export_symbol(dp_mlo_get_chip_id);
1134 
1135 struct dp_peer *
1136 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1137 				  uint8_t *peer_mac_addr,
1138 				  int mac_addr_is_aligned,
1139 				  uint8_t vdev_id,
1140 				  uint8_t chip_id,
1141 				  enum dp_mod_id mod_id)
1142 {
1143 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1144 	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
1145 	struct dp_soc *link_peer_soc = NULL;
1146 	struct dp_peer *peer = NULL;
1147 
1148 	if (!mlo_ctxt)
1149 		return NULL;
1150 
1151 	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
1152 
1153 	if (!link_peer_soc)
1154 		return NULL;
1155 
1156 	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
1157 				      mac_addr_is_aligned, vdev_id,
1158 				      mod_id);
1159 	qdf_atomic_dec(&link_peer_soc->ref_count);
1160 	return peer;
1161 }
1162 
1163 qdf_export_symbol(dp_link_peer_hash_find_by_chip_id);
1164 
1165 void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
1166 			    struct cdp_lro_hash_config *lro_hash)
1167 {
1168 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1169 	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
1170 
1171 	if (!be_soc->mlo_enabled || !ml_ctxt)
1172 		return dp_get_rx_hash_key_bytes(lro_hash);
1173 
1174 	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
1175 		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
1176 		      LRO_IPV4_SEED_ARR_SZ));
1177 	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
1178 		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
1179 		      LRO_IPV6_SEED_ARR_SZ));
1180 }
1181 
1182 struct dp_soc *
1183 dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
1184 {
1185 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1186 	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
1187 	struct dp_soc *replenish_soc;
1188 
1189 	if (!be_soc->mlo_enabled || !mlo_ctxt)
1190 		return soc;
1191 
1192 	if (be_soc->mlo_chip_id == chip_id)
1193 		return soc;
1194 
1195 	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
1196 	if (qdf_unlikely(!replenish_soc)) {
1197 		dp_alert("replenish SOC is NULL");
1198 		qdf_assert_always(0);
1199 	}
1200 
1201 	return replenish_soc;
1202 }
1203 
1204 uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
1205 {
1206 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1207 	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
1208 
1209 	if (!be_soc->mlo_enabled || !mlo_ctxt)
1210 		return 1;
1211 
1212 	return mlo_ctxt->ml_soc_cnt;
1213 }
1214 
1215 struct dp_soc *
1216 dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
1217 {
1218 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1219 	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
1220 	struct dp_soc *partner_soc = NULL;
1221 	uint8_t chip_id;
1222 
1223 	if (!be_soc->mlo_enabled || !mlo_ctxt)
1224 		return soc;
1225 
1226 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
1227 		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
1228 
1229 		if (!partner_soc)
1230 			continue;
1231 
1232 		if (partner_soc->idle_link_bm_id == idle_bm_id)
1233 			return partner_soc;
1234 	}
1235 
1236 	return NULL;
1237 }
1238 
1239 #ifdef WLAN_MLO_MULTI_CHIP
1240 static void dp_print_mlo_partner_list(struct dp_vdev_be *be_vdev,
1241 				      struct dp_vdev *partner_vdev,
1242 				      void *arg)
1243 {
1244 	struct dp_vdev_be *partner_vdev_be = NULL;
1245 	struct dp_soc_be *partner_soc_be = NULL;
1246 
1247 	partner_vdev_be = dp_get_be_vdev_from_dp_vdev(partner_vdev);
1248 	partner_soc_be = dp_get_be_soc_from_dp_soc(partner_vdev->pdev->soc);
1249 
1250 	DP_PRINT_STATS("is_bridge_vap = %s, mcast_primary = %s,  vdev_id = %d, pdev_id = %d, chip_id = %d",
1251 		       partner_vdev->is_bridge_vdev ? "true" : "false",
1252 		       partner_vdev_be->mcast_primary ? "true" : "false",
1253 		       partner_vdev->vdev_id,
1254 		       partner_vdev->pdev->pdev_id,
1255 		       partner_soc_be->mlo_chip_id);
1256 }
1257 
1258 void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
1259 			   struct dp_vdev_be *be_vdev,
1260 			   dp_ptnr_vdev_iter_func func,
1261 			   void *arg,
1262 			   enum dp_mod_id mod_id,
1263 			   uint8_t type)
1264 {
1265 	int i = 0;
1266 	int j = 0;
1267 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
1268 
1269 	if (type < DP_LINK_VDEV_ITER || type > DP_ALL_VDEV_ITER) {
1270 		dp_err("invalid iterate type");
1271 		return;
1272 	}
1273 
1274 	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
1275 	     IS_LINK_VDEV_ITER_REQUIRED(type); i++) {
1276 		struct dp_soc *ptnr_soc =
1277 				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
1278 
1279 		if (!ptnr_soc)
1280 			continue;
1281 		for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
1282 			struct dp_vdev *ptnr_vdev;
1283 
1284 			ptnr_vdev = dp_vdev_get_ref_by_id(
1285 					ptnr_soc,
1286 					be_vdev->partner_vdev_list[i][j],
1287 					mod_id);
1288 			if (!ptnr_vdev)
1289 				continue;
1290 			(*func)(be_vdev, ptnr_vdev, arg);
1291 			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
1292 					     ptnr_vdev,
1293 					     mod_id);
1294 		}
1295 	}
1296 
1297 	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
1298 	     IS_BRIDGE_VDEV_ITER_REQUIRED(type); i++) {
1299 		struct dp_soc *ptnr_soc =
1300 				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
1301 
1302 		if (!ptnr_soc)
1303 			continue;
1304 		for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
1305 			struct dp_vdev *bridge_vdev;
1306 
1307 			bridge_vdev = dp_vdev_get_ref_by_id(
1308 					ptnr_soc,
1309 					be_vdev->bridge_vdev_list[i][j],
1310 					mod_id);
1311 			if (!bridge_vdev)
1312 				continue;
1313 			(*func)(be_vdev, bridge_vdev, arg);
1314 			dp_vdev_unref_delete(bridge_vdev->pdev->soc,
1315 					     bridge_vdev,
1316 					     mod_id);
1317 		}
1318 	}
1319 }
1320 
1321 qdf_export_symbol(dp_mlo_iter_ptnr_vdev);
1322 
1323 void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev)
1324 {
1325 	struct dp_vdev_be *be_vdev = NULL;
1326 	struct dp_soc_be *be_soc = NULL;
1327 
1328 	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
1329 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1330 
1331 	DP_PRINT_STATS("self vdev is_bridge_vap = %s, mcast_primary = %s, vdev = %d, pdev_id = %d, chip_id = %d",
1332 		       vdev->is_bridge_vdev ? "true" : "false",
1333 		       be_vdev->mcast_primary ? "true" : "false",
1334 		       vdev->vdev_id,
1335 		       vdev->pdev->pdev_id,
1336 		       dp_mlo_get_chip_id(vdev->pdev->soc));
1337 
1338 	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
1339 			      dp_print_mlo_partner_list,
1340 			      NULL, DP_MOD_ID_GENERIC_STATS,
1341 			      DP_ALL_VDEV_ITER);
1342 }
1343 #endif
1344 
1345 #ifdef WLAN_MCAST_MLO
1346 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
1347 					      struct dp_vdev_be *be_vdev,
1348 					      enum dp_mod_id mod_id)
1349 {
1350 	int i = 0;
1351 	int j = 0;
1352 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
1353 	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
1354 
1355 	if (be_vdev->mcast_primary) {
1356 		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
1357 					QDF_STATUS_SUCCESS)
1358 			return NULL;
1359 
1360 		return vdev;
1361 	}
1362 
1363 	for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
1364 		struct dp_soc *ptnr_soc =
1365 				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
1366 
1367 		if (!ptnr_soc)
1368 			continue;
1369 		for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
1370 			struct dp_vdev *ptnr_vdev = NULL;
1371 			struct dp_vdev_be *be_ptnr_vdev = NULL;
1372 
1373 			ptnr_vdev = dp_vdev_get_ref_by_id(
1374 					ptnr_soc,
1375 					be_vdev->partner_vdev_list[i][j],
1376 					mod_id);
1377 			if (!ptnr_vdev)
1378 				continue;
1379 			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
1380 			if (be_ptnr_vdev->mcast_primary)
1381 				return ptnr_vdev;
1382 			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
1383 					     &be_ptnr_vdev->vdev,
1384 					     mod_id);
1385 		}
1386 	}
1387 	return NULL;
1388 }
1389 
1390 qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
1391 #endif
1392 
1393 /**
1394  * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
1395  * @be_soc: dp_soc_be pointer
1396  * @func: Function to be called for each soc
1397  * @arg: context to be passed to the callback
1398  *
1399  * Return: true if mlo is enabled, false if mlo is disabled
1400  */
1401 bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
1402 			  void *arg)
1403 {
1404 	int i = 0;
1405 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
1406 
1407 	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
1408 		return false;
1409 
1410 	for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
1411 		struct dp_soc *ptnr_soc =
1412 				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
1413 
1414 		if (!ptnr_soc)
1415 			continue;
1416 		(*func)(ptnr_soc, arg, i);
1417 	}
1418 
1419 	return true;
1420 }
1421 
1422 qdf_export_symbol(dp_mlo_iter_ptnr_soc);
1423 
1424 static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
1425 {
1426 	struct dp_soc *soc;
1427 	struct dp_pdev *pdev;
1428 	struct dp_soc_be *be_soc;
1429 	uint32_t mlo_offset;
1430 
1431 	pdev = &be_pdev->pdev;
1432 	soc = pdev->soc;
1433 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1434 
1435 	mlo_offset = be_soc->mlo_tstamp_offset;
1436 
1437 	return mlo_offset;
1438 }
1439 
1440 int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
1441 					     uint8_t hw_link_id)
1442 {
1443 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1444 	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
1445 	struct dp_pdev_be *be_pdev;
1446 	int32_t delta_tsf2_mlo_offset;
1447 	int32_t mlo_offset, delta_tsf2;
1448 
1449 	if (!ml_ctxt)
1450 		return 0;
1451 
1452 	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
1453 	if (!be_pdev)
1454 		return 0;
1455 
1456 	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
1457 	delta_tsf2 = be_pdev->delta_tsf2;
1458 
1459 	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;
1460 
1461 	return delta_tsf2_mlo_offset;
1462 }
1463 
1464 int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
1465 {
1466 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1467 	int32_t delta_tqm_mlo_offset;
1468 	int32_t mlo_offset, delta_tqm;
1469 
1470 	mlo_offset = be_soc->mlo_tstamp_offset;
1471 	delta_tqm = be_soc->delta_tqm;
1472 
1473 	delta_tqm_mlo_offset = mlo_offset - delta_tqm;
1474 
1475 	return delta_tqm_mlo_offset;
1476 }
1477 
1478 #ifdef DP_UMAC_HW_RESET_SUPPORT
1479 /**
1480  * dp_umac_reset_update_partner_map() - Update Umac reset partner map
1481  * @mlo_ctx: DP ML context handle
1482  * @chip_id: chip id
1483  * @set: flag indicating whether to set or clear the bit
1484  *
1485  * Return: void
1486  */
1487 static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
1488 					     int chip_id, bool set)
1489 {
1490 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
1491 						&mlo_ctx->grp_umac_reset_ctx;
1492 
1493 	if (set)
1494 		qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
1495 	else
1496 		qdf_atomic_clear_bit(chip_id, &grp_umac_reset_ctx->partner_map);
1497 }
1498 
1499 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
1500 {
1501 	struct dp_mlo_ctxt *mlo_ctx;
1502 	struct dp_soc_be *be_soc;
1503 
1504 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1505 	if (!be_soc) {
1506 		dp_umac_reset_err("null be_soc");
1507 		return QDF_STATUS_E_NULL_VALUE;
1508 	}
1509 
1510 	mlo_ctx = be_soc->ml_ctxt;
1511 	if (!mlo_ctx) {
1512 		/* This API can be called for non-MLO SOC as well. Hence, return
1513 		 * the status as success when mlo_ctx is NULL.
1514 		 */
1515 		return QDF_STATUS_SUCCESS;
1516 	}
1517 
1518 	dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);
1519 
1520 	return QDF_STATUS_SUCCESS;
1521 }
1522 
1523 /**
1524  * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
1525  * @soc: dp soc handle
1526  *
1527  * Return: void
1528  */
1529 void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
1530 {
1531 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1532 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1533 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
1534 
1535 	if (!mlo_ctx) {
1536 		dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
1537 		return;
1538 	}
1539 
1540 	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
1541 	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1542 
1543 	grp_umac_reset_ctx->umac_reset_in_progress = false;
1544 	grp_umac_reset_ctx->is_target_recovery = false;
1545 	grp_umac_reset_ctx->response_map = 0;
1546 	grp_umac_reset_ctx->request_map = 0;
1547 	grp_umac_reset_ctx->initiator_chip_id = 0;
1548 
1549 	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1550 
1551 	dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
1552 			    mlo_ctx);
1553 }
1554 
1555 /**
1556  * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
1557  * @soc: dp soc handle
1558  * @umac_reset_ctx: Umac reset context
1559  * @rx_event: Rx event received
1560  * @is_target_recovery: Flag to indicate if it is triggered for target recovery
1561  *
1562  * Return: status
1563  */
1564 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
1565 				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
1566 				enum umac_reset_rx_event rx_event,
1567 				bool is_target_recovery)
1568 {
1569 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1570 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1571 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
1572 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1573 
1574 	if (!mlo_ctx)
1575 		return dp_umac_reset_validate_n_update_state_machine_on_rx(
1576 					umac_reset_ctx, rx_event,
1577 					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
1578 					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
1579 
1580 	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
1581 	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1582 
1583 	if (grp_umac_reset_ctx->umac_reset_in_progress) {
1584 		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1585 		return QDF_STATUS_E_INVAL;
1586 	}
1587 
1588 	status = dp_umac_reset_validate_n_update_state_machine_on_rx(
1589 					umac_reset_ctx, rx_event,
1590 					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
1591 					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
1592 
1593 	if (status != QDF_STATUS_SUCCESS) {
1594 		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1595 		return status;
1596 	}
1597 
1598 	grp_umac_reset_ctx->umac_reset_in_progress = true;
1599 	grp_umac_reset_ctx->is_target_recovery = is_target_recovery;
1600 
1601 	/* We don't wait for the 'Umac trigger' message from all socs */
1602 	grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
1603 	grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
1604 	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
1605 	grp_umac_reset_ctx->umac_reset_count++;
1606 
1607 	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1608 
1609 	return QDF_STATUS_SUCCESS;
1610 }
1611 
1612 /**
1613  * dp_umac_reset_handle_action_cb() - Function to call action callback
1614  * @soc: dp soc handle
1615  * @umac_reset_ctx: Umac reset context
1616  * @action: Action to call the callback for
1617  *
1618  * Return: QDF_STATUS status
1619  */
1620 QDF_STATUS
1621 dp_umac_reset_handle_action_cb(struct dp_soc *soc,
1622 			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
1623 			       enum umac_reset_action action)
1624 {
1625 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1626 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1627 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
1628 
1629 	if (!mlo_ctx) {
1630 		dp_umac_reset_debug("MLO context is Null");
1631 		goto handle;
1632 	}
1633 
1634 	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
1635 	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1636 
1637 	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
1638 			   &grp_umac_reset_ctx->request_map);
1639 
1640 	dp_umac_reset_debug("partner_map %u request_map %u",
1641 			    grp_umac_reset_ctx->partner_map,
1642 			    grp_umac_reset_ctx->request_map);
1643 
1644 	/* This logic is needed for synchronization between mlo socs */
1645 	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
1646 			!= grp_umac_reset_ctx->partner_map) {
1647 		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
1648 		struct hif_umac_reset_ctx *hif_umac_reset_ctx;
1649 
1650 		if (!hif_sc) {
1651 			hif_err("scn is null");
1652 			qdf_assert_always(0);
1653 			return QDF_STATUS_E_FAILURE;
1654 		}
1655 
1656 		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;
1657 
1658 		/* Mark the action as pending */
1659 		umac_reset_ctx->pending_action = action;
1660 		/* Reschedule the tasklet and exit */
1661 		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
1662 		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1663 
1664 		return QDF_STATUS_SUCCESS;
1665 	}
1666 
1667 	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1668 	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;
1669 
1670 handle:
1671 	if (!umac_reset_ctx->rx_actions.cb[action]) {
1672 		dp_umac_reset_err("rx callback is NULL");
1673 		return QDF_STATUS_E_FAILURE;
1674 	}
1675 
1676 	return umac_reset_ctx->rx_actions.cb[action](soc);
1677 }
1678 
1679 /**
1680  * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
1681  * @umac_reset_ctx: UMAC reset context
1682  * @tx_cmd: Tx command to be posted
1683  *
1684  * Return: QDF status of operation
1685  */
1686 QDF_STATUS
1687 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
1688 			  enum umac_reset_tx_cmd tx_cmd)
1689 {
1690 	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
1691 					  umac_reset_ctx);
1692 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1693 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1694 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
1695 
1696 	if (!mlo_ctx) {
1697 		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
1698 		return QDF_STATUS_SUCCESS;
1699 	}
1700 
1701 	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
1702 	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1703 
1704 	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
1705 			   &grp_umac_reset_ctx->response_map);
1706 
1707 	/* This logic is needed for synchronization between mlo socs */
1708 	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
1709 				!= grp_umac_reset_ctx->partner_map) {
1710 		dp_umac_reset_debug(
1711 			"Response(s) pending : expected map %u current map %u",
1712 			grp_umac_reset_ctx->partner_map,
1713 			grp_umac_reset_ctx->response_map);
1714 
1715 		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1716 		return QDF_STATUS_SUCCESS;
1717 	}
1718 
1719 	dp_umac_reset_debug(
1720 		"All responses received: expected map %u current map %u",
1721 		grp_umac_reset_ctx->partner_map,
1722 		grp_umac_reset_ctx->response_map);
1723 
1724 	grp_umac_reset_ctx->response_map = 0;
1725 	grp_umac_reset_ctx->request_map = 0;
1726 	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
1727 
1728 	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
1729 			     &tx_cmd);
1730 
1731 	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
1732 		dp_umac_reset_complete_umac_recovery(soc);
1733 
1734 	return QDF_STATUS_SUCCESS;
1735 }
1736 
1737 /**
1738  * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
1739  * @soc: dp soc handle
1740  *
1741  * Return: true if the soc is initiator or false otherwise
1742  */
1743 bool dp_umac_reset_initiator_check(struct dp_soc *soc)
1744 {
1745 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1746 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1747 
1748 	if (!mlo_ctx)
1749 		return true;
1750 
1751 	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
1752 				dp_mlo_get_chip_id(soc));
1753 }
1754 
1755 /**
1756  * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
1757  * @soc: dp soc handle
1758  *
1759  * Return: true if the session is for target recovery or false otherwise
1760  */
1761 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
1762 {
1763 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1764 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1765 
1766 	if (!mlo_ctx)
1767 		return false;
1768 
1769 	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
1770 }
1771 
1772 /**
1773  * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
1774  * @soc: dp soc handle
1775  *
1776  * Return: true if the soc is ignored or false otherwise
1777  */
1778 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
1779 {
1780 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1781 	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
1782 
1783 	if (!mlo_ctx)
1784 		return false;
1785 
1786 	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
1787 				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
1788 }
1789 
1790 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
1791 {
1792 	struct dp_mlo_ctxt *mlo_ctx;
1793 	struct dp_soc_be *be_soc;
1794 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
1795 
1796 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1797 	if (!be_soc) {
1798 		dp_umac_reset_err("null be_soc");
1799 		return QDF_STATUS_E_NULL_VALUE;
1800 	}
1801 
1802 	mlo_ctx = be_soc->ml_ctxt;
1803 	if (!mlo_ctx) {
1804 		/* This API can be called for non-MLO SOC as well. Hence, return
1805 		 * the status as success when mlo_ctx is NULL.
1806 		 */
1807 		return QDF_STATUS_SUCCESS;
1808 	}
1809 
1810 	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
1811 
1812 	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
1813 		  "\t\tPartner map                   :%x\n"
1814 		  "\t\tRequest map                   :%x\n"
1815 		  "\t\tResponse map                  :%x\n"
1816 		  "\t\tIs target recovery            :%d\n"
1817 		  "\t\tIs Umac reset inprogress      :%d\n"
1818 		  "\t\tNumber of UMAC reset triggered:%d\n"
1819 		  "\t\tInitiator chip ID             :%d\n",
1820 		  grp_umac_reset_ctx->partner_map,
1821 		  grp_umac_reset_ctx->request_map,
1822 		  grp_umac_reset_ctx->response_map,
1823 		  grp_umac_reset_ctx->is_target_recovery,
1824 		  grp_umac_reset_ctx->umac_reset_in_progress,
1825 		  grp_umac_reset_ctx->umac_reset_count,
1826 		  grp_umac_reset_ctx->initiator_chip_id);
1827 
1828 	return QDF_STATUS_SUCCESS;
1829 }
1830 
1831 enum cdp_umac_reset_state
1832 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
1833 {
1834 	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
1835 	struct dp_soc *soc = (struct dp_soc *)psoc;
1836 	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
1837 	struct dp_soc_be *be_soc = NULL;
1838 	struct dp_mlo_ctxt *mlo_ctx = NULL;
1839 	enum cdp_umac_reset_state umac_reset_is_inprogress;
1840 
1841 	if (!soc) {
1842 		dp_umac_reset_err("DP SOC is null");
1843 		return CDP_UMAC_RESET_INVALID_STATE;
1844 	}
1845 
1846 	umac_reset_ctx = &soc->umac_reset_ctx;
1847 
1848 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1849 	if (be_soc)
1850 		mlo_ctx = be_soc->ml_ctxt;
1851 
1852 	if (mlo_ctx) {
1853 		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
1854 		umac_reset_is_inprogress =
1855 			grp_umac_reset_ctx->umac_reset_in_progress;
1856 	} else {
1857 		umac_reset_is_inprogress = (umac_reset_ctx->current_state !=
1858 					    UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
1859 	}
1860 
1861 	if (umac_reset_is_inprogress)
1862 		return CDP_UMAC_RESET_IN_PROGRESS;
1863 
1864 	/* Check if the umac reset was in progress during the buffer
1865 	 * window.
1866 	 */
1867 	umac_reset_is_inprogress =
1868 		((qdf_get_log_timestamp_usecs() -
1869 		  umac_reset_ctx->ts.post_reset_complete_done) <=
1870 		 (wlan_cfg_get_umac_reset_buffer_window_ms(soc->wlan_cfg_ctx) *
1871 		  1000));
1872 
1873 	return (umac_reset_is_inprogress ?
1874 			CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW :
1875 			CDP_UMAC_RESET_NOT_IN_PROGRESS);
1876 }
1877 #endif
1878 
1879 struct dp_soc *
1880 dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
1881 {
1882 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1883 	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
1884 	struct dp_soc *partner_soc;
1885 
1886 	if (!be_soc->mlo_enabled || !mlo_ctxt)
1887 		return soc;
1888 
1889 	if (be_soc->mlo_chip_id == chip_id)
1890 		return soc;
1891 
1892 	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
1893 	return partner_soc;
1894 }
1895