1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_dp_rx_thread.h>
21 #include "dp_peer.h"
22 #include "dp_internal.h"
23 #include "dp_types.h"
24 #include <cdp_txrx_cmn_struct.h>
25 #include <cdp_txrx_peer_ops.h>
26 #include <cds_sched.h>
27 #include "dp_rx.h"
28 #include "wlan_dp_ucfg_api.h"
29 #include "wlan_dp_prealloc.h"
30 #include "wlan_dp_main.h"
31 #include "wlan_dp_public_struct.h"
32 #include "wlan_dp_ucfg_api.h"
33 #include "qdf_nbuf.h"
34 #include "qdf_threads.h"
35 #include "qdf_net_if.h"
36 
37 /* Timeout in ms to wait for a DP rx thread */
38 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
39 #define DP_RX_THREAD_WAIT_TIMEOUT 4000
40 #else
41 #define DP_RX_THREAD_WAIT_TIMEOUT 2000
42 #endif
43 #define DP_RX_THREAD_FLUSH_TIMEOUT 6000
44 #define DP_RX_THREAD_MIN_FLUSH_TIMEOUT 10
45 
46 #ifdef CONFIG_SLUB_DEBUG_ON
47 /* number of rx pkts after which the rx thread should yield */
48 #define DP_RX_THREAD_YIELD_PKT_CNT 20000
49 #endif
50 
51 #define DP_RX_TM_DEBUG 0
52 #if DP_RX_TM_DEBUG
53 /**
54  * dp_rx_tm_walk_skb_list() - Walk skb list and print members
55  * @nbuf_list: nbuf list to print
56  *
57  * Returns: None
58  */
59 static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
60 {
61 	qdf_nbuf_t nbuf;
62 	int i = 0;
63 
64 	nbuf = nbuf_list;
65 	while (nbuf) {
66 		dp_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK", i,
67 			 nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf));
68 		nbuf = qdf_nbuf_next(nbuf);
69 		i++;
70 	}
71 }
72 #else
73 static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
74 { }
75 #endif /* DP_RX_TM_DEBUG */
76 
77 #ifdef DP_RX_REFILL_CPU_PERF_AFFINE_MASK
78 /**
79  * dp_rx_refill_thread_set_affinity() - Set CPU affinity for the Rx refill thread
80  * @refill_thread: Contains overall rx refill thread info
81  *
82  * Return: None
83  */
84 static void
85 dp_rx_refill_thread_set_affinity(struct dp_rx_refill_thread *refill_thread)
86 {
87 	unsigned int cpus;
88 	char new_mask_str[10];
89 	qdf_cpu_mask new_mask;
90 	int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
91 	int package_id;
92 
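	/* Build a mask of the online CPUs that belong to the perf cluster(s) */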
93 	qdf_cpumask_clear(&new_mask);
94 	qdf_for_each_online_cpu(cpus) {
95 		package_id = qdf_topology_physical_package_id(cpus);
96 		if (package_id >= 0 && BIT(package_id) & perf_cpu_cluster)
97 			qdf_cpumask_set_cpu(cpus, &new_mask);
98 	}
99 
100 	qdf_thread_set_cpus_allowed_mask(refill_thread->task, &new_mask);
101 
102 	qdf_thread_cpumap_print_to_pagebuf(false, new_mask_str, &new_mask);
103 	dp_debug("Refill Thread CPU mask  %s", new_mask_str);
104 }
105 #else
106 static void
107 dp_rx_refill_thread_set_affinity(struct dp_rx_refill_thread *refill_thread)
108 {
109 }
110 #endif
111 /**
112  * dp_rx_tm_get_soc_handle() - get soc handle from struct dp_rx_tm_handle_cmn
113  * @rx_tm_handle_cmn: rx thread manager cmn handle
114  *
115  * Returns: ol_txrx_soc_handle on success, NULL on failure.
116  */
117 static inline ol_txrx_soc_handle
118 dp_rx_tm_get_soc_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
119 {
120 	struct dp_txrx_handle_cmn *txrx_handle_cmn;
121 	ol_txrx_soc_handle soc;
122 
123 	txrx_handle_cmn =
124 		dp_rx_thread_get_txrx_handle(rx_tm_handle_cmn);
125 
126 	soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
127 	return soc;
128 }
129 
130 /**
131  * dp_rx_tm_thread_dump_stats() - display stats for a rx_thread
132  * @rx_thread: rx_thread pointer for which the stats need to be
133  *            displayed
134  *
135  * Returns: None
136  */
137 static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
138 {
139 	uint8_t reo_ring_num;
140 	uint32_t off = 0;
141 	char nbuf_queued_string[100];
142 	uint32_t total_queued = 0;
143 	uint32_t temp = 0;
144 
145 	qdf_mem_zero(nbuf_queued_string, sizeof(nbuf_queued_string));
146 
147 	for (reo_ring_num = 0; reo_ring_num < DP_RX_TM_MAX_REO_RINGS;
148 	     reo_ring_num++) {
149 		temp = rx_thread->stats.nbuf_queued[reo_ring_num];
150 		if (!temp)
151 			continue;
152 		total_queued += temp;
153 		if (off >= sizeof(nbuf_queued_string))
154 			continue;
155 		off += qdf_scnprintf(&nbuf_queued_string[off],
156 				     sizeof(nbuf_queued_string) - off,
157 				     "reo[%u]:%u ", reo_ring_num, temp);
158 	}
159 
160 	if (!total_queued)
161 		return;
162 
163 	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u gro_flushes: %u gro_flushes_by_vdev_del: %u rx_flushes: %u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u enq fail:%u)",
164 		rx_thread->id,
165 		qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
166 		total_queued,
167 		nbuf_queued_string,
168 		rx_thread->stats.nbuf_dequeued,
169 		rx_thread->stats.nbuf_sent_to_stack,
170 		rx_thread->stats.gro_flushes,
171 		rx_thread->stats.gro_flushes_by_vdev_del,
172 		rx_thread->stats.rx_flushed,
173 		rx_thread->stats.nbufq_max_len,
174 		rx_thread->stats.dropped_invalid_peer,
175 		rx_thread->stats.dropped_invalid_vdev,
176 		rx_thread->stats.dropped_invalid_os_rx_handles,
177 		rx_thread->stats.dropped_others,
178 		rx_thread->stats.dropped_enq_fail);
179 }
180 
181 QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_hdl)
182 {
183 	int i;
184 
185 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
186 		if (!rx_tm_hdl->rx_thread[i])
187 			continue;
188 		dp_rx_tm_thread_dump_stats(rx_tm_hdl->rx_thread[i]);
189 	}
190 	return QDF_STATUS_SUCCESS;
191 }
192 
193 #ifdef FEATURE_ALLOW_PKT_DROPPING
194 /**
195  * dp_check_and_update_pending() - Check and update the RX pending flag
196  * @tm_handle_cmn: rx thread manager cmn handle
197  *
198  * Returns: QDF_STATUS_SUCCESS on success or qdf error code on
199  * failure
200  */
201 static inline
202 QDF_STATUS dp_check_and_update_pending(struct dp_rx_tm_handle_cmn
203 				       *tm_handle_cmn)
204 {
205 	struct dp_txrx_handle_cmn *txrx_handle_cmn;
206 	struct dp_rx_tm_handle *rx_tm_hdl =
207 		    (struct dp_rx_tm_handle *)tm_handle_cmn;
208 	struct dp_soc *dp_soc;
209 	uint32_t rx_pending_hl_threshold;
210 	uint32_t rx_pending_lo_threshold;
211 	uint32_t nbuf_queued_total = 0;
212 	uint32_t nbuf_dequeued_total = 0;
213 	uint32_t rx_flushed_total = 0;
214 	uint32_t pending = 0;
215 	int i;
216 
217 	txrx_handle_cmn =
218 		dp_rx_thread_get_txrx_handle(tm_handle_cmn);
219 	if (!txrx_handle_cmn) {
220 		dp_err("invalid txrx_handle_cmn!");
221 		QDF_BUG(0);
222 		return QDF_STATUS_E_FAILURE;
223 	}
224 
225 	dp_soc = (struct dp_soc *)dp_txrx_get_soc_from_ext_handle(
226 					txrx_handle_cmn);
227 	if (!dp_soc) {
228 		dp_err("invalid soc!");
229 		QDF_BUG(0);
230 		return QDF_STATUS_E_FAILURE;
231 	}
232 
233 	rx_pending_hl_threshold = wlan_cfg_rx_pending_hl_threshold(
234 				  dp_soc->wlan_cfg_ctx);
235 	rx_pending_lo_threshold = wlan_cfg_rx_pending_lo_threshold(
236 				  dp_soc->wlan_cfg_ctx);
237 
238 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
239 		if (likely(rx_tm_hdl->rx_thread[i])) {
240 			nbuf_queued_total +=
241 			    rx_tm_hdl->rx_thread[i]->stats.nbuf_queued_total;
242 			nbuf_dequeued_total +=
243 			    rx_tm_hdl->rx_thread[i]->stats.nbuf_dequeued;
244 			rx_flushed_total +=
245 			    rx_tm_hdl->rx_thread[i]->stats.rx_flushed;
246 		}
247 	}
248 
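	/* Pending = total queued - (dequeued + flushed). Allow dropping
	 * above the high threshold and disable it again once the backlog
	 * drains below the low threshold.
	 */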
249 	if (nbuf_queued_total > (nbuf_dequeued_total + rx_flushed_total))
250 		pending = nbuf_queued_total - (nbuf_dequeued_total +
251 					       rx_flushed_total);
252 
253 	if (unlikely(pending > rx_pending_hl_threshold))
254 		qdf_atomic_set(&rx_tm_hdl->allow_dropping, 1);
255 	else if (pending < rx_pending_lo_threshold)
256 		qdf_atomic_set(&rx_tm_hdl->allow_dropping, 0);
257 
258 	return QDF_STATUS_SUCCESS;
259 }
260 
261 #else
262 static inline
263 QDF_STATUS dp_check_and_update_pending(struct dp_rx_tm_handle_cmn
264 				       *tm_handle_cmn)
265 {
266 	return QDF_STATUS_SUCCESS;
267 }
268 #endif
269 
270 /**
271  * dp_rx_tm_thread_enqueue() - enqueue nbuf list into rx_thread
272  * @rx_thread: rx_thread in which the nbuf needs to be queued
273  * @nbuf_list: list of packets to be queued into the thread
274  *
275  * Enqueue packet into rx_thread and wake it up. The function
276  * moves the next pointer of the nbuf_list into the ext list of
277  * the first nbuf for storage into the thread. Only the first
278  * nbuf is queued into the thread nbuf queue. The reverse is
279  * done at the time of dequeue.
280  *
281  * Returns: QDF_STATUS_SUCCESS on success or qdf error code on
282  * failure
283  */
284 static QDF_STATUS dp_rx_tm_thread_enqueue(struct dp_rx_thread *rx_thread,
285 					  qdf_nbuf_t nbuf_list)
286 {
287 	qdf_nbuf_t head_ptr, next_ptr_list;
288 	uint32_t temp_qlen;
289 	uint32_t num_elements_in_nbuf;
290 	uint32_t nbuf_queued;
291 	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
292 	uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
293 	qdf_wait_queue_head_t *wait_q_ptr;
294 	uint8_t allow_dropping;
295 
296 	tm_handle_cmn = rx_thread->rtm_handle_cmn;
297 
298 	if (!tm_handle_cmn) {
299 		dp_alert("tm_handle_cmn is null!");
300 		QDF_BUG(0);
301 		return QDF_STATUS_E_FAILURE;
302 	}
303 
304 	wait_q_ptr = &rx_thread->wait_q;
305 
306 	if (reo_ring_num >= DP_RX_TM_MAX_REO_RINGS) {
307 		dp_alert("incorrect ring %u", reo_ring_num);
308 		QDF_BUG(0);
309 		return QDF_STATUS_E_FAILURE;
310 	}
311 
312 	num_elements_in_nbuf = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
313 	nbuf_queued = num_elements_in_nbuf;
314 
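	/* Drop the whole list if the pending-packet high watermark was
	 * crossed (allow_dropping is set by dp_check_and_update_pending()).
	 */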
315 	allow_dropping = qdf_atomic_read(
316 		&((struct dp_rx_tm_handle *)tm_handle_cmn)->allow_dropping);
317 	if (unlikely(allow_dropping)) {
318 		qdf_nbuf_list_free(nbuf_list);
319 		rx_thread->stats.dropped_enq_fail += num_elements_in_nbuf;
320 		nbuf_queued = 0;
321 		goto enq_done;
322 	}
323 
324 	dp_rx_tm_walk_skb_list(nbuf_list);
325 
326 	head_ptr = nbuf_list;
327 
328 	/* Ensure head doesn't have an ext list */
329 	while (qdf_unlikely(head_ptr && qdf_nbuf_get_ext_list(head_ptr))) {
330 		QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_ptr) = 1;
331 		num_elements_in_nbuf--;
332 		next_ptr_list = head_ptr->next;
333 		qdf_nbuf_set_next(head_ptr, NULL);
334 		/* count aggregated RX frame into enqueued stats */
335 		nbuf_queued += qdf_nbuf_get_gso_segs(head_ptr);
336 		qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue,
337 						 head_ptr);
338 		head_ptr = next_ptr_list;
339 	}
340 
341 	if (!head_ptr)
342 		goto enq_done;
343 
344 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_ptr) = num_elements_in_nbuf;
345 
346 	next_ptr_list = head_ptr->next;
347 
348 	if (next_ptr_list) {
349 		/* move ->next pointer to ext list */
350 		qdf_nbuf_append_ext_list(head_ptr, next_ptr_list, 0);
351 		dp_debug("appended next_ptr_list %pK to nbuf %pK ext list %pK",
352 			 qdf_nbuf_next(nbuf_list), nbuf_list,
353 			 qdf_nbuf_get_ext_list(nbuf_list));
354 	}
355 	qdf_nbuf_set_next(head_ptr, NULL);
356 
357 	qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue, head_ptr);
358 
359 enq_done:
360 	temp_qlen = qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue);
361 
362 	rx_thread->stats.nbuf_queued[reo_ring_num] += nbuf_queued;
363 	rx_thread->stats.nbuf_queued_total += nbuf_queued;
364 
365 	dp_check_and_update_pending(tm_handle_cmn);
366 
367 	if (temp_qlen > rx_thread->stats.nbufq_max_len)
368 		rx_thread->stats.nbufq_max_len = temp_qlen;
369 
370 	dp_debug("enqueue packet thread %pK wait queue %pK qlen %u",
371 		 rx_thread, wait_q_ptr,
372 		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));
373 
374 	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
375 	qdf_wake_up_interruptible(wait_q_ptr);
376 
377 	return QDF_STATUS_SUCCESS;
378 }
379 
380 /**
381  * dp_rx_tm_thread_gro_flush_ind() - post a GRO flush indication to an rx_thread
382  * @rx_thread: rx_thread in which the flush needs to be handled
383  * @flush_code: flush code to differentiate a low-throughput flush
384  *
385  * Return: QDF_STATUS_SUCCESS on success or qdf error code on
386  * failure
387  */
388 static QDF_STATUS
389 dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread,
390 			      enum dp_rx_gro_flush_code flush_code)
391 {
392 	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
393 	qdf_wait_queue_head_t *wait_q_ptr;
394 
395 	tm_handle_cmn = rx_thread->rtm_handle_cmn;
396 	wait_q_ptr = &rx_thread->wait_q;
397 
398 	qdf_atomic_set(&rx_thread->gro_flush_ind, flush_code);
399 
400 	dp_debug("Flush indication received");
401 
402 	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
403 	qdf_wake_up_interruptible(wait_q_ptr);
404 	return QDF_STATUS_SUCCESS;
405 }
406 
407 /**
408  * dp_rx_thread_adjust_nbuf_list() - restore the ->next chained nbuf list from the ext (frag) list
409  * @head: head nbuf whose ext list is moved back to the ->next pointer
410  *
411  * Returns: void
412  */
413 static void dp_rx_thread_adjust_nbuf_list(qdf_nbuf_t head)
414 {
415 	qdf_nbuf_t next_ptr_list, nbuf_list;
416 
417 	nbuf_list = head;
418 	if (head && QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) > 1) {
419 		/* move ext list to ->next pointer */
420 		next_ptr_list = qdf_nbuf_get_ext_list(head);
421 		qdf_nbuf_append_ext_list(head, NULL, 0);
422 		qdf_nbuf_set_next(nbuf_list, next_ptr_list);
423 		dp_rx_tm_walk_skb_list(nbuf_list);
424 	}
425 }
426 
427 /**
428  * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
429  * @rx_thread: rx_thread from which the nbuf needs to be dequeued
430  *
431  * Returns: nbuf or nbuf_list dequeued from rx_thread
432  */
433 static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
434 {
435 	qdf_nbuf_t head;
436 
437 	head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
438 	dp_rx_thread_adjust_nbuf_list(head);
439 
440 	dp_debug("Dequeued %pK nbuf_list", head);
441 	return head;
442 }
443 
444 #ifdef CONFIG_SLUB_DEBUG_ON
445 /**
446  * dp_rx_thread_should_yield() - check whether the rx loop should yield
447  * @rx_thread: rx_thread under consideration
448  * @iter: number of packets processed so far in this pass
449  *
450  * Returns: true if the rx loop should yield, false otherwise
451  */
452 static inline bool dp_rx_thread_should_yield(struct dp_rx_thread *rx_thread,
453 					     uint32_t iter)
454 {
455 	if (iter >= DP_RX_THREAD_YIELD_PKT_CNT ||
456 	    qdf_test_bit(RX_VDEV_DEL_EVENT, &rx_thread->event_flag))
457 		return true;
458 	return false;
459 }
460 #else
461 static inline bool dp_rx_thread_should_yield(struct dp_rx_thread *rx_thread,
462 					     uint32_t iter)
463 {
464 	return false;
465 }
466 #endif
467 
468 /**
469  * dp_rx_thread_process_nbufq() - process nbuf queue of a thread
470  * @rx_thread: rx_thread whose nbuf queue needs to be processed
471  *
472  * Returns: 0 on success, error code on failure
473  */
474 static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
475 {
476 	qdf_nbuf_t nbuf_list;
477 	uint8_t vdev_id;
478 	ol_txrx_rx_fp stack_fn;
479 	ol_osif_vdev_handle osif_vdev;
480 	ol_txrx_soc_handle soc;
481 	uint32_t num_list_elements = 0;
482 	uint32_t iterates = 0;
483 
484 	struct dp_txrx_handle_cmn *txrx_handle_cmn;
485 
486 	txrx_handle_cmn =
487 		dp_rx_thread_get_txrx_handle(rx_thread->rtm_handle_cmn);
488 
489 	soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
490 	if (!soc) {
491 		dp_err("invalid soc!");
492 		QDF_BUG(0);
493 		return -EFAULT;
494 	}
495 
496 	dp_debug("enter: qlen  %u",
497 		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));
498 
499 	nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
500 	while (nbuf_list) {
501 		num_list_elements =
502 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
503 		/* count aggregated RX frame into stats */
504 		num_list_elements += qdf_nbuf_get_gso_segs(nbuf_list);
505 		rx_thread->stats.nbuf_dequeued += num_list_elements;
506 		iterates += num_list_elements;
507 
508 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf_list);
509 		cdp_get_os_rx_handles_from_vdev(soc, vdev_id, &stack_fn,
510 						&osif_vdev);
511 		dp_debug("rx_thread %pK sending packet %pK to stack",
512 			 rx_thread, nbuf_list);
513 		if (!stack_fn || !osif_vdev ||
514 		    QDF_STATUS_SUCCESS != stack_fn(osif_vdev, nbuf_list)) {
515 			rx_thread->stats.dropped_invalid_os_rx_handles +=
516 							num_list_elements;
517 			qdf_nbuf_list_free(nbuf_list);
518 		} else {
519 			rx_thread->stats.nbuf_sent_to_stack +=
520 							num_list_elements;
521 		}
522 		if (qdf_unlikely(dp_rx_thread_should_yield(rx_thread,
523 							   iterates))) {
524 			rx_thread->stats.rx_nbufq_loop_yield++;
525 			break;
526 		}
527 		nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
528 	}
529 
530 	dp_debug("exit: qlen  %u",
531 		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));
532 
533 	return 0;
534 }
535 
536 /**
537  * dp_rx_thread_gro_flush() - flush GRO packets for the RX thread
538  * @rx_thread: rx_thread to be processed
539  * @gro_flush_code: flush code differentiating the type of flush
540  *
541  * Return: void
542  */
543 static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread,
544 				   enum dp_rx_gro_flush_code gro_flush_code)
545 {
546 	struct wlan_dp_psoc_context *dp_ctx;
547 
548 	dp_ctx =  dp_get_context();
549 	if (!dp_ctx) {
550 		dp_err("DP context is NULL");
551 		return;
552 	}
553 	dp_debug("flushing packets for thread %u", rx_thread->id);
554 	qdf_local_bh_disable();
555 	dp_ctx->dp_ops.dp_rx_thread_napi_gro_flush(&rx_thread->napi,
556 						   gro_flush_code);
557 	qdf_local_bh_enable();
558 	rx_thread->stats.gro_flushes++;
559 }
560 
561 /**
562  * dp_rx_should_flush() - Determines whether the RX thread should be flushed.
563  * @rx_thread: rx_thread to be processed
564  *
565  * Return: enum dp_rx_gro_flush_code
566  */
567 static inline enum dp_rx_gro_flush_code
568 dp_rx_should_flush(struct dp_rx_thread *rx_thread)
569 {
570 	enum dp_rx_gro_flush_code gro_flush_code;
571 
572 	gro_flush_code = qdf_atomic_read(&rx_thread->gro_flush_ind);
573 	if (qdf_atomic_test_bit(RX_VDEV_DEL_EVENT, &rx_thread->event_flag))
574 		gro_flush_code = DP_RX_GRO_NORMAL_FLUSH;
575 
576 	return gro_flush_code;
577 }
578 
579 /**
580  * dp_rx_thread_sub_loop() - rx thread subloop
581  * @rx_thread: rx_thread to be processed
582  * @shutdown: pointer to shutdown variable
583  *
584  * The function handles shutdown and suspend events from other
585  * threads and processes nbuf queue of a rx thread. In case a
586  * shutdown event is received from some other wlan thread, the
587  * function sets the shutdown pointer to true and returns
588  *
589  * Returns: 0 on success, error code on failure
590  */
591 static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
592 {
593 	enum dp_rx_gro_flush_code gro_flush_code;
594 
595 	while (true) {
596 		if (qdf_atomic_test_and_clear_bit(RX_SHUTDOWN_EVENT,
597 						  &rx_thread->event_flag)) {
598 			if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
599 							  &rx_thread->event_flag)) {
600 				qdf_event_set(&rx_thread->suspend_event);
601 			}
602 			dp_debug("shutting down (%s) id %d pid %d",
603 				 qdf_get_current_comm(), rx_thread->id,
604 				 qdf_get_current_pid());
605 			*shutdown = true;
606 			break;
607 		}
608 
609 		dp_rx_thread_process_nbufq(rx_thread);
610 
611 		gro_flush_code = dp_rx_should_flush(rx_thread);
612 		/* Only flush when gro_flush_code is either
613 		 * DP_RX_GRO_NORMAL_FLUSH or DP_RX_GRO_LOW_TPUT_FLUSH
614 		 */
615 		if (gro_flush_code != DP_RX_GRO_NOT_FLUSH) {
616 			dp_rx_thread_gro_flush(rx_thread, gro_flush_code);
617 			qdf_atomic_set(&rx_thread->gro_flush_ind, 0);
618 		}
619 
620 		if (qdf_atomic_test_and_clear_bit(RX_VDEV_DEL_EVENT,
621 						  &rx_thread->event_flag)) {
622 			rx_thread->stats.gro_flushes_by_vdev_del++;
623 			qdf_event_set(&rx_thread->vdev_del_event);
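			/* Loop back to drain any packets still queued for
			 * the deleted vdev before going back to sleep.
			 */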
624 			if (qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue))
625 				continue;
626 		}
627 
628 		if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
629 						  &rx_thread->event_flag)) {
630 			dp_debug("received suspend ind (%s) id %d pid %d",
631 				 qdf_get_current_comm(), rx_thread->id,
632 				 qdf_get_current_pid());
633 			qdf_event_set(&rx_thread->suspend_event);
634 			dp_debug("waiting for resume (%s) id %d pid %d",
635 				 qdf_get_current_comm(), rx_thread->id,
636 				 qdf_get_current_pid());
637 			qdf_wait_single_event(&rx_thread->resume_event, 0);
638 		}
639 		break;
640 	}
641 	return 0;
642 }
643 
644 /**
645  * dp_rx_thread_loop() - main dp rx thread loop
646  * @arg: pointer to dp_rx_thread structure for the rx thread
647  *
648  * Return: thread exit code
649  */
650 static int dp_rx_thread_loop(void *arg)
651 {
652 	struct dp_rx_thread *rx_thread = arg;
653 	bool shutdown = false;
654 	int status;
655 	QDF_STATUS status_intr;
656 	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
657 
658 	if (!arg) {
659 		dp_err("bad Args passed");
660 		return 0;
661 	}
662 
663 	tm_handle_cmn = rx_thread->rtm_handle_cmn;
664 
665 	qdf_set_user_nice(qdf_get_current_task(), -1);
666 	qdf_set_wake_up_idle(true);
667 
668 	qdf_event_set(&rx_thread->start_event);
669 	dp_info("starting rx_thread (%s) id %d pid %d", qdf_get_current_comm(),
670 		rx_thread->id, qdf_get_current_pid());
671 	while (!shutdown) {
672 		/* This implements the execution model algorithm */
673 		dp_debug("sleeping");
674 		status =
675 		    qdf_wait_queue_interruptible
676 				(rx_thread->wait_q,
677 				 qdf_atomic_test_bit(RX_POST_EVENT,
678 						     &rx_thread->event_flag) ||
679 				 qdf_atomic_test_bit(RX_SUSPEND_EVENT,
680 						     &rx_thread->event_flag) ||
681 				 qdf_atomic_test_bit(RX_VDEV_DEL_EVENT,
682 						     &rx_thread->event_flag));
683 		dp_debug("woken up");
684 		status_intr = qdf_status_from_os_return(status);
685 		if (status_intr == QDF_STATUS_E_RESTART) {
686 			QDF_DEBUG_PANIC("wait_event_interruptible returned -ERESTARTSYS");
687 			break;
688 		}
689 		qdf_atomic_clear_bit(RX_POST_EVENT, &rx_thread->event_flag);
690 		dp_rx_thread_sub_loop(rx_thread, &shutdown);
691 	}
692 
693 	/* If we get here the scheduler thread must exit */
694 	dp_info("exiting (%s) id %d pid %d", qdf_get_current_comm(),
695 		rx_thread->id, qdf_get_current_pid());
696 	qdf_event_set(&rx_thread->shutdown_event);
697 
698 	return 0;
699 }
700 
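/**
 * dp_rx_refill_thread_sub_loop() - rx refill thread subloop
 * @rx_thread: rx refill thread to be processed
 * @shutdown: pointer to shutdown variable
 *
 * The function handles shutdown and suspend events from other
 * threads and replenishes the RX buffer pool via
 * dp_rx_refill_buff_pool_enqueue(). In case a shutdown event is
 * received, the function sets the shutdown pointer to true and
 * returns.
 *
 * Returns: 0 on success
 */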
701 static int dp_rx_refill_thread_sub_loop(struct dp_rx_refill_thread *rx_thread,
702 					bool *shutdown)
703 {
704 	while (true) {
705 		if (qdf_atomic_test_and_clear_bit(RX_REFILL_SHUTDOWN_EVENT,
706 						  &rx_thread->event_flag)) {
707 			if (qdf_atomic_test_and_clear_bit(RX_REFILL_SUSPEND_EVENT,
708 							  &rx_thread->event_flag)) {
709 				qdf_event_set(&rx_thread->suspend_event);
710 			}
711 			dp_debug("shutting down (%s) pid %d",
712 				 qdf_get_current_comm(), qdf_get_current_pid());
713 			*shutdown = true;
714 			break;
715 		}
716 
717 		dp_rx_refill_buff_pool_enqueue((struct dp_soc *)rx_thread->soc);
718 
719 		if (qdf_atomic_test_and_clear_bit(RX_REFILL_SUSPEND_EVENT,
720 						  &rx_thread->event_flag)) {
721 			dp_debug("refill thread received suspend ind (%s) pid %d",
722 				 qdf_get_current_comm(),
723 				 qdf_get_current_pid());
724 			qdf_event_set(&rx_thread->suspend_event);
725 			dp_debug("refill thread waiting for resume (%s) pid %d",
726 				 qdf_get_current_comm(),
727 				 qdf_get_current_pid());
728 			qdf_wait_single_event(&rx_thread->resume_event, 0);
729 		}
730 		break;
731 	}
732 	return 0;
733 }
734 
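/**
 * dp_rx_refill_thread_loop() - main dp rx refill thread loop
 * @arg: pointer to the dp_rx_refill_thread structure for the refill thread
 *
 * Return: thread exit code
 */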
735 static int dp_rx_refill_thread_loop(void *arg)
736 {
737 	struct dp_rx_refill_thread *rx_thread = arg;
738 	bool shutdown = false;
739 	int status;
740 	QDF_STATUS status_intr;
741 
742 	if (!arg) {
743 		dp_err("bad Args passed");
744 		return 0;
745 	}
746 
747 	qdf_set_user_nice(qdf_get_current_task(), -1);
748 	qdf_set_wake_up_idle(true);
749 
750 	qdf_event_set(&rx_thread->start_event);
751 	dp_info("starting rx_refill_thread (%s) pid %d", qdf_get_current_comm(),
752 		qdf_get_current_pid());
753 	while (!shutdown) {
754 		/* This implements the execution model algorithm */
755 		status =
756 		    qdf_wait_queue_interruptible
757 				(rx_thread->wait_q,
758 				 qdf_atomic_test_bit(RX_REFILL_POST_EVENT,
759 						     &rx_thread->event_flag) ||
760 				 qdf_atomic_test_bit(RX_REFILL_SUSPEND_EVENT,
761 						     &rx_thread->event_flag));
762 
763 		status_intr = qdf_status_from_os_return(status);
764 		if (status_intr == QDF_STATUS_E_RESTART) {
765 			QDF_DEBUG_PANIC("wait_event_interruptible returned -ERESTARTSYS");
766 			break;
767 		}
768 		qdf_atomic_clear_bit(RX_REFILL_POST_EVENT,
769 				     &rx_thread->event_flag);
770 		dp_rx_refill_thread_sub_loop(rx_thread, &shutdown);
771 	}
772 
773 	/* If we get here the scheduler thread must exit */
774 	dp_info("exiting (%s) pid %d", qdf_get_current_comm(),
775 		qdf_get_current_pid());
776 	qdf_event_set(&rx_thread->shutdown_event);
777 
778 	return 0;
779 }
780 
781 /**
782  * dp_rx_tm_thread_napi_poll() - dummy napi poll for rx_thread NAPI
783  * @napi: pointer to DP rx_thread NAPI
784  * @budget: NAPI budget
785  *
786  * Return: 0, as this NAPI is never scheduled and hence should never be polled.
787  */
788 static int dp_rx_tm_thread_napi_poll(qdf_napi_struct *napi, int budget)
789 {
790 	QDF_DEBUG_PANIC("this napi_poll should not be polled as we don't schedule it");
791 
792 	return 0;
793 }
794 
795 /**
796  * dp_rx_tm_thread_napi_init() - Initialize dummy rx_thread NAPI
797  * @rx_thread: dp_rx_thread structure containing dummy napi and netdev
798  *
799  * Return: None
800  */
801 static void dp_rx_tm_thread_napi_init(struct dp_rx_thread *rx_thread)
802 {
803 	/* Todo - optimize to use only one dummy netdev for all thread napis */
804 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&rx_thread->netdev);
805 	qdf_netif_napi_add(&rx_thread->netdev, &rx_thread->napi,
806 			   dp_rx_tm_thread_napi_poll, 64);
807 	qdf_napi_enable(&rx_thread->napi);
808 }
809 
810 /**
811  * dp_rx_tm_thread_napi_deinit() - De-initialize dummy rx_thread NAPI
812  * @rx_thread: dp_rx_thread handle containing dummy napi and netdev
813  *
814  * Return: None
815  */
816 static void dp_rx_tm_thread_napi_deinit(struct dp_rx_thread *rx_thread)
817 {
818 	qdf_netif_napi_del(&rx_thread->napi);
819 }
820 
821 /**
822  * dp_rx_tm_thread_init() - Initialize dp_rx_thread structure and thread
823  *
824  * @rx_thread: dp_rx_thread structure to be initialized
825  * @id: id of the thread to be initialized
826  *
827  * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
828  */
829 static QDF_STATUS dp_rx_tm_thread_init(struct dp_rx_thread *rx_thread,
830 				       uint8_t id)
831 {
832 	char thread_name[15];
833 	QDF_STATUS qdf_status;
834 
835 	qdf_mem_zero(thread_name, sizeof(thread_name));
836 
837 	if (!rx_thread) {
838 		dp_err("rx_thread is null!");
839 		return QDF_STATUS_E_FAULT;
840 	}
841 	rx_thread->id = id;
842 	rx_thread->event_flag = 0;
843 	qdf_nbuf_queue_head_init(&rx_thread->nbuf_queue);
844 	qdf_event_create(&rx_thread->start_event);
845 	qdf_event_create(&rx_thread->suspend_event);
846 	qdf_event_create(&rx_thread->resume_event);
847 	qdf_event_create(&rx_thread->shutdown_event);
848 	qdf_event_create(&rx_thread->vdev_del_event);
849 	qdf_atomic_init(&rx_thread->gro_flush_ind);
850 	qdf_init_waitqueue_head(&rx_thread->wait_q);
851 	qdf_scnprintf(thread_name, sizeof(thread_name), "dp_rx_thread_%u", id);
852 	dp_info("%s %u", thread_name, id);
853 
854 	if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
855 			cfg_dp_gro_enable))
856 		dp_rx_tm_thread_napi_init(rx_thread);
857 
858 	rx_thread->task = qdf_create_thread(dp_rx_thread_loop,
859 					    rx_thread, thread_name);
860 	if (!rx_thread->task) {
861 		dp_err("could not create dp_rx_thread %d", id);
862 		return QDF_STATUS_E_FAILURE;
863 	}
864 
865 	qdf_wake_up_process(rx_thread->task);
866 	qdf_status = qdf_wait_single_event(&rx_thread->start_event, 0);
867 
868 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
869 		dp_err("failed waiting for thread creation id %d", id);
870 		return QDF_STATUS_E_FAILURE;
871 	}
872 	return QDF_STATUS_SUCCESS;
873 }
874 
875 /**
876  * dp_rx_tm_thread_deinit() - De-initialize dp_rx_thread structure and thread
877  * @rx_thread: dp_rx_thread structure to be de-initialized
879  *
880  * Return: QDF_STATUS_SUCCESS
881  */
882 static QDF_STATUS dp_rx_tm_thread_deinit(struct dp_rx_thread *rx_thread)
883 {
884 	qdf_event_destroy(&rx_thread->start_event);
885 	qdf_event_destroy(&rx_thread->suspend_event);
886 	qdf_event_destroy(&rx_thread->resume_event);
887 	qdf_event_destroy(&rx_thread->shutdown_event);
888 	qdf_event_destroy(&rx_thread->vdev_del_event);
889 
890 	if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
891 			cfg_dp_gro_enable))
892 		dp_rx_tm_thread_napi_deinit(rx_thread);
893 
894 	return QDF_STATUS_SUCCESS;
895 }
896 
897 QDF_STATUS dp_rx_refill_thread_init(struct dp_rx_refill_thread *refill_thread)
898 {
899 	char refill_thread_name[20] = {0};
900 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
901 
902 	qdf_scnprintf(refill_thread_name, sizeof(refill_thread_name),
903 		      "dp_refill_thread");
904 	dp_info("Initializing %s", refill_thread_name);
905 
906 	refill_thread->state = DP_RX_REFILL_THREAD_INVALID;
907 	refill_thread->event_flag = 0;
908 	qdf_event_create(&refill_thread->start_event);
909 	qdf_event_create(&refill_thread->suspend_event);
910 	qdf_event_create(&refill_thread->resume_event);
911 	qdf_event_create(&refill_thread->shutdown_event);
912 	qdf_init_waitqueue_head(&refill_thread->wait_q);
913 	refill_thread->task = qdf_create_thread(dp_rx_refill_thread_loop,
914 						refill_thread,
915 						refill_thread_name);
916 	if (!refill_thread->task) {
917 		dp_err("could not create dp_rx_refill_thread");
918 		return QDF_STATUS_E_FAILURE;
919 	}
920 	qdf_wake_up_process(refill_thread->task);
921 	qdf_status = qdf_wait_single_event(&refill_thread->start_event,
922 					   0);
923 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
924 		dp_err("failed waiting for refill thread creation status: %d",
925 		       qdf_status);
926 		return QDF_STATUS_E_FAILURE;
927 	}
928 
929 	dp_rx_refill_thread_set_affinity(refill_thread);
930 
931 	refill_thread->state = DP_RX_REFILL_THREAD_RUNNING;
932 	return QDF_STATUS_SUCCESS;
933 }
934 
935 QDF_STATUS dp_rx_refill_thread_deinit(struct dp_rx_refill_thread *refill_thread)
936 {
937 	qdf_set_bit(RX_REFILL_SHUTDOWN_EVENT,
938 		    &refill_thread->event_flag);
939 	qdf_set_bit(RX_REFILL_POST_EVENT,
940 		    &refill_thread->event_flag);
941 	qdf_wake_up_interruptible(&refill_thread->wait_q);
942 	qdf_wait_single_event(&refill_thread->shutdown_event, 0);
943 
944 	qdf_event_destroy(&refill_thread->start_event);
945 	qdf_event_destroy(&refill_thread->suspend_event);
946 	qdf_event_destroy(&refill_thread->resume_event);
947 	qdf_event_destroy(&refill_thread->shutdown_event);
948 
949 	refill_thread->state = DP_RX_REFILL_THREAD_INVALID;
950 	return QDF_STATUS_SUCCESS;
951 }
952 
953 QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
954 			 uint8_t num_dp_rx_threads)
955 {
956 	int i;
957 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
958 
959 	if (num_dp_rx_threads > DP_MAX_RX_THREADS) {
960 		dp_err("unable to initialize %u number of threads. MAX %u",
961 		       num_dp_rx_threads, DP_MAX_RX_THREADS);
962 		return QDF_STATUS_E_INVAL;
963 	}
964 
965 	rx_tm_hdl->num_dp_rx_threads = num_dp_rx_threads;
966 	rx_tm_hdl->state = DP_RX_THREADS_INVALID;
967 
968 	dp_info("initializing %u threads", num_dp_rx_threads);
969 
970 	/* allocate an array to contain the DP RX thread pointers */
971 	rx_tm_hdl->rx_thread = qdf_mem_malloc(num_dp_rx_threads *
972 					      sizeof(struct dp_rx_thread *));
973 
974 	if (qdf_unlikely(!rx_tm_hdl->rx_thread)) {
975 		qdf_status = QDF_STATUS_E_NOMEM;
976 		goto ret;
977 	}
978 
979 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
980 		rx_tm_hdl->rx_thread[i] =
981 			(struct dp_rx_thread *)
982 			qdf_mem_malloc(sizeof(struct dp_rx_thread));
983 		if (qdf_unlikely(!rx_tm_hdl->rx_thread[i])) {
984 			QDF_ASSERT(0);
985 			qdf_status = QDF_STATUS_E_NOMEM;
986 			goto ret;
987 		}
988 		rx_tm_hdl->rx_thread[i]->rtm_handle_cmn =
989 				(struct dp_rx_tm_handle_cmn *)rx_tm_hdl;
990 		qdf_status =
991 			dp_rx_tm_thread_init(rx_tm_hdl->rx_thread[i], i);
992 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
993 			break;
994 	}
995 ret:
996 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
997 		dp_rx_tm_deinit(rx_tm_hdl);
998 	else
999 		rx_tm_hdl->state = DP_RX_THREADS_RUNNING;
1000 
1001 	return qdf_status;
1002 }
1003 
1004 /**
1005  * dp_rx_tm_suspend() - suspend DP RX threads
1006  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1007  *            infrastructure
1008  *
1009  * Return: Success/Failure
1010  */
1011 QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_hdl)
1012 {
1013 	int i;
1014 	QDF_STATUS qdf_status;
1015 	struct dp_rx_thread *rx_thread;
1016 
1017 	if (rx_tm_hdl->state == DP_RX_THREADS_SUSPENDED) {
1018 		dp_info("already in suspend state! Ignoring.");
1019 		return QDF_STATUS_E_INVAL;
1020 	}
1021 
1022 	rx_tm_hdl->state = DP_RX_THREADS_SUSPENDING;
1023 
1024 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1025 		if (!rx_tm_hdl->rx_thread[i])
1026 			continue;
1027 		qdf_event_reset(&rx_tm_hdl->rx_thread[i]->resume_event);
1028 		qdf_event_reset(&rx_tm_hdl->rx_thread[i]->suspend_event);
1029 		qdf_set_bit(RX_SUSPEND_EVENT,
1030 			    &rx_tm_hdl->rx_thread[i]->event_flag);
1031 		qdf_wake_up_interruptible(&rx_tm_hdl->rx_thread[i]->wait_q);
1032 	}
1033 
1034 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1035 		rx_thread = rx_tm_hdl->rx_thread[i];
1036 		if (!rx_thread)
1037 			continue;
1038 		dp_debug("thread %d", i);
1039 		qdf_status = qdf_wait_single_event(&rx_thread->suspend_event,
1040 						   DP_RX_THREAD_WAIT_TIMEOUT);
1041 		if (QDF_IS_STATUS_SUCCESS(qdf_status))
1042 			dp_debug("thread:%d suspended", rx_thread->id);
1043 		else
1044 			goto suspend_fail;
1045 	}
1046 	rx_tm_hdl->state = DP_RX_THREADS_SUSPENDED;
1047 
1048 	return QDF_STATUS_SUCCESS;
1049 
1050 suspend_fail:
1051 	dp_err("thread:%d %s(%d) while waiting for suspend",
1052 	       rx_thread->id,
1053 	       qdf_status == QDF_STATUS_E_TIMEOUT ? "timed out" : "failed",
1054 	       qdf_status);
1055 
1056 	dp_rx_tm_resume(rx_tm_hdl);
1057 
1058 	return qdf_status;
1059 }
1060 
1061 /**
1062  * dp_rx_refill_thread_suspend() - Suspend DP RX refill threads
1063  * @refill_thread: containing the overall refill thread infrastructure
1064  *
1065  * Return: Success/Failure
1066  */
1067 QDF_STATUS
1068 dp_rx_refill_thread_suspend(struct dp_rx_refill_thread *refill_thread)
1069 {
1070 	QDF_STATUS qdf_status;
1071 
1072 	if (refill_thread->state == DP_RX_REFILL_THREAD_SUSPENDED) {
1073 		dp_info("already in suspend state! Ignoring.");
1074 		return QDF_STATUS_E_INVAL;
1075 	}
1076 
1077 	refill_thread->state = DP_RX_REFILL_THREAD_SUSPENDING;
1078 
1079 	qdf_event_reset(&refill_thread->resume_event);
1080 	qdf_event_reset(&refill_thread->suspend_event);
1081 	qdf_set_bit(RX_REFILL_SUSPEND_EVENT,
1082 		    &refill_thread->event_flag);
1083 	qdf_wake_up_interruptible(&refill_thread->wait_q);
1084 
1085 	qdf_status = qdf_wait_single_event(&refill_thread->suspend_event,
1086 					   DP_RX_THREAD_WAIT_TIMEOUT);
1087 	if (QDF_IS_STATUS_SUCCESS(qdf_status))
1088 		dp_debug("Refill thread  suspended");
1089 	else
1090 		goto suspend_fail;
1091 
1092 	refill_thread->state = DP_RX_REFILL_THREAD_SUSPENDED;
1093 	return QDF_STATUS_SUCCESS;
1094 
1095 suspend_fail:
1096 	dp_err("Refill thread %s(%d) while waiting for suspend",
1097 	       qdf_status == QDF_STATUS_E_TIMEOUT ? "timed out" : "failed",
1098 	       qdf_status);
1099 
1100 	dp_rx_refill_thread_resume(refill_thread);
1101 
1102 	return qdf_status;
1103 }
1104 
1105 /**
1106  * dp_rx_tm_flush_nbuf_list() - Flush rx thread nbuf list
1107  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1108  *  infrastructure
1109  * @vdev_id: vdev id for which packets are to be flushed
1110  *
1111  * Return: None
1112  */
1113 static inline void
1114 dp_rx_tm_flush_nbuf_list(struct dp_rx_tm_handle *rx_tm_hdl, uint8_t vdev_id)
1115 {
1116 	qdf_nbuf_t nbuf_list, tmp_nbuf_list;
1117 	uint32_t num_list_elements = 0;
1118 	uint64_t lock_time, unlock_time, flush_time;
1119 	qdf_nbuf_t nbuf_list_head = NULL, nbuf_list_next;
1120 	struct dp_rx_thread *rx_thread;
1121 	int i;
1122 
1123 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1124 		rx_thread = rx_tm_hdl->rx_thread[i];
1125 		if (!rx_thread)
1126 			continue;
1127 
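		/* Unlink all nbuf lists belonging to this vdev from the
		 * thread queue while holding the queue lock.
		 */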
1128 		qdf_nbuf_queue_head_lock(&rx_thread->nbuf_queue);
1129 		lock_time = qdf_get_log_timestamp();
1130 		QDF_NBUF_QUEUE_WALK_SAFE(&rx_thread->nbuf_queue, nbuf_list,
1131 					 tmp_nbuf_list) {
1132 			if (QDF_NBUF_CB_RX_VDEV_ID(nbuf_list) == vdev_id) {
1133 				qdf_nbuf_unlink_no_lock(nbuf_list,
1134 							&rx_thread->nbuf_queue);
1135 				DP_RX_HEAD_APPEND(nbuf_list_head, nbuf_list);
1136 			}
1137 		}
1138 		qdf_nbuf_queue_head_unlock(&rx_thread->nbuf_queue);
1139 		unlock_time = qdf_get_log_timestamp();
1140 
1141 		while (nbuf_list_head) {
1142 			nbuf_list_next = qdf_nbuf_queue_next(nbuf_list_head);
1143 			qdf_nbuf_set_next(nbuf_list_head, NULL);
1144 			dp_rx_thread_adjust_nbuf_list(nbuf_list_head);
1145 			num_list_elements =
1146 			    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list_head);
1147 			rx_thread->stats.rx_flushed += num_list_elements;
1148 			qdf_nbuf_list_free(nbuf_list_head);
1149 			nbuf_list_head = nbuf_list_next;
1150 		}
1151 
1152 		flush_time = qdf_get_log_timestamp();
1153 		dp_info("Thread: %u lock held time: %llu us flush time: %llu us",
1154 			rx_thread->id,
1155 			qdf_log_timestamp_to_usecs(unlock_time - lock_time),
1156 			qdf_log_timestamp_to_usecs(flush_time - unlock_time));
1157 
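		/* Wake the rx thread so it flushes GRO for this vdev and
		 * signals vdev_del_event when done.
		 */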
1158 		qdf_event_reset(&rx_thread->vdev_del_event);
1159 		qdf_set_bit(RX_VDEV_DEL_EVENT, &rx_thread->event_flag);
1160 		qdf_wake_up_interruptible(&rx_thread->wait_q);
1161 	}
1162 }
1163 
1164 /**
1165  * dp_rx_tm_wait_vdev_del_event() - wait on rx thread vdev delete event
1166  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1167  *  infrastructure
1168  *
1169  * Return: None
1170  */
1171 static inline void
1172 dp_rx_tm_wait_vdev_del_event(struct dp_rx_tm_handle *rx_tm_hdl)
1173 {
1174 	QDF_STATUS qdf_status;
1175 	int i;
1176 	struct dp_rx_thread *rx_thread;
1177 	int wait_timeout = DP_RX_THREAD_FLUSH_TIMEOUT;
1178 	uint64_t entry_time, exit_time, wait_time;
1179 
1180 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1181 		rx_thread = rx_tm_hdl->rx_thread[i];
1182 		if (!rx_thread)
1183 			continue;
1184 
1185 		entry_time = qdf_get_log_timestamp();
1186 		qdf_status =
1187 			qdf_wait_single_event(&rx_thread->vdev_del_event,
1188 					      wait_timeout);
1189 
1190 		if (QDF_IS_STATUS_SUCCESS(qdf_status))
1191 			dp_debug("thread:%d napi gro flushed successfully",
1192 				 rx_thread->id);
1193 		else if (qdf_status == QDF_STATUS_E_TIMEOUT) {
1194 			dp_err("thread:%d timed out waiting for napi gro flush",
1195 			       rx_thread->id);
1196 			/*
1197 			 * If timeout, then force flush in case any rx packets
1198 			 * belong to this vdev is still pending on stack queue,
1199 			 * while net_vdev will be freed soon.
1200 			 */
1201 			dp_rx_thread_gro_flush(rx_thread,
1202 					       DP_RX_GRO_NORMAL_FLUSH);
1203 		} else {
1204 			dp_err("thread:%d event wait failed with status: %d",
1205 			       rx_thread->id, qdf_status);
1206 		}
1207 
1208 		exit_time = qdf_get_log_timestamp();
1209 		wait_time = qdf_do_div(qdf_log_timestamp_to_usecs(exit_time -
1210 								  entry_time),
1211 				       QDF_USEC_PER_MSEC);
1212 		/*
1213 		 * Cap the total wait time across all threads at
1214 		 * DP_RX_THREAD_FLUSH_TIMEOUT: reduce the timeout used for
1215 		 * the next thread by the time already spent. Once the
1216 		 * budget is exhausted, give each remaining thread at least
1217 		 * DP_RX_THREAD_MIN_FLUSH_TIMEOUT.
1218 		 *
1219 		 * Since all the threads were already woken before calling
1220 		 * the wait API, each thread effectively gets up to
1221 		 * DP_RX_THREAD_FLUSH_TIMEOUT to set its event.
1222 		 */
1225 		if (wait_timeout > DP_RX_THREAD_MIN_FLUSH_TIMEOUT)
1226 			wait_timeout = (wait_timeout > wait_time) ?
1227 				       (wait_timeout - wait_time) :
1228 				       DP_RX_THREAD_MIN_FLUSH_TIMEOUT;
1229 	}
1230 }
1231 
1232 /**
1233  * dp_rx_tm_flush_by_vdev_id() - flush rx packets by vdev_id in all
1234  * rx thread queues
1235  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1236  * infrastructure
1237  * @vdev_id: vdev id for which packets are to be flushed
1238  *
1239  * Return: QDF_STATUS_SUCCESS
1240  */
1241 QDF_STATUS dp_rx_tm_flush_by_vdev_id(struct dp_rx_tm_handle *rx_tm_hdl,
1242 				     uint8_t vdev_id)
1243 {
1244 	uint64_t entry_time, exit_time;
1245 
1246 	entry_time = qdf_get_log_timestamp();
1247 	dp_rx_tm_flush_nbuf_list(rx_tm_hdl, vdev_id);
1248 	dp_rx_tm_wait_vdev_del_event(rx_tm_hdl);
1249 	exit_time = qdf_get_log_timestamp();
1250 	dp_info("Vdev: %u total flush time: %llu us",
1251 		vdev_id,
1252 		qdf_log_timestamp_to_usecs(exit_time - entry_time));
1253 
1254 	return QDF_STATUS_SUCCESS;
1255 }
1256 
1257 /**
1258  * dp_rx_tm_resume() - resume DP RX threads
1259  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1260  * infrastructure
1261  *
1262  * Return: QDF_STATUS_SUCCESS on resume success. QDF error otherwise.
1263  */
1264 QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_hdl)
1265 {
1266 	int i;
1267 
1268 	if (rx_tm_hdl->state != DP_RX_THREADS_SUSPENDED &&
1269 	    rx_tm_hdl->state != DP_RX_THREADS_SUSPENDING) {
1270 		dp_info("resume callback received w/o suspend! Ignoring.");
1271 		return QDF_STATUS_E_INVAL;
1272 	}
1273 
1274 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1275 		if (!rx_tm_hdl->rx_thread[i])
1276 			continue;
1277 		dp_debug("calling thread %d to resume", i);
1278 
1279 		/* positively reset event_flag for DP_RX_THREADS_SUSPENDING
1280 		 * state
1281 		 */
1282 		qdf_clear_bit(RX_SUSPEND_EVENT,
1283 			      &rx_tm_hdl->rx_thread[i]->event_flag);
1284 		qdf_event_set(&rx_tm_hdl->rx_thread[i]->resume_event);
1285 	}
1286 
1287 	rx_tm_hdl->state = DP_RX_THREADS_RUNNING;
1288 
1289 	return QDF_STATUS_SUCCESS;
1290 }
1291 
1292 /**
1293  * dp_rx_refill_thread_resume() - Resume DP RX refill threads
1294  * @refill_thread: refill_thread containing the overall thread infrastructure
1295  *
1296  * Return: QDF_STATUS_SUCCESS on resume success. QDF error otherwise.
1297  */
1298 QDF_STATUS dp_rx_refill_thread_resume(struct dp_rx_refill_thread *refill_thread)
1299 {
1300 	dp_debug("calling refill thread to resume");
1301 
1302 	if (refill_thread->state != DP_RX_REFILL_THREAD_SUSPENDED &&
1303 	    refill_thread->state != DP_RX_REFILL_THREAD_SUSPENDING) {
1304 		dp_info("resume callback received in %d state! Ignoring.",
1305 			refill_thread->state);
1306 		return QDF_STATUS_E_INVAL;
1307 	}
1308 
1309 	/* positively reset event_flag for DP_RX_REFILL_THREAD_SUSPENDING
1310 	 * state
1311 	 */
1312 	qdf_clear_bit(RX_REFILL_SUSPEND_EVENT,
1313 		      &refill_thread->event_flag);
1314 	qdf_event_set(&refill_thread->resume_event);
1315 
1316 	refill_thread->state = DP_RX_REFILL_THREAD_RUNNING;
1317 
1318 	return QDF_STATUS_SUCCESS;
1319 }
1320 
1321 /**
1322  * dp_rx_tm_shutdown() - shutdown all DP RX threads
1323  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
1324  *
1325  * Return: QDF_STATUS_SUCCESS
1326  */
1327 static QDF_STATUS dp_rx_tm_shutdown(struct dp_rx_tm_handle *rx_tm_hdl)
1328 {
1329 	int i;
1330 
1331 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1332 		if (!rx_tm_hdl->rx_thread[i] ||
1333 		    rx_tm_hdl->state == DP_RX_THREADS_INVALID)
1334 			continue;
1335 		qdf_set_bit(RX_SHUTDOWN_EVENT,
1336 			    &rx_tm_hdl->rx_thread[i]->event_flag);
1337 		qdf_set_bit(RX_POST_EVENT,
1338 			    &rx_tm_hdl->rx_thread[i]->event_flag);
1339 		qdf_wake_up_interruptible(&rx_tm_hdl->rx_thread[i]->wait_q);
1340 	}
1341 
1342 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1343 		if (!rx_tm_hdl->rx_thread[i] ||
1344 		    rx_tm_hdl->state == DP_RX_THREADS_INVALID)
1345 			continue;
1346 		dp_debug("waiting for shutdown of thread %d", i);
1347 		qdf_wait_single_event(&rx_tm_hdl->rx_thread[i]->shutdown_event,
1348 				      0);
1349 	}
1350 	rx_tm_hdl->state = DP_RX_THREADS_INVALID;
1351 	return QDF_STATUS_SUCCESS;
1352 }
1353 
1354 /**
1355  * dp_rx_tm_deinit() - de-initialize RX thread infrastructure
1356  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1357  * infrastructure
1358  *
1359  * Return: QDF_STATUS_SUCCESS
1360  */
1361 QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl)
1362 {
1363 	int i = 0;
1364 
1365 	if (!rx_tm_hdl->rx_thread) {
1366 		dp_err("rx_tm_hdl->rx_thread not initialized!");
1367 		return QDF_STATUS_SUCCESS;
1368 	}
1369 
1370 	dp_rx_tm_shutdown(rx_tm_hdl);
1371 
1372 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1373 		if (!rx_tm_hdl->rx_thread[i])
1374 			continue;
1375 		dp_rx_tm_thread_deinit(rx_tm_hdl->rx_thread[i]);
1376 		qdf_mem_free(rx_tm_hdl->rx_thread[i]);
1377 	}
1378 
1379 	/* free the array of RX thread pointers */
1380 	qdf_mem_free(rx_tm_hdl->rx_thread);
1381 	rx_tm_hdl->rx_thread = NULL;
1382 
1383 	return QDF_STATUS_SUCCESS;
1384 }
1385 
1386 /**
1387  * dp_rx_tm_select_thread() - select a DP RX thread for a nbuf
1388  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
1389  * infrastructure
1390  * @reo_ring_num: REO ring number corresponding to the thread
1391  *
1392  * The caller passes the RX context id (QDF_NBUF_CB_RX_CTX_ID) extracted
1393  * from the nbuf list. Depending on the RX_CTX (copy engine or REO
1394  * ring) on which the packet was received, the function selects
1395  * a corresponding rx_thread.
1396  *
1397  * Return: rx thread ID selected for the nbuf
1398  */
1399 static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
1400 				      uint8_t reo_ring_num)
1401 {
1402 	uint8_t selected_rx_thread;
1403 
1404 	selected_rx_thread = reo_ring_num % rx_tm_hdl->num_dp_rx_threads;
1405 	dp_debug("ring_num %d, selected thread %u", reo_ring_num,
1406 		 selected_rx_thread);
1407 
1408 	return selected_rx_thread;
1409 }
1410 
1411 QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
1412 				qdf_nbuf_t nbuf_list)
1413 {
1414 	uint8_t selected_thread_id;
1415 
1416 	selected_thread_id =
1417 		dp_rx_tm_select_thread(rx_tm_hdl,
1418 				       QDF_NBUF_CB_RX_CTX_ID(nbuf_list));
1419 	dp_rx_tm_thread_enqueue(rx_tm_hdl->rx_thread[selected_thread_id],
1420 				nbuf_list);
1421 	return QDF_STATUS_SUCCESS;
1422 }
1423 
1424 QDF_STATUS
1425 dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_hdl, int rx_ctx_id,
1426 		       enum dp_rx_gro_flush_code flush_code)
1427 {
1428 	uint8_t selected_thread_id;
1429 
1430 	selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, rx_ctx_id);
1431 	dp_rx_tm_thread_gro_flush_ind(rx_tm_hdl->rx_thread[selected_thread_id],
1432 				      flush_code);
1433 
1434 	return QDF_STATUS_SUCCESS;
1435 }
1436 
1437 qdf_napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
1438 					   uint8_t rx_ctx_id)
1439 {
1440 	uint8_t selected_thread_id;
1441 
1442 	selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, rx_ctx_id);
1443 
1444 	return &rx_tm_hdl->rx_thread[selected_thread_id]->napi;
1445 }
1446 
1447 QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
1448 				 qdf_cpu_mask *new_mask)
1449 {
1450 	int i = 0;
1451 
1452 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1453 		if (!rx_tm_hdl->rx_thread[i])
1454 			continue;
1455 		qdf_thread_set_cpus_allowed_mask(rx_tm_hdl->rx_thread[i]->task,
1456 						 new_mask);
1457 	}
1458 	return QDF_STATUS_SUCCESS;
1459 }
1460 
1461 /**
1462  * dp_rx_refill_thread_schedule() - Schedule rx refill thread
1463  * @soc: ol_txrx_soc_handle object
1464  * Return: None
1465  */
1466 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
1467 static void dp_rx_refill_thread_schedule(ol_txrx_soc_handle soc)
1468 {
1469 	struct dp_rx_refill_thread *rx_thread;
1470 	struct dp_txrx_handle *dp_ext_hdl;
1471 
1472 	if (!soc)
1473 		return;
1474 
1475 	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
1476 	if (!dp_ext_hdl)
1477 		return;
1478 
1479 	rx_thread = &dp_ext_hdl->refill_thread;
1480 	qdf_set_bit(RX_REFILL_POST_EVENT, &rx_thread->event_flag);
1481 	qdf_wake_up_interruptible(&rx_thread->wait_q);
1482 }
1483 #else
1484 static void dp_rx_refill_thread_schedule(ol_txrx_soc_handle soc)
1485 {
1486 }
1487 #endif
1488 
1489 /**
1490  * dp_get_rx_threads_num() - Get number of threads in use
1491  * @soc: ol_txrx_soc_handle object
1492  *
1493  * Return: number of threads
1494  */
1495 static uint8_t dp_get_rx_threads_num(ol_txrx_soc_handle soc)
1496 {
1497 	return cdp_get_num_rx_contexts(soc);
1498 }
1499 
1500 QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, uint8_t pdev_id,
1501 			struct dp_txrx_config *config)
1502 {
1503 	struct dp_txrx_handle *dp_ext_hdl;
1504 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1505 	uint8_t num_dp_rx_threads;
1506 	struct dp_pdev *pdev;
1507 	struct dp_soc *dp_soc;
1508 
1509 	if (qdf_unlikely(!soc)) {
1510 		dp_err("soc is NULL");
1511 		return QDF_STATUS_E_INVAL;
1512 	}
1513 
1514 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(cdp_soc_t_to_dp_soc(soc),
1515 						  pdev_id);
1516 	if (!pdev) {
1517 		dp_err("pdev is NULL");
1518 		return QDF_STATUS_E_INVAL;
1519 	}
1520 
1521 	dp_ext_hdl = qdf_mem_malloc(sizeof(*dp_ext_hdl));
1522 	if (!dp_ext_hdl) {
1523 		QDF_ASSERT(0);
1524 		return QDF_STATUS_E_NOMEM;
1525 	}
1526 
1527 	dp_info("dp_txrx_handle allocated");
1528 	dp_ext_hdl->soc = soc;
1529 	dp_ext_hdl->pdev = dp_pdev_to_cdp_pdev(pdev);
1530 	cdp_soc_set_dp_txrx_handle(soc, dp_ext_hdl);
1531 	qdf_mem_copy(&dp_ext_hdl->config, config, sizeof(*config));
1532 	dp_ext_hdl->rx_tm_hdl.txrx_handle_cmn =
1533 				dp_txrx_get_cmn_hdl_frm_ext_hdl(dp_ext_hdl);
1534 
1535 	dp_soc = cdp_soc_t_to_dp_soc(soc);
1536 	if (wlan_cfg_is_rx_refill_buffer_pool_enabled(dp_soc->wlan_cfg_ctx)) {
1537 		dp_ext_hdl->refill_thread.soc = soc;
1538 		dp_ext_hdl->refill_thread.enabled = true;
1539 		qdf_status =
1540 			dp_rx_refill_thread_init(&dp_ext_hdl->refill_thread);
1541 		if (qdf_status != QDF_STATUS_SUCCESS) {
1542 			dp_err("Failed to initialize RX refill thread status:%d",
1543 			       qdf_status);
1544 			qdf_mem_free(dp_ext_hdl);
1545 			return qdf_status;
1546 		}
1547 		cdp_register_rx_refill_thread_sched_handler(soc,
1548 							    dp_rx_refill_thread_schedule);
1549 	}
1550 
1551 	num_dp_rx_threads = dp_get_rx_threads_num(soc);
1552 	dp_info("%d RX threads in use", num_dp_rx_threads);
1553 
1554 	if (dp_ext_hdl->config.enable_rx_threads) {
1555 		qdf_status = dp_rx_tm_init(&dp_ext_hdl->rx_tm_hdl,
1556 					   num_dp_rx_threads);
1557 	}
1558 
1559 	if (QDF_IS_STATUS_ERROR(qdf_status))
1560 		dp_txrx_deinit(soc);
1561 
1562 	return qdf_status;
1563 }
1564 
1565 QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc)
1566 {
1567 	struct dp_txrx_handle *dp_ext_hdl;
1568 	struct dp_soc *dp_soc;
1569 
1570 	if (!soc)
1571 		return QDF_STATUS_E_INVAL;
1572 
1573 	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
1574 	if (!dp_ext_hdl)
1575 		return QDF_STATUS_E_FAULT;
1576 
1577 	dp_soc = cdp_soc_t_to_dp_soc(soc);
1578 	if (wlan_cfg_is_rx_refill_buffer_pool_enabled(dp_soc->wlan_cfg_ctx)) {
1579 		dp_rx_refill_thread_deinit(&dp_ext_hdl->refill_thread);
1580 		dp_ext_hdl->refill_thread.soc = NULL;
1581 		dp_ext_hdl->refill_thread.enabled = false;
1582 	}
1583 
1584 	if (dp_ext_hdl->config.enable_rx_threads)
1585 		dp_rx_tm_deinit(&dp_ext_hdl->rx_tm_hdl);
1586 
1587 	qdf_mem_free(dp_ext_hdl);
1588 	dp_info("dp_txrx_handle_t de-allocated");
1589 
1590 	cdp_soc_set_dp_txrx_handle(soc, NULL);
1591 
1592 	return QDF_STATUS_SUCCESS;
1593 }
1594 
1595 /**
1596  * dp_rx_tm_get_pending() - get the number of frames pending in the
1597  * rx thread nbuf queues
1598  * @soc: ol_txrx_soc_handle object
1599  *
1600  * Return: number of frames
1601  */
1602 #ifdef FEATURE_WLAN_DP_RX_THREADS
1603 int dp_rx_tm_get_pending(ol_txrx_soc_handle soc)
1604 {
1605 	int i;
1606 	int num_pending = 0;
1607 	struct dp_rx_thread *rx_thread;
1608 	struct dp_txrx_handle *dp_ext_hdl;
1609 	struct dp_rx_tm_handle *rx_tm_hdl;
1610 
1611 	if (!soc)
1612 		return 0;
1613 
1614 	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
1615 	if (!dp_ext_hdl)
1616 		return 0;
1617 
1618 	rx_tm_hdl = &dp_ext_hdl->rx_tm_hdl;
1619 
1620 	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
1621 		rx_thread = rx_tm_hdl->rx_thread[i];
1622 		if (!rx_thread)
1623 			continue;
1624 		num_pending += qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue);
1625 	}
1626 
1627 	if (num_pending)
1628 		dp_debug("pending frames in thread queue %d", num_pending);
1629 
1630 	return num_pending;
1631 }
1632 #else
1633 int dp_rx_tm_get_pending(ol_txrx_soc_handle soc)
1634 {
1635 	return 0;
1636 }
1637 #endif
1638