1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: Define internal APIs related to the packet capture component
22  */
23 
24 #include "wlan_pkt_capture_mon_thread.h"
25 #include "cds_ieee80211_common.h"
26 #include "wlan_mgmt_txrx_utils_api.h"
27 #include "cfg_ucfg_api.h"
28 #include "wlan_mgmt_txrx_utils_api.h"
29 
30 /*
31  * The following commit was introduced in v5.17:
32  * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
33  * Use the old name for kernels before 5.17
34  */
35 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
36 #define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
37 #endif
38 
void pkt_capture_mon(struct pkt_capture_cb_context *cb_ctx, qdf_nbuf_t msdu,
		     struct wlan_objmgr_vdev *vdev, uint16_t ch_freq)
{
	QDF_STATUS status;

	/*
	 * Hand the frame to the registered mon callback. On success the
	 * callee owns the buffer; on failure free it here so the nbuf is
	 * not leaked.
	 */
	status = cb_ctx->mon_cb(cb_ctx->mon_ctx, msdu);
	if (status != QDF_STATUS_SUCCESS) {
		pkt_capture_err("Frame Rx to HDD failed");
		qdf_nbuf_free(msdu);
	}
}
47 
pkt_capture_free_mon_pkt_freeq(struct pkt_capture_mon_context * mon_ctx)48 void pkt_capture_free_mon_pkt_freeq(struct pkt_capture_mon_context *mon_ctx)
49 {
50 	struct pkt_capture_mon_pkt *pkt;
51 
52 	spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
53 	while (!list_empty(&mon_ctx->mon_pkt_freeq)) {
54 		pkt = list_entry((&mon_ctx->mon_pkt_freeq)->next,
55 				 typeof(*pkt), list);
56 		list_del(&pkt->list);
57 		spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
58 		qdf_mem_free(pkt);
59 		spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
60 	}
61 	spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
62 }
63 
64 /**
65  * pkt_capture_alloc_mon_pkt_freeq() - Function to allocate free buffer queue
66  * @mon_ctx: pointer to packet capture mon context
67  *
68  * This API allocates MAX_MON_PKT_SIZE number of mon packets
69  * which are used for mon data processing.
70  *
71  * Return: QDF_STATUS
72  */
73 static QDF_STATUS
pkt_capture_alloc_mon_pkt_freeq(struct pkt_capture_mon_context * mon_ctx)74 pkt_capture_alloc_mon_pkt_freeq(struct pkt_capture_mon_context *mon_ctx)
75 {
76 	struct pkt_capture_mon_pkt *pkt, *tmp;
77 	int i;
78 
79 	for (i = 0; i < MAX_MON_PKT_SIZE; i++) {
80 		pkt = qdf_mem_malloc(sizeof(*pkt));
81 		if (!pkt)
82 			goto free;
83 
84 		spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
85 		list_add_tail(&pkt->list,
86 			      &mon_ctx->mon_pkt_freeq);
87 		spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
88 	}
89 
90 	return QDF_STATUS_SUCCESS;
91 free:
92 	spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
93 	list_for_each_entry_safe(pkt, tmp,
94 				 &mon_ctx->mon_pkt_freeq,
95 				 list) {
96 		list_del(&pkt->list);
97 		spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
98 		qdf_mem_free(pkt);
99 		spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
100 	}
101 	spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
102 
103 	return QDF_STATUS_E_NOMEM;
104 }
105 
106 /**
107  * pkt_capture_free_mon_pkt() - api to release mon packet to the freeq
108  * @mon_ctx: Pointer to packet capture mon context
109  * @pkt: MON packet buffer to be returned to free queue.
110  *
111  * This api returns the mon packet used for mon data to the free queue
112  *
113  * Return: None
114  */
115 static void
pkt_capture_free_mon_pkt(struct pkt_capture_mon_context * mon_ctx,struct pkt_capture_mon_pkt * pkt)116 pkt_capture_free_mon_pkt(struct pkt_capture_mon_context *mon_ctx,
117 			 struct pkt_capture_mon_pkt *pkt)
118 {
119 	memset(pkt, 0, sizeof(*pkt));
120 	spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
121 	list_add_tail(&pkt->list, &mon_ctx->mon_pkt_freeq);
122 	spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
123 }
124 
125 struct pkt_capture_mon_pkt *
pkt_capture_alloc_mon_pkt(struct wlan_objmgr_vdev * vdev)126 pkt_capture_alloc_mon_pkt(struct wlan_objmgr_vdev *vdev)
127 {
128 	struct pkt_capture_vdev_priv *vdev_priv;
129 	struct pkt_capture_mon_context *mon_ctx;
130 	struct pkt_capture_mon_pkt *pkt;
131 
132 	if (!vdev) {
133 		pkt_capture_err("vdev is NULL");
134 		return NULL;
135 	}
136 
137 	vdev_priv = pkt_capture_vdev_get_priv(vdev);
138 	if (!vdev_priv) {
139 		pkt_capture_err("packet capture vdev priv is NULL");
140 		return NULL;
141 	}
142 
143 	mon_ctx = vdev_priv->mon_ctx;
144 	if (!mon_ctx) {
145 		pkt_capture_err("packet capture mon context is NULL");
146 		return NULL;
147 	}
148 
149 	spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
150 	if (list_empty(&mon_ctx->mon_pkt_freeq)) {
151 		spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
152 		return NULL;
153 	}
154 
155 	pkt = list_first_entry(&mon_ctx->mon_pkt_freeq,
156 			       struct pkt_capture_mon_pkt, list);
157 	list_del(&pkt->list);
158 	spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
159 
160 	return pkt;
161 }
162 
pkt_capture_indicate_monpkt(struct wlan_objmgr_vdev * vdev,struct pkt_capture_mon_pkt * pkt)163 void pkt_capture_indicate_monpkt(struct wlan_objmgr_vdev *vdev,
164 				 struct pkt_capture_mon_pkt *pkt)
165 {
166 	struct pkt_capture_vdev_priv *vdev_priv;
167 	struct pkt_capture_mon_context *mon_ctx;
168 
169 	if (!vdev) {
170 		pkt_capture_err("vdev is NULL");
171 		return;
172 	}
173 
174 	vdev_priv = pkt_capture_vdev_get_priv(vdev);
175 	if (!vdev_priv) {
176 		pkt_capture_err("packet capture vdev priv is NULL");
177 		return;
178 	}
179 	mon_ctx = vdev_priv->mon_ctx;
180 
181 	spin_lock_bh(&mon_ctx->mon_queue_lock);
182 	list_add_tail(&pkt->list, &mon_ctx->mon_thread_queue);
183 	spin_unlock_bh(&mon_ctx->mon_queue_lock);
184 	set_bit(PKT_CAPTURE_RX_POST_EVENT, &mon_ctx->mon_event_flag);
185 	wake_up_interruptible(&mon_ctx->mon_wait_queue);
186 }
187 
pkt_capture_wakeup_mon_thread(struct wlan_objmgr_vdev * vdev)188 void pkt_capture_wakeup_mon_thread(struct wlan_objmgr_vdev *vdev)
189 {
190 	struct pkt_capture_vdev_priv *vdev_priv;
191 	struct pkt_capture_mon_context *mon_ctx;
192 
193 	if (!vdev) {
194 		pkt_capture_err("vdev is NULL");
195 		return;
196 	}
197 
198 	vdev_priv = pkt_capture_vdev_get_priv(vdev);
199 	if (!vdev_priv) {
200 		pkt_capture_err("packet capture vdev priv is NULL");
201 		return;
202 	}
203 	mon_ctx = vdev_priv->mon_ctx;
204 
205 	set_bit(PKT_CAPTURE_RX_POST_EVENT, &mon_ctx->mon_event_flag);
206 	wake_up_interruptible(&mon_ctx->mon_wait_queue);
207 }
208 
209 /**
210  * pkt_capture_process_from_queue() - function to process pending mon packets
211  * @mon_ctx: Pointer to packet capture mon context
212  *
213  * This api traverses the pending buffer list and calling the callback.
214  * This callback would essentially send the packet to HDD.
215  *
216  * Return: None
217  */
218 static void
pkt_capture_process_from_queue(struct pkt_capture_mon_context * mon_ctx)219 pkt_capture_process_from_queue(struct pkt_capture_mon_context *mon_ctx)
220 {
221 	struct pkt_capture_mon_pkt *pkt;
222 	uint8_t vdev_id;
223 	uint8_t tid;
224 
225 	spin_lock_bh(&mon_ctx->mon_queue_lock);
226 	while (!list_empty(&mon_ctx->mon_thread_queue)) {
227 		if (!test_bit(PKT_CAPTURE_REGISTER_EVENT,
228 			      &mon_ctx->mon_event_flag)) {
229 			complete(&mon_ctx->mon_register_event);
230 			break;
231 		}
232 		pkt = list_first_entry(&mon_ctx->mon_thread_queue,
233 				       struct pkt_capture_mon_pkt, list);
234 		list_del(&pkt->list);
235 		spin_unlock_bh(&mon_ctx->mon_queue_lock);
236 		vdev_id = pkt->vdev_id;
237 		tid = pkt->tid;
238 		pkt->callback(pkt->context, pkt->pdev, pkt->monpkt, vdev_id,
239 			      tid, pkt->status, pkt->pkt_format, pkt->bssid,
240 			      pkt->tx_retry_cnt);
241 		pkt_capture_free_mon_pkt(mon_ctx, pkt);
242 		spin_lock_bh(&mon_ctx->mon_queue_lock);
243 	}
244 	spin_unlock_bh(&mon_ctx->mon_queue_lock);
245 }
246 
247 /**
248  * pkt_capture_mon_thread() - packet capture mon thread
249  * @arg: Pointer to vdev object manager
250  *
251  * This api is the thread handler for mon Data packet processing.
252  *
253  * Return: thread exit code
254  */
pkt_capture_mon_thread(void * arg)255 static int pkt_capture_mon_thread(void *arg)
256 {
257 	struct pkt_capture_mon_context *mon_ctx;
258 	unsigned long pref_cpu = 0;
259 	bool shutdown = false;
260 	int status, i;
261 
262 	if (!arg) {
263 		pkt_capture_err("Bad Args passed to mon thread");
264 		return 0;
265 	}
266 	mon_ctx = (struct pkt_capture_mon_context *)arg;
267 	set_user_nice(current, -1);
268 #ifdef MSM_PLATFORM
269 	qdf_set_wake_up_idle(true);
270 #endif
271 
272 	/**
273 	 * Find the available cpu core other than cpu 0 and
274 	 * bind the thread
275 	 */
276 	for_each_online_cpu(i) {
277 		if (i == 0)
278 			continue;
279 		pref_cpu = i;
280 			break;
281 	}
282 
283 	set_cpus_allowed_ptr(current, cpumask_of(pref_cpu));
284 
285 	complete(&mon_ctx->mon_start_event);
286 
287 	while (!shutdown) {
288 		status =
289 		wait_event_interruptible(mon_ctx->mon_wait_queue,
290 					 test_bit(PKT_CAPTURE_RX_POST_EVENT,
291 						  &mon_ctx->mon_event_flag) ||
292 					 test_bit(PKT_CAPTURE_RX_SUSPEND_EVENT,
293 						  &mon_ctx->mon_event_flag));
294 		if (status == -ERESTARTSYS)
295 			break;
296 
297 		clear_bit(PKT_CAPTURE_RX_POST_EVENT,
298 			  &mon_ctx->mon_event_flag);
299 		while (true) {
300 			if (test_bit(PKT_CAPTURE_RX_SHUTDOWN_EVENT,
301 				     &mon_ctx->mon_event_flag)) {
302 				clear_bit(PKT_CAPTURE_RX_SHUTDOWN_EVENT,
303 					  &mon_ctx->mon_event_flag);
304 				if (test_bit(
305 					PKT_CAPTURE_RX_SUSPEND_EVENT,
306 					&mon_ctx->mon_event_flag)) {
307 					clear_bit(PKT_CAPTURE_RX_SUSPEND_EVENT,
308 						  &mon_ctx->mon_event_flag);
309 					complete(&mon_ctx->suspend_mon_event);
310 				}
311 				pkt_capture_info("Shutting down pktcap thread");
312 				shutdown = true;
313 				break;
314 			}
315 
316 			/*
317 			 * if packet capture deregistratin happens stop
318 			 * processing packets in queue because mon cb will
319 			 * be set to NULL.
320 			 */
321 			if (test_bit(PKT_CAPTURE_REGISTER_EVENT,
322 				     &mon_ctx->mon_event_flag))
323 				pkt_capture_process_from_queue(mon_ctx);
324 			else
325 				complete(&mon_ctx->mon_register_event);
326 
327 			if (test_bit(PKT_CAPTURE_RX_SUSPEND_EVENT,
328 				     &mon_ctx->mon_event_flag)) {
329 				clear_bit(PKT_CAPTURE_RX_SUSPEND_EVENT,
330 					  &mon_ctx->mon_event_flag);
331 				spin_lock(&mon_ctx->mon_thread_lock);
332 				INIT_COMPLETION(mon_ctx->resume_mon_event);
333 				complete(&mon_ctx->suspend_mon_event);
334 				spin_unlock(&mon_ctx->mon_thread_lock);
335 				wait_for_completion_interruptible
336 					(&mon_ctx->resume_mon_event);
337 			}
338 			break;
339 		}
340 	}
341 	pkt_capture_debug("Exiting packet capture mon thread");
342 	kthread_complete_and_exit(&mon_ctx->mon_shutdown, 0);
343 
344 	return 0;
345 }
346 
pkt_capture_close_mon_thread(struct pkt_capture_mon_context * mon_ctx)347 void pkt_capture_close_mon_thread(struct pkt_capture_mon_context *mon_ctx)
348 {
349 	if (!mon_ctx->mon_thread)
350 		return;
351 
352 	/* Shut down mon thread */
353 	set_bit(PKT_CAPTURE_RX_SHUTDOWN_EVENT,
354 		&mon_ctx->mon_event_flag);
355 	set_bit(PKT_CAPTURE_RX_POST_EVENT,
356 		&mon_ctx->mon_event_flag);
357 	wake_up_interruptible(&mon_ctx->mon_wait_queue);
358 	wait_for_completion(&mon_ctx->mon_shutdown);
359 	mon_ctx->mon_thread = NULL;
360 	pkt_capture_drop_monpkt(mon_ctx);
361 	pkt_capture_free_mon_pkt_freeq(mon_ctx);
362 }
363 
364 QDF_STATUS
pkt_capture_open_mon_thread(struct pkt_capture_mon_context * mon_ctx)365 pkt_capture_open_mon_thread(struct pkt_capture_mon_context *mon_ctx)
366 {
367 	mon_ctx->mon_thread = kthread_create(pkt_capture_mon_thread,
368 					     mon_ctx,
369 					     "pkt_capture_mon_thread");
370 
371 	if (IS_ERR(mon_ctx->mon_thread)) {
372 		pkt_capture_fatal("Could not Create packet capture mon thread");
373 		return QDF_STATUS_E_FAILURE;
374 	}
375 	wake_up_process(mon_ctx->mon_thread);
376 	pkt_capture_debug("packet capture MON thread Created");
377 
378 	wait_for_completion_interruptible(&mon_ctx->mon_start_event);
379 	pkt_capture_debug("packet capture MON Thread has started");
380 
381 	return QDF_STATUS_SUCCESS;
382 }
383 
pkt_capture_drop_monpkt(struct pkt_capture_mon_context * mon_ctx)384 void pkt_capture_drop_monpkt(struct pkt_capture_mon_context *mon_ctx)
385 {
386 	struct pkt_capture_mon_pkt *pkt, *tmp;
387 	struct list_head local_list;
388 	qdf_nbuf_t buf, next_buf;
389 
390 	INIT_LIST_HEAD(&local_list);
391 	spin_lock_bh(&mon_ctx->mon_queue_lock);
392 	if (list_empty(&mon_ctx->mon_thread_queue)) {
393 		spin_unlock_bh(&mon_ctx->mon_queue_lock);
394 		return;
395 	}
396 	list_for_each_entry_safe(pkt, tmp,
397 				 &mon_ctx->mon_thread_queue,
398 				 list)
399 		list_move_tail(&pkt->list, &local_list);
400 
401 	spin_unlock_bh(&mon_ctx->mon_queue_lock);
402 
403 	list_for_each_entry_safe(pkt, tmp, &local_list, list) {
404 		list_del(&pkt->list);
405 		buf = pkt->monpkt;
406 		while (buf) {
407 			next_buf = qdf_nbuf_queue_next(buf);
408 			qdf_nbuf_free(buf);
409 			buf = next_buf;
410 		}
411 		pkt_capture_free_mon_pkt(mon_ctx, pkt);
412 	}
413 }
414 
pkt_capture_suspend_mon_thread(struct wlan_objmgr_vdev * vdev)415 int pkt_capture_suspend_mon_thread(struct wlan_objmgr_vdev *vdev)
416 {
417 	struct pkt_capture_vdev_priv *vdev_priv;
418 	struct pkt_capture_mon_context *mon_ctx;
419 	int rc;
420 
421 	if (!vdev) {
422 		pkt_capture_err("vdev is NULL");
423 		return -EINVAL;
424 	}
425 
426 	vdev_priv = pkt_capture_vdev_get_priv(vdev);
427 	if (!vdev_priv) {
428 		pkt_capture_err("packet capture vdev priv is NULL");
429 		return -EINVAL;
430 	}
431 	mon_ctx = vdev_priv->mon_ctx;
432 	if (!mon_ctx) {
433 		pkt_capture_err("packet capture mon context is NULL");
434 		return -EINVAL;
435 	}
436 
437 	set_bit(PKT_CAPTURE_RX_SUSPEND_EVENT,
438 		&mon_ctx->mon_event_flag);
439 	wake_up_interruptible(&mon_ctx->mon_wait_queue);
440 	rc = wait_for_completion_timeout(
441 			&mon_ctx->suspend_mon_event,
442 			msecs_to_jiffies(PKT_CAPTURE_SUSPEND_TIMEOUT));
443 	if (!rc) {
444 		clear_bit(PKT_CAPTURE_RX_SUSPEND_EVENT,
445 			  &mon_ctx->mon_event_flag);
446 		pkt_capture_err("Failed to suspend packet capture mon thread");
447 		return -EINVAL;
448 	}
449 	mon_ctx->is_mon_thread_suspended = true;
450 
451 	return 0;
452 }
453 
pkt_capture_resume_mon_thread(struct wlan_objmgr_vdev * vdev)454 void pkt_capture_resume_mon_thread(struct wlan_objmgr_vdev *vdev)
455 {
456 	struct pkt_capture_vdev_priv *vdev_priv;
457 	struct pkt_capture_mon_context *mon_ctx;
458 
459 	if (!vdev) {
460 		pkt_capture_err("vdev is NULL");
461 		return;
462 	}
463 
464 	vdev_priv = pkt_capture_vdev_get_priv(vdev);
465 	if (!vdev_priv) {
466 		pkt_capture_err("packet capture vdev priv is NULL");
467 		return;
468 	}
469 	mon_ctx = vdev_priv->mon_ctx;
470 	if (!mon_ctx) {
471 		pkt_capture_err("packet capture mon context is NULL");
472 		return;
473 	}
474 
475 	if (mon_ctx->is_mon_thread_suspended)
476 		complete(&mon_ctx->resume_mon_event);
477 }
478 
479 QDF_STATUS
pkt_capture_alloc_mon_thread(struct pkt_capture_mon_context * mon_ctx)480 pkt_capture_alloc_mon_thread(struct pkt_capture_mon_context *mon_ctx)
481 {
482 	spin_lock_init(&mon_ctx->mon_thread_lock);
483 	init_waitqueue_head(&mon_ctx->mon_wait_queue);
484 	init_completion(&mon_ctx->mon_start_event);
485 	init_completion(&mon_ctx->suspend_mon_event);
486 	init_completion(&mon_ctx->resume_mon_event);
487 	init_completion(&mon_ctx->mon_shutdown);
488 	init_completion(&mon_ctx->mon_register_event);
489 	mon_ctx->mon_event_flag = 0;
490 	spin_lock_init(&mon_ctx->mon_queue_lock);
491 	spin_lock_init(&mon_ctx->mon_pkt_freeq_lock);
492 	INIT_LIST_HEAD(&mon_ctx->mon_thread_queue);
493 	spin_lock_bh(&mon_ctx->mon_pkt_freeq_lock);
494 	INIT_LIST_HEAD(&mon_ctx->mon_pkt_freeq);
495 	spin_unlock_bh(&mon_ctx->mon_pkt_freeq_lock);
496 
497 	return pkt_capture_alloc_mon_pkt_freeq(mon_ctx);
498 }
499