/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: os_if_dp_txrx.c
 * This file contains the DP component's TX/RX OS IF API implementations.
 */
#include "os_if_dp.h"
#include "os_if_dp_lro.h"
#include <wlan_dp_public_struct.h>
#include <wlan_objmgr_vdev_obj.h>
#include "osif_sync.h"
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_misc.h>
#include <net/tcp.h>
#include <ol_defines.h>
#include <hif_napi.h>
#include <hif.h>
#include <wlan_hdd_main.h>
#include "wlan_hdd_wmm.h"

/**
 * osif_dp_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Return: none
 */
void osif_dp_classify_pkt(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	qdf_mem_zero(skb->cb, sizeof(skb->cb));

	/*
	 * Check whether the destination MAC address is broadcast or
	 * multicast; h_dest is the first field of struct ethhdr, so eh
	 * itself points at the destination address.
	 */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
	else if (qdf_nbuf_is_icmp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMP;
	else if (qdf_nbuf_is_icmpv6_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
}
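
/*
 * Usage sketch (illustrative, not part of this driver): a netdev TX entry
 * point would typically classify the skb before handing it to the DP TX
 * path. my_hard_start_xmit() and dp_start_xmit() below are hypothetical
 * names.
 *
 *	static netdev_tx_t my_hard_start_xmit(struct sk_buff *skb,
 *					      struct net_device *dev)
 *	{
 *		osif_dp_classify_pkt(skb);
 *		return dp_start_xmit(skb, dev);
 *	}
 */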

/**
 * osif_dp_mark_critical_pkt() - Identify and mark critical packets
 * @skb: skb ptr
 *
 * Return: None
 */
static void osif_dp_mark_critical_pkt(struct sk_buff *skb)
{
	if (qdf_nbuf_is_ipv4_eapol_pkt(skb)) {
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
				QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	} else if (qdf_nbuf_is_ipv4_arp_pkt(skb)) {
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
				QDF_NBUF_CB_PACKET_TYPE_ARP;
	} else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb)) {
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
				QDF_NBUF_CB_PACKET_TYPE_DHCP;
	} else if (qdf_nbuf_is_ipv6_dhcp_pkt(skb)) {
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
				QDF_NBUF_CB_PACKET_TYPE_DHCPV6;
	} else if (qdf_nbuf_is_icmpv6_pkt(skb)) {
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
	}

	QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) = true;
}

#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
/**
 * osif_dp_mark_pkt_type_by_priority() - mark the packet type in skb->cb
 *                                       based on the type encoded in
 *                                       skb->priority
 * @skb: network buffer
 *
 * Return: true - packet type marked, false - not marked
 */
static inline
bool osif_dp_mark_pkt_type_by_priority(struct sk_buff *skb)
{
	bool type_marked = false;
	uint32_t pkt_type =
		qdf_nbuf_get_priority_pkt_type(skb);

	if (qdf_unlikely(pkt_type == QDF_NBUF_PRIORITY_PKT_TCP_ACK)) {
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
					QDF_NBUF_CB_PACKET_TYPE_TCP_ACK;
		type_marked = true;
	}
	/* cleanup the packet type in priority */
	qdf_nbuf_remove_priority_pkt_type(skb);

	return type_marked;
}
#else
static inline
bool osif_dp_mark_pkt_type_by_priority(struct sk_buff *skb)
{
	return false;
}
#endif
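
/*
 * Note (assumption, for illustration): with DP_TX_PACKET_INSPECT_FOR_ILP
 * enabled, an earlier TX-inspection stage is expected to have encoded the
 * packet type into skb->priority, e.g. via a qdf setter such as
 * qdf_nbuf_set_priority_pkt_type(skb, QDF_NBUF_PRIORITY_PKT_TCP_ACK)
 * (setter name assumed from the getter/remover used above).
 * osif_dp_mark_pkt_type_by_priority() consumes that tag, copies it into
 * skb->cb, and then clears it from skb->priority so the stack sees a clean
 * priority value.
 */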

/**
 * osif_dp_mark_non_critical_pkt() - Identify and mark non-critical packets
 * @skb: skb ptr
 *
 * Return: None
 */
static void osif_dp_mark_non_critical_pkt(struct sk_buff *skb)
{
	/* check if the packet type is already marked via skb->priority */
	if (osif_dp_mark_pkt_type_by_priority(skb))
		return;

	if (qdf_nbuf_is_icmp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
				QDF_NBUF_CB_PACKET_TYPE_ICMP;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
}

void osif_dp_mark_pkt_type(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	/*
	 * Zero out CB before accessing it. The expectation is that cb is
	 * accessed for the first time here on the TX path, in
	 * hard_start_xmit.
	 */
	qdf_mem_zero(skb->cb, sizeof(skb->cb));

	/* check whether the destination MAC address is broadcast/multicast */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

	/*
	 * TX packets in the HI_PRIO queue are assumed to be critical and
	 * are marked accordingly.
	 */
	if (skb->queue_mapping == TX_GET_QUEUE_IDX(HDD_LINUX_AC_HI_PRIO, 0))
		osif_dp_mark_critical_pkt(skb);
	else
		osif_dp_mark_non_critical_pkt(skb);
}
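
/*
 * Illustrative sketch (assumptions, not driver code): skb->queue_mapping is
 * expected to have been set by the WMM select_queue handler before this
 * function runs, and the DP TX path can later branch on the marking, e.g.:
 *
 *	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb))
 *		; /- steer the frame to a high-priority TX queue -/
 */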

/*
 * When the bus bandwidth is idle and RX data is delivered with
 * napi_gro_receive(), check the gro_result returned by napi_gro_receive()
 * to determine whether an extra GRO flush is still necessary to reduce the
 * RX delay introduced by GRO.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
#define DP_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) true
#define GRO_DROP_UPDATE_STATUS(gro_ret, status)
#else
#define GRO_DROP_UPDATE_STATUS(gro_ret, status) \
	do { \
		if ((gro_ret) == GRO_DROP) \
			(status) = QDF_STATUS_E_GRO_DROP; \
	} while (0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
#define DP_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) \
	((_gro_ret) != GRO_DROP)
#else
#define DP_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) \
	((_gro_ret) != GRO_DROP && (_gro_ret) != GRO_NORMAL)
#endif
#endif
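
/*
 * How the macros above resolve (descriptive summary):
 *  - kernel >= 5.12: GRO_DROP no longer exists, so GRO_DROP_UPDATE_STATUS()
 *    is a no-op and the extra flush check is unconditionally true.
 *  - 5.4 <= kernel < 5.12: flush unless napi_gro_receive() dropped the skb.
 *  - kernel < 5.4: additionally skip the flush for GRO_NORMAL, since such
 *    skbs were delivered to the stack immediately rather than batched on
 *    napi->rx_list.
 */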

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
/**
 * osif_dp_rx_thread_napi_gro_flush() - do gro flush
 * @napi: napi used to do gro flush
 * @flush_code: flush_code differentiating low_tput_flush and normal_flush
 *
 * If there are RX GRO_NORMAL packets pending in the napi rx_list, flush
 * them manually right after napi_gro_flush.
 *
 * Return: none
 */
static inline
void osif_dp_rx_thread_napi_gro_flush(struct napi_struct *napi,
				      enum dp_rx_gro_flush_code flush_code)
{
	if (napi->poll) {
		/* Skipping GRO flush in low TPUT */
		if (flush_code != DP_RX_GRO_LOW_TPUT_FLUSH)
			napi_gro_flush(napi, false);

		if (napi->rx_count) {
			netif_receive_skb_list(&napi->rx_list);
			qdf_init_list_head(&napi->rx_list);
			napi->rx_count = 0;
		}
	}
}
#else
static inline
void osif_dp_rx_thread_napi_gro_flush(struct napi_struct *napi,
				      enum dp_rx_gro_flush_code flush_code)
{
	if (napi->poll) {
		/* Skipping GRO flush in low TPUT */
		if (flush_code != DP_RX_GRO_LOW_TPUT_FLUSH)
			napi_gro_flush(napi, false);
	}
}
#endif
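
/*
 * Background (descriptive): since kernel 5.4, GRO_NORMAL skbs are batched
 * on napi->rx_list instead of being passed up immediately, and the core
 * kernel drains that list only at the end of a NAPI poll. Because the
 * rx_thread NAPI here is not driven by the kernel's poll loop, the list
 * has to be drained manually with netif_receive_skb_list() above.
 */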

/**
 * osif_dp_rx_napi_gro_flush() - GRO RX/flush function.
 * @napi_to_use: napi to be used to give packets to the stack, gro flush
 * @nbuf: pointer to n/w buff
 * @low_tput_force_flush: is a force flush required in low tput
 *
 * Function calls napi_gro_receive for the skb. If the skb indicates that a
 * flush needs to be done (set by the lower DP layer), the function also calls
 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
 * the napi_gro_* calls.
 *
 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
 *	   QDF error code.
 */
static QDF_STATUS
osif_dp_rx_napi_gro_flush(qdf_napi_struct *napi_to_use,
			  qdf_nbuf_t nbuf,
			  uint8_t *low_tput_force_flush)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	gro_result_t gro_ret;

	skb_set_hash(nbuf, QDF_NBUF_CB_RX_FLOW_ID(nbuf), PKT_HASH_TYPE_L4);

	local_bh_disable();
	gro_ret = napi_gro_receive((struct napi_struct *)napi_to_use, nbuf);

	if (DP_IS_EXTRA_GRO_FLUSH_NECESSARY(gro_ret)) {
		*low_tput_force_flush = 1;
		osif_dp_rx_thread_napi_gro_flush((struct napi_struct *)napi_to_use,
						 DP_RX_GRO_NORMAL_FLUSH);
	}

	local_bh_enable();
	GRO_DROP_UPDATE_STATUS(gro_ret, status);

	return status;
}
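
/*
 * Usage sketch (illustrative): the DP RX thread is expected to invoke this
 * through the callback registered below, using the out-parameter to decide
 * whether a later low-tput flush can be skipped, e.g.:
 *
 *	uint8_t force_flush = 0;
 *
 *	status = cb_obj->dp_rx_napi_gro_flush(napi, nbuf, &force_flush);
 */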

/**
 * osif_dp_rx_napi_gro_receive() - GRO RX receive function.
 * @napi_to_use: napi to be used to give packets to the stack
 * @nbuf: pointer to n/w buff
 *
 * Function calls napi_gro_receive for the skb. Local softirqs are disabled
 * (and later enabled) while making the napi_gro_* call.
 *
 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
 *	   QDF error code.
 */
static QDF_STATUS
osif_dp_rx_napi_gro_receive(qdf_napi_struct *napi_to_use,
			    qdf_nbuf_t nbuf)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	gro_result_t gro_ret;

	skb_set_hash(nbuf, QDF_NBUF_CB_RX_FLOW_ID(nbuf), PKT_HASH_TYPE_L4);

	local_bh_disable();
	gro_ret = napi_gro_receive((struct napi_struct *)napi_to_use, nbuf);

	local_bh_enable();
	GRO_DROP_UPDATE_STATUS(gro_ret, status);

	return status;
}

#ifdef RECEIVE_OFFLOAD
/**
 * osif_dp_rxthread_napi_normal_gro_flush() - GRO flush cbk for NAPI+Rx_Thread
 * Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void osif_dp_rxthread_napi_normal_gro_flush(void *data)
{
	struct qca_napi_info *qca_napi = (struct qca_napi_info *)data;

	local_bh_disable();
	/*
	 * As we are breaking context in Rx-thread mode, there is an
	 * rx_thread NAPI corresponding to each hif_napi.
	 */
	osif_dp_rx_thread_napi_gro_flush(&qca_napi->rx_thread_napi,
					 DP_RX_GRO_NORMAL_FLUSH);
	local_bh_enable();
}

/**
 * osif_dp_hif_napi_gro_flush() - GRO flush callback for NAPI Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void osif_dp_hif_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napi = (struct qca_napi_info *)data;

	local_bh_disable();
	napi_gro_flush(&qca_napi->napi, false);
	local_bh_enable();
}
#endif

#ifdef FEATURE_LRO
/**
 * osif_dp_qdf_lro_flush() - LRO flush wrapper
 * @data: hif NAPI context
 *
 * Return: none
 */
static void osif_dp_qdf_lro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
	qdf_lro_ctx_t qdf_lro_ctx = qca_napii->lro_ctx;

	qdf_lro_flush(qdf_lro_ctx);
}
#elif defined(RECEIVE_OFFLOAD)
static void osif_dp_qdf_lro_flush(void *data)
{
}
#endif

#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
static enum qdisc_filter_status
__osif_check_for_prio_filter_in_clsact_qdisc(struct tcf_block *block,
					     uint32_t prio)
{
	struct tcf_chain *chain;
	struct tcf_proto *tp;
	struct tcf_proto *tp_next;
	enum qdisc_filter_status ret = QDISC_FILTER_PRIO_MISMATCH;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		mutex_lock(&chain->filter_chain_lock);
		tp = tcf_chain_dereference(chain->filter_chain, chain);
		while (tp) {
			tp_next = rtnl_dereference(tp->next);
			if (tp->prio == (prio << 16)) {
				ret = QDISC_FILTER_PRIO_MATCH;
				break;
			}
			tp = tp_next;
		}
		mutex_unlock(&chain->filter_chain_lock);

		if (ret == QDISC_FILTER_PRIO_MATCH)
			break;
	}
	mutex_unlock(&block->lock);

	return ret;
}
#else
static enum qdisc_filter_status
__osif_check_for_prio_filter_in_clsact_qdisc(struct tcf_block *block,
					     uint32_t prio)
{
	struct tcf_chain *chain;
	struct tcf_proto *tp;
	enum qdisc_filter_status ret = QDISC_FILTER_PRIO_MISMATCH;

	if (!rtnl_trylock())
		return QDISC_FILTER_RTNL_LOCK_FAIL;

	list_for_each_entry(chain, &block->chain_list, list) {
		for (tp = rtnl_dereference(chain->filter_chain); tp;
		     tp = rtnl_dereference(tp->next)) {
			if (tp->prio == (prio << 16))
				ret = QDISC_FILTER_PRIO_MATCH;
		}
	}
	rtnl_unlock();

	return ret;
}
#endif
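
/*
 * Note on the (prio << 16) comparison: the TC core stores a filter's
 * user-visible priority in the upper 16 bits of tcf_proto::prio. A filter
 * installed from user space with, e.g. (illustrative command, hypothetical
 * interface name):
 *
 *	tc filter add dev wlan0 ingress prio 3 protocol ip matchall \
 *		action drop
 *
 * therefore shows up here with tp->prio == (3 << 16).
 */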

/**
 * osif_check_for_prio_filter_in_clsact_qdisc() - Check if a filter with the
 *  given priority is configured in the ingress clsact qdisc
 * @qdisc: pointer to clsact qdisc
 * @prio: traffic priority
 *
 * Return: qdisc filter status
 */
static enum qdisc_filter_status
osif_check_for_prio_filter_in_clsact_qdisc(struct Qdisc *qdisc, uint32_t prio)
{
	const struct Qdisc_class_ops *cops;
	struct tcf_block *ingress_block;

	cops = qdisc->ops->cl_ops;
	if (qdf_unlikely(!cops || !cops->tcf_block))
		return QDISC_FILTER_PRIO_MISMATCH;

	ingress_block = cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
	if (qdf_unlikely(!ingress_block))
		return QDISC_FILTER_PRIO_MISMATCH;

	return __osif_check_for_prio_filter_in_clsact_qdisc(ingress_block,
							    prio);
}

/**
 * osif_dp_rx_check_qdisc_configured() - Check if an ingress qdisc is
 * configured for the given netdev
 * @ndev: pointer to netdev
 * @prio: traffic priority
 *
 * The function checks whether an ingress qdisc is registered for the given
 * net device.
 *
 * Return: QDF_STATUS_SUCCESS if an ingress qdisc (or a clsact qdisc with a
 * matching priority filter) is configured, QDF_STATUS_E_AGAIN if the RTNL
 * lock could not be taken, QDF_STATUS_E_NOSUPPORT otherwise.
 */
static QDF_STATUS
osif_dp_rx_check_qdisc_configured(qdf_netdev_t ndev, uint32_t prio)
{
	struct netdev_queue *ingress_q;
	struct Qdisc *ingress_qdisc;
	struct net_device *dev = (struct net_device *)ndev;
	bool disable_gro = false;
	enum qdisc_filter_status status;

	if (!dev->ingress_queue)
		goto reset_wl;

	if (!rtnl_trylock())
		return QDF_STATUS_E_AGAIN;

	ingress_q = rtnl_dereference(dev->ingress_queue);
	if (qdf_unlikely(!ingress_q))
		goto reset;

	ingress_qdisc = rtnl_dereference(ingress_q->qdisc);
	if (qdf_unlikely(!ingress_qdisc))
		goto reset;

	if (qdf_str_eq(ingress_qdisc->ops->id, "ingress")) {
		disable_gro = true;
	} else if (qdf_str_eq(ingress_qdisc->ops->id, "clsact")) {
		status = osif_check_for_prio_filter_in_clsact_qdisc(
								  ingress_qdisc,
								  prio);

		if (status == QDISC_FILTER_PRIO_MISMATCH)
			goto reset;

		disable_gro = true;
	}

	if (disable_gro) {
		rtnl_unlock();
		return QDF_STATUS_SUCCESS;
	}

reset:
	rtnl_unlock();

reset_wl:
	return QDF_STATUS_E_NOSUPPORT;
}

#else
static QDF_STATUS
osif_dp_rx_check_qdisc_configured(qdf_netdev_t ndev, uint32_t prio)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif
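
/*
 * Caller semantics (assumption, for illustration): the DP layer is expected
 * to poll this through cb_obj->dp_rx_check_qdisc_configured and disable RX
 * GRO while it returns QDF_STATUS_SUCCESS, the usual rationale being that
 * aggregated frames would defeat per-packet ingress classification
 * (rationale assumed, not stated in this file).
 */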

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
static void
osif_dp_register_arp_unsolicited_cbk(struct wlan_dp_psoc_callbacks *cb_obj)
{
	cb_obj->dp_is_gratuitous_arp_unsolicited_na = NULL;
}
#else
static bool osif_dp_is_gratuitous_arp_unsolicited_na(qdf_nbuf_t nbuf)
{
	return cfg80211_is_gratuitous_arp_unsolicited_na((struct sk_buff *)nbuf);
}

static void
osif_dp_register_arp_unsolicited_cbk(struct wlan_dp_psoc_callbacks *cb_obj)
{
	cb_obj->dp_is_gratuitous_arp_unsolicited_na =
		osif_dp_is_gratuitous_arp_unsolicited_na;
}
#endif

#if defined(CFG80211_CTRL_FRAME_SRC_ADDR_TA_ADDR)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 41))
static
bool osif_dp_cfg80211_rx_control_port(qdf_netdev_t dev, u8 *ta_addr,
				      qdf_nbuf_t nbuf, bool unencrypted)
{
	return cfg80211_rx_control_port((struct net_device *)dev,
					(struct sk_buff *)nbuf,
					unencrypted, -1);
}

#else
static
bool osif_dp_cfg80211_rx_control_port(qdf_netdev_t dev, u8 *ta_addr,
				      qdf_nbuf_t nbuf, bool unencrypted)
{
	return cfg80211_rx_control_port((struct net_device *)dev,
					ta_addr, (struct sk_buff *)nbuf,
					unencrypted);
}
#endif

static void
osif_dp_register_send_rx_pkt_over_nl(struct wlan_dp_psoc_callbacks *cb_obj)
{
	cb_obj->dp_send_rx_pkt_over_nl = osif_dp_cfg80211_rx_control_port;
}

#else
static void
osif_dp_register_send_rx_pkt_over_nl(struct wlan_dp_psoc_callbacks *cb_obj)
{
	cb_obj->dp_send_rx_pkt_over_nl = NULL;
}
#endif

#ifdef RECEIVE_OFFLOAD
static
void osif_dp_register_rx_offld_flush_cb(enum dp_rx_offld_flush_cb cb_type)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (cb_type == DP_RX_FLUSH_LRO)
		cdp_register_rx_offld_flush_cb(soc, osif_dp_qdf_lro_flush);
	else if (cb_type == DP_RX_FLUSH_THREAD)
		cdp_register_rx_offld_flush_cb(soc,
					       osif_dp_rxthread_napi_normal_gro_flush);
	else if (cb_type == DP_RX_FLUSH_NAPI)
		cdp_register_rx_offld_flush_cb(soc,
					       osif_dp_hif_napi_gro_flush);
}
#else
static
void osif_dp_register_rx_offld_flush_cb(enum dp_rx_offld_flush_cb cb_type) { }
#endif

static
QDF_STATUS osif_dp_rx_pkt_to_nw(qdf_nbuf_t nbuf, enum dp_nbuf_push_type type)
{
	int netif_status;

	if (type == DP_NBUF_PUSH_BH_DISABLE) {
		local_bh_disable();
		netif_status = netif_receive_skb(nbuf);
		local_bh_enable();
	} else if (type == DP_NBUF_PUSH_NI) {
		netif_status = netif_rx_ni(nbuf);
	} else if (type == DP_NBUF_PUSH_NAPI) {
		netif_status = netif_receive_skb(nbuf);
	} else {
		netif_status = netif_rx(nbuf);
	}

	if (qdf_likely(netif_status == NET_RX_SUCCESS))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
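
/*
 * Summary of the push types above (descriptive): DP_NBUF_PUSH_NAPI and
 * DP_NBUF_PUSH_BH_DISABLE both end in netif_receive_skb(), the latter
 * wrapping it in local_bh_disable()/local_bh_enable() for callers outside
 * softirq context; DP_NBUF_PUSH_NI uses netif_rx_ni() for process context;
 * anything else falls back to netif_rx(). Note that the DP_NBUF_PUSH_NI
 * path assumes a kernel that still provides netif_rx_ni(), which was folded
 * into netif_rx() in kernel 5.18.
 */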

void os_if_dp_register_txrx_callbacks(struct wlan_dp_psoc_callbacks *cb_obj)
{
	cb_obj->dp_nbuf_push_pkt = osif_dp_rx_pkt_to_nw;
	cb_obj->dp_rx_napi_gro_flush = osif_dp_rx_napi_gro_flush;
	cb_obj->dp_rx_napi_gro_receive = osif_dp_rx_napi_gro_receive;
	cb_obj->dp_rx_thread_napi_gro_flush = osif_dp_rx_thread_napi_gro_flush;
	cb_obj->dp_lro_rx_cb = osif_dp_lro_rx;
	cb_obj->dp_register_rx_offld_flush_cb =
		osif_dp_register_rx_offld_flush_cb;
	cb_obj->dp_rx_check_qdisc_configured =
		osif_dp_rx_check_qdisc_configured;

	osif_dp_register_arp_unsolicited_cbk(cb_obj);

	osif_dp_register_send_rx_pkt_over_nl(cb_obj);
}
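
/*
 * Usage sketch (illustrative; dp_psoc_ctx and the surrounding attach
 * function are hypothetical): the DP component is expected to populate its
 * callback table once during psoc attach, e.g.:
 *
 *	os_if_dp_register_txrx_callbacks(&dp_psoc_ctx->cb_obj);
 */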