xref: /wlan-dirver/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8)
1 /*
2  * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
3  *
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 /**
20  * @file cdp_txrx_ops.h
21  * @brief Define the host data path converged API functions
22  * called by the host control SW and the OS interface module
23  */
24 #ifndef _CDP_TXRX_CMN_OPS_H_
25 #define _CDP_TXRX_CMN_OPS_H_
26 
27 #include <cdp_txrx_cmn_struct.h>
28 #include <cdp_txrx_stats_struct.h>
29 #include "cdp_txrx_handle.h"
30 #include <cdp_txrx_mon_struct.h>
31 #include "wlan_objmgr_psoc_obj.h"
32 
33 #ifdef IPA_OFFLOAD
34 #ifdef CONFIG_IPA_WDI_UNIFIED_API
35 #include <qdf_ipa_wdi3.h>
36 #else
37 #include <qdf_ipa.h>
38 #endif
39 #endif
40 
41 /**
42  * bitmap values to indicate special handling of peer_delete
43  */
44 #define CDP_PEER_DELETE_NO_SPECIAL             0
45 #define CDP_PEER_DO_NOT_START_UNMAP_TIMER      1
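
/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * the bitmap above is passed as the second argument of the txrx_peer_delete
 * op declared in struct cdp_cmn_ops below.  It assumes the caller holds an
 * ol_txrx_soc_handle whose cdp_ops table is reachable as soc->ops and a peer
 * handle returned earlier by txrx_peer_create(); example_peer_delete() is a
 * hypothetical name.
 *
 *	static void example_peer_delete(ol_txrx_soc_handle soc, void *peer)
 *	{
 *		// Delete the peer but skip the peer-unmap timer handling.
 *		soc->ops->cmn_drv_ops->txrx_peer_delete(peer,
 *				CDP_PEER_DO_NOT_START_UNMAP_TIMER);
 *	}
 */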
46 
47 /* same as ieee80211_nac_param */
48 enum cdp_nac_param_cmd {
49 	/* IEEE80211_NAC_PARAM_ADD */
50 	CDP_NAC_PARAM_ADD = 1,
51 	/* IEEE80211_NAC_PARAM_DEL */
52 	CDP_NAC_PARAM_DEL,
53 	/* IEEE80211_NAC_PARAM_LIST */
54 	CDP_NAC_PARAM_LIST,
55 };
56 /******************************************************************************
57  *
58  * Control Interface (A Interface)
59  *
60  *****************************************************************************/
61 
62 struct cdp_cmn_ops {
63 
64 	int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
65 
66 	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
67 
68 	struct cdp_vdev *(*txrx_vdev_attach)
69 		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
70 		 uint8_t vdev_id, enum wlan_op_mode op_mode);
71 
72 	void (*txrx_vdev_detach)
73 		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
74 		 void *cb_context);
75 
76 	struct cdp_pdev *(*txrx_pdev_attach)
77 		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
78 		HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
79 
80 	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
81 
82 	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
83 
84 	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
85 
86 	void *(*txrx_peer_create)
87 		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
88 		 struct cdp_ctrl_objmgr_peer *ctrl_peer);
89 
90 	void (*txrx_peer_setup)
91 		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
92 
93 	void (*txrx_peer_teardown)
94 		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
95 
96 	int (*txrx_peer_add_ast)
97 		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
98 		uint8_t *mac_addr, enum  cdp_txrx_ast_entry_type type,
99 		uint32_t flags);
100 
101 	void (*txrx_peer_del_ast)
102 		(ol_txrx_soc_handle soc, void *ast_hdl);
103 
104 	int (*txrx_peer_update_ast)
105 		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
106 		uint8_t *mac_addr, uint32_t flags);
107 
108 
109 	void *(*txrx_peer_ast_hash_find)
110 		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr);
111 
112 	uint8_t (*txrx_peer_ast_get_pdev_id)
113 		(ol_txrx_soc_handle soc, void *ast_hdl);
114 
115 	uint8_t (*txrx_peer_ast_get_next_hop)
116 		(ol_txrx_soc_handle soc, void *ast_hdl);
117 
118 	void (*txrx_peer_ast_set_type)
119 		(ol_txrx_soc_handle soc, void *ast_hdl,
120 		enum cdp_txrx_ast_entry_type type);
121 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
122 	void (*txrx_peer_ast_set_cp_ctx)(ol_txrx_soc_handle soc,
123 					 void *ast_entry,
124 					 void *cp_ctx);
125 
126 	void * (*txrx_peer_ast_get_cp_ctx)(ol_txrx_soc_handle soc,
127 					   void *ast_entry);
128 
129 	bool (*txrx_peer_ast_get_wmi_sent)(ol_txrx_soc_handle soc,
130 					   void *ast_entry);
131 
132 	void (*txrx_peer_ast_free_entry)(ol_txrx_soc_handle soc,
133 					 void *ast_entry);
134 #endif
135 
136 	enum cdp_txrx_ast_entry_type (*txrx_peer_ast_get_type)
137 		(ol_txrx_soc_handle soc, void *ast_hdl);
138 
139 	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
140 
141 	int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
142 			uint8_t smart_monitor);
143 
144 	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
145 
146 	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
147 					       int16_t chan_noise_floor);
148 
149 	void (*txrx_set_nac)(struct cdp_peer *peer);
150 
151 	void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
152 
153 	void (*txrx_get_peer_mac_from_peer_id)
154 		(struct cdp_pdev *pdev_handle,
155 		 uint32_t peer_id, uint8_t *peer_mac);
156 
157 	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
158 
159 	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
160 
161 	void (*txrx_ath_getstats)(void *pdev,
162 			struct cdp_dev_stats *stats, uint8_t type);
163 
164 	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
165 			u_int8_t *user_position);
166 
167 	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
168 
169 	void (*txrx_if_mgmt_drain)(void *ni, int force);
170 
171 	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
172 
173 	void (*txrx_set_privacy_filters)
174 		(struct cdp_vdev *vdev, void *filter, uint32_t num);
175 
176 	/********************************************************************
177 	 * Data Interface (B Interface)
178 	 ********************************************************************/
179 
180 	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
181 			void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
182 			struct ol_txrx_ops *txrx_ops);
183 
184 	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
185 			qdf_nbuf_t tx_mgmt_frm, uint8_t type);
186 
187 	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
188 			qdf_nbuf_t tx_mgmt_frm,	uint8_t type, uint8_t use_6mbps,
189 			uint16_t chanfreq);
190 
191 	/**
192 	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
193 	 * callback function
194 	 */
195 
196 	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
197 				    ol_txrx_mgmt_tx_cb download_cb,
198 				    ol_txrx_mgmt_tx_cb ota_ack_cb,
199 				    void *ctxt);
200 
201 	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
202 
203 	/**
204 	 * ol_txrx_data_tx_cb - Function registered with the data path
205 	 * that is called when tx frames marked as "no free" are
206 	 * done being transmitted
207 	 */
208 
209 	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
210 			ol_txrx_data_tx_cb callback, void *ctxt);
211 
212 	/*******************************************************************
213 	 * Statistics and Debugging Interface (C Interface)
214 	 ********************************************************************/
215 
216 	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
217 			int max_subfrms_amsdu);
218 
219 	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
220 			struct ol_txrx_stats_req *req,
221 			bool per_vdev, bool response_expected);
222 
223 	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
224 
225 	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
226 			uint8_t cfg_stats_type, uint32_t cfg_val);
227 
228 	void (*txrx_print_level_set)(unsigned level);
229 
230 	/**
231 	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
232 	 * @vdev: vdev handle
233 	 *
234 	 * Return: vdev mac address
235 	 */
236 	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
237 
238 	/**
239 	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
240 	 * vdev
241 	 * @vdev: vdev handle
242 	 *
243 	 * Return: Handle to struct qdf_mac_addr
244 	 */
245 	struct qdf_mac_addr *
246 		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
247 
248 	/**
249 	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
250 	 * @vdev: vdev handle
251 	 *
252 	 * Return: Handle to pdev
253 	 */
254 	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
255 		(struct cdp_vdev *vdev);
256 
257 	/**
258 	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
259 	 * @vdev: vdev handle
260 	 *
261 	 * Return: Handle to control pdev
262 	 */
263 	struct cdp_cfg *
264 		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
265 
266 	struct cdp_vdev *
267 		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
268 				uint8_t vdev_id);
269 
270 	void (*txrx_soc_detach)(void *soc);
271 
272 	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
273 					int status);
274 
275 	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
276 				   uint16_t tid, uint16_t batimeout,
277 				   uint16_t buffersize,
278 				   uint16_t startseqnum);
279 
280 	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
281 		uint8_t *dialogtoken, uint16_t *statuscode,
282 		uint16_t *buffersize, uint16_t *batimeout);
283 
284 	int (*delba_process)(void *peer_handle,
285 		int tid, uint16_t reasoncode);
286 
287 	/**
288 	 * delba_tx_completion() - Indicate delba tx status
289 	 * @peer_handle: Peer handle
290 	 * @tid: Tid number
291 	 * @status: Tx completion status
292 	 *
293 	 * Return: 0 on success, 1 on failure
294 	 */
295 	int (*delba_tx_completion)(void *peer_handle,
296 				   uint8_t tid, int status);
297 
298 	void (*set_addba_response)(void *peer_handle,
299 		uint8_t tid, uint16_t statuscode);
300 
301 	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
302 			uint16_t peer_id, uint8_t *mac_addr);
303 
304 	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
305 			uint8_t map_id);
306 
307 	void (*flush_cache_rx_queue)(void);
308 	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
309 			uint8_t tos, uint8_t tid);
310 
311 	QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev,
312 					 struct cdp_txrx_stats_req *req);
313 
314 	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
315 				    enum qdf_stats_verbosity_level level);
316 	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
317 
318 	int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
319 	QDF_STATUS (*txrx_intr_attach)(void *soc);
320 	void (*txrx_intr_detach)(void *soc);
321 	void  (*set_pn_check)(struct cdp_vdev *vdev,
322 		struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
323 		 uint32_t *rx_pn);
324 	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
325 			struct cdp_config_params *params);
326 
327 	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
328 	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
329 			void *dp_txrx_hdl);
330 
331 	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
332 	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
333 			void *dp_txrx_handle);
334 
335 	void (*txrx_peer_reset_ast)
336 		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl);
337 
338 	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
339 					  void *vdev_hdl);
340 
341 	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
342 	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
343 					  uint8_t ac, uint32_t value);
344 	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
345 					  uint8_t ac, uint32_t *value);
346 
347 	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
348 					   uint32_t num_peers,
349 					   bool peer_map_unmap_v2);
350 
351 	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
352 					struct cdp_ctrl_objmgr_pdev *ctrl_pdev);
353 
354 	ol_txrx_tx_fp tx_send;
355 	/**
356 	 * txrx_get_os_rx_handles_from_vdev() - Return the RX delivery function
357 	 *					and osif vdev handle for a vdev
358 	 * @vdev: vdev handle
359 	 * @stack_fn: out pointer to the function used to deliver RX packets to the stack
360 	 * @osif_vdev: out pointer to the osif vdev that RX packets are delivered to
361 	 */
362 	void (*txrx_get_os_rx_handles_from_vdev)
363 					(struct cdp_vdev *vdev,
364 					 ol_txrx_rx_fp *stack_fn,
365 					 ol_osif_vdev_handle *osif_vdev);
366 	int (*txrx_classify_update)
367 		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
368 		 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
369 };
370 
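/*
 * Illustrative bring-up sketch, kept as a comment (not part of the upstream
 * header): the usual attach order when driving the converged ops above.
 * It assumes the ctrl_* objmgr handles, HTC handle and qdf_device_t were
 * obtained elsewhere and that the cdp_ops table is reachable as soc->ops;
 * example_dp_bringup() and its pdev_id/vdev_id of 0 are hypothetical.
 *
 *	static void *example_dp_bringup(ol_txrx_soc_handle soc,
 *					struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
 *					HTC_HANDLE htc, qdf_device_t osdev,
 *					uint8_t *vdev_mac, uint8_t *peer_mac,
 *					struct cdp_ctrl_objmgr_peer *ctrl_peer,
 *					enum wlan_op_mode mode)
 *	{
 *		struct cdp_pdev *pdev;
 *		struct cdp_vdev *vdev;
 *
 *		// Error handling omitted for brevity.
 *		pdev = soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
 *							       htc, osdev, 0);
 *		vdev = soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, vdev_mac,
 *							       0, mode);
 *		return soc->ops->cmn_drv_ops->txrx_peer_create(vdev, peer_mac,
 *							       ctrl_peer);
 *	}
 */
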
371 struct cdp_ctrl_ops {
372 
373 	int
374 		(*txrx_mempools_attach)(void *ctrl_pdev);
375 	int
376 		(*txrx_set_filter_neighbour_peers)(
377 				struct cdp_pdev *pdev,
378 				uint32_t val);
379 	int
380 		(*txrx_update_filter_neighbour_peers)(
381 				struct cdp_vdev *vdev,
382 				uint32_t cmd, uint8_t *macaddr);
383 	/**
384 	 * @brief set the safemode of the device
385 	 * @details
386 	 * This flag is used to bypass the encrypt and decrypt processes when
387 	 * sending and receiving packets. It works like open AUTH mode: HW will
388 	 * treat all packets as non-encrypted frames because no key is installed.
389 	 * For rx fragmented frames, it bypasses all the rx defragmentation.
390 	 *
391 	 * @param vdev - the data virtual device object
392 	 * @param val - the safemode state
393 	 * @return - void
394 	 */
395 
396 	void
397 		(*txrx_set_safemode)(
398 				struct cdp_vdev *vdev,
399 				u_int32_t val);
400 	/**
401 	 * @brief configure the drop unencrypted frame flag
402 	 * @details
403 	 * Rx related. When this flag is set, all the unencrypted frames
404 	 * received over a secure connection will be discarded.
405 	 *
406 	 * @param vdev - the data virtual device object
407 	 * @param val - flag
408 	 * @return - void
409 	 */
410 	void
411 		(*txrx_set_drop_unenc)(
412 				struct cdp_vdev *vdev,
413 				u_int32_t val);
414 
415 
416 	/**
417 	 * @brief set the Tx encapsulation type of the VDEV
418 	 * @details
419 	 * This will be used to populate the HTT desc packet type field
420 	 * during Tx
421 	 * @param vdev - the data virtual device object
422 	 * @param val - the Tx encap type
423 	 * @return - void
424 	 */
425 	void
426 		(*txrx_set_tx_encap_type)(
427 				struct cdp_vdev *vdev,
428 				enum htt_cmn_pkt_type val);
429 	/**
430 	 * @brief set the Rx decapsulation type of the VDEV
431 	 * @details
432 	 * This will be used to configure into firmware and hardware
433 	 * which format to decap all Rx packets into, for all peers under
434 	 * the VDEV.
435 	 * @param vdev - the data virtual device object
436 	 * @param val - the Rx decap mode
437 	 * @return - void
438 	 */
439 	void
440 		(*txrx_set_vdev_rx_decap_type)(
441 				struct cdp_vdev *vdev,
442 				enum htt_cmn_pkt_type val);
443 
444 	/**
445 	 * @brief get the Rx decapsulation type of the VDEV
446 	 *
447 	 * @param vdev - the data virtual device object
448 	 * @return - the Rx decap type
449 	 */
450 	enum htt_cmn_pkt_type
451 		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
452 
453 	/* Is this similar to ol_txrx_peer_state_update() in MCL */
454 	/**
455 	 * @brief Update the peer object's authorization at association time
456 	 * @details
457 	 * For the host-based implementation of rate-control, it
458 	 * updates the peer/node-related parameters within the rate-control
459 	 * context of the peer at association.
460 	 *
461 	 * @param peer - pointer to the node's object
462 	 * @param authorize - either to authorize or unauthorize the peer
463 	 *
464 	 * @return none
465 	 */
466 	void
467 		(*txrx_peer_authorize)(struct cdp_peer *peer,
468 				u_int32_t authorize);
469 
470 	bool
471 		(*txrx_set_inact_params)(struct cdp_pdev *pdev,
472 				u_int16_t inact_check_interval,
473 				u_int16_t inact_normal,
474 				u_int16_t inact_overload);
475 	bool
476 		(*txrx_start_inact_timer)(
477 				struct cdp_pdev *pdev,
478 				bool enable);
479 
480 
481 	/**
482 	 * @brief Set the overload status of the radio
483 	 * @details
484 	 *  Set the overload status of the radio, updating the inactivity
485 	 *  threshold and inactivity count for each node.
486 	 *
487 	 * @param pdev - the data physical device object
488 	 * @param overload - whether the radio is overloaded or not
489 	 */
490 	void (*txrx_set_overload)(
491 			struct cdp_pdev *pdev,
492 			bool overload);
493 	/**
494 	 * @brief Check the inactivity status of the peer/node
495 	 *
496 	 * @param peer - pointer to the node's object
497 	 * @return true if the node is inactive; otherwise return false
498 	 */
499 	bool
500 		(*txrx_peer_is_inact)(void *peer);
501 
502 	/**
503 	 * @brief Mark inactivity status of the peer/node
504 	 * @details
505 	 *  If it becomes active, reset inactivity count to reload value;
506 	 *  if the inactivity status changed, notify umac band steering.
507 	 *
508 	 * @param peer - pointer to the node's object
509 	 * @param inactive - whether the node is inactive or not
510 	 */
511 	void (*txrx_mark_peer_inact)(
512 			void *peer,
513 			bool inactive);
514 
515 
516 	/* Should be ol_txrx_ctrl_api.h */
517 	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
518 
519 	/**
520 	 * @brief set the mesh rx filter
521 	 * @details
522 	 *  Packets are dropped based on the bits enabled in the filter.
523 	 *
524 	 * @param vdev - the data virtual device object
525 	 * @param val - value to set
526 	 */
527 	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
528 
529 	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
530 
531 	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
532 
533 	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
534 			enum cdp_vdev_param_type param, uint32_t val);
535 
536 	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
537 	/**
538 	 * @brief Set the reo dest ring num of the radio
539 	 * @details
540 	 *  Set the reo destination ring number on which we will receive
541 	 *  packets for this radio.
542 	 *
543 	 * @param pdev - the data physical device object
544 	 * @param reo_dest_ring_num - value in the range 1 to 4
545 	 */
546 	void (*txrx_set_pdev_reo_dest)(
547 			struct cdp_pdev *pdev,
548 			enum cdp_host_reo_dest_ring reo_dest_ring_num);
549 
550 	/**
551 	 * @brief Get the reo dest ring num of the radio
552 	 * @details
553 	 *  Get the reo destination ring number on which we will receive
554 	 *  packets for this radio.
555 	 *
556 	 * @param pdev - the data physical device object
557 	 * @return the reo destination ring number
558 	 */
559 	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
560 						struct cdp_pdev *pdev);
561 
562 	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
563 			uint32_t event);
564 
565 	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
566 			uint32_t event);
567 	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
568 
569 	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
570 			uint8_t subtype, uint8_t tx_power);
571 
572 	void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
573 			enum cdp_pdev_param_type type, uint8_t val);
574 	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
575 
576 #ifdef ATH_SUPPORT_NAC_RSSI
577 	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
578 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
579 		uint8_t chan_num);
580 	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
581 						   char *macaddr,
582 						   uint8_t *rssi);
583 #endif
584 	void (*set_key)(struct cdp_peer *peer_handle,
585 			bool is_unicast, uint32_t *key);
586 };
587 
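/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * driving the two security-related knobs documented above through the
 * control ops table.  Assumes soc->ops points at the populated cdp_ops table
 * and vdev came from txrx_vdev_attach(); example_set_open_auth() is a
 * hypothetical name.
 *
 *	static void example_set_open_auth(ol_txrx_soc_handle soc,
 *					  struct cdp_vdev *vdev)
 *	{
 *		// Bypass HW encryption/decryption and keep (do not drop)
 *		// unencrypted frames.
 *		soc->ops->ctrl_ops->txrx_set_safemode(vdev, 1);
 *		soc->ops->ctrl_ops->txrx_set_drop_unenc(vdev, 0);
 *	}
 */
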
588 struct cdp_me_ops {
589 
590 	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
591 		(struct cdp_pdev *pdev, u_int16_t buf_count);
592 
593 	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
594 				struct cdp_pdev *pdev,
595 				u_int16_t buf_count);
596 
597 	u_int16_t
598 		(*tx_get_mcast_buf_allocated_marked)
599 			(struct cdp_pdev *pdev);
600 	void
601 		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
602 
603 	void
604 		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
605 
606 	uint16_t
607 		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
608 			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
609 			uint8_t newmaccnt);
610 	/* Should be a function pointer in ol_txrx_osif_ops{} */
611 	/**
612 	 * @brief notify mcast frame indication from FW.
613 	 * @details
614 	 *     This notification will be used to convert
615 	 *     a multicast frame to unicast.
616 	 *
617 	 * @param pdev - handle to the ctrl SW's physical device object
618 	 * @param vdev_id - ID of the virtual device that received the special data
619 	 * @param msdu - the multicast msdu returned by FW for host inspect
620 	 */
621 
622 	int (*mcast_notify)(struct cdp_pdev *pdev,
623 			u_int8_t vdev_id, qdf_nbuf_t msdu);
624 };
625 
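/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * a call pattern suggested by the op names above for multicast-to-unicast
 * (mcast enhancement) conversion.  Assumes the cdp_ops table is reachable as
 * soc->ops and that wbuf/newmac/newmaccnt come from the caller;
 * example_me_convert() is a hypothetical name.
 *
 *	static uint16_t example_me_convert(ol_txrx_soc_handle soc,
 *					   struct cdp_pdev *pdev,
 *					   struct cdp_vdev *vdev,
 *					   qdf_nbuf_t wbuf,
 *					   u_int8_t newmac[][6],
 *					   uint8_t newmaccnt)
 *	{
 *		// Ensure mcast-enhancement descriptors are available, then
 *		// hand the frame and destination list to the converter.
 *		soc->ops->me_ops->tx_me_alloc_descriptor(pdev);
 *		return soc->ops->me_ops->tx_me_convert_ucast(vdev, wbuf,
 *							     newmac, newmaccnt);
 *	}
 */
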
626 struct cdp_mon_ops {
627 
628 	void (*txrx_monitor_set_filter_ucast_data)
629 		(struct cdp_pdev *, u_int8_t val);
630 	void (*txrx_monitor_set_filter_mcast_data)
631 		(struct cdp_pdev *, u_int8_t val);
632 	void (*txrx_monitor_set_filter_non_data)
633 	      (struct cdp_pdev *, u_int8_t val);
634 
635 	bool (*txrx_monitor_get_filter_ucast_data)
636 		(struct cdp_vdev *vdev_txrx_handle);
637 	bool (*txrx_monitor_get_filter_mcast_data)
638 		(struct cdp_vdev *vdev_txrx_handle);
639 	bool (*txrx_monitor_get_filter_non_data)
640 		(struct cdp_vdev *vdev_txrx_handle);
641 	int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
642 
643 	/* HK advance monitor filter support */
644 	int (*txrx_set_advance_monitor_filter)
645 		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
646 };
647 
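/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * programming the basic monitor filters above.  Assumes the cdp_ops table is
 * reachable as soc->ops; example_set_mon_filters() and the filter value are
 * hypothetical.
 *
 *	static void example_set_mon_filters(ol_txrx_soc_handle soc,
 *					    struct cdp_pdev *pdev,
 *					    u_int8_t val)
 *	{
 *		// Apply the same filter value to unicast, multicast and
 *		// non-data frames.
 *		soc->ops->mon_ops->txrx_monitor_set_filter_ucast_data(pdev, val);
 *		soc->ops->mon_ops->txrx_monitor_set_filter_mcast_data(pdev, val);
 *		soc->ops->mon_ops->txrx_monitor_set_filter_non_data(pdev, val);
 *	}
 */
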
648 struct cdp_host_stats_ops {
649 	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
650 			struct ol_txrx_stats_req *req);
651 
652 	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
653 
654 	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
655 
656 	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
657 			void *buf);
658 	/**
659 	 * @brief Enable enhanced stats functionality.
660 	 *
661 	 * @param pdev - the physical device object
662 	 * @return - void
663 	 */
664 	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
665 
666 	/**
667 	 * @brief Disable enhanced stats functionality.
668 	 *
669 	 * @param pdev - the physical device object
670 	 * @return - void
671 	 */
672 	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
673 
674 	/**
675 	 * @brief Get the desired stats from the message.
676 	 *
677 	 * @param pdev - the physical device object
678 	 * @param stats_base - stats buffer of msg_len bytes received from FW
679 	 * @param type - stats type
680 	 * @return - pointer to requested stat identified by type
681 	 */
682 	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
683 			uint32_t *stats_base, uint32_t msg_len, uint8_t type);
684 	void
685 		(*tx_print_tso_stats)(struct cdp_vdev *vdev);
686 
687 	void
688 		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);
689 
690 	void
691 		(*tx_print_sg_stats)(struct cdp_vdev *vdev);
692 
693 	void
694 		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);
695 
696 	void
697 		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);
698 
699 	void
700 		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
701 
702 	A_STATUS
703 		(*txrx_host_me_stats)(struct cdp_vdev *vdev);
704 
705 	void
706 		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
707 
708 	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
709 			struct ol_txrx_stats_req *req);
710 
711 	void
712 		(*print_lro_stats)(struct cdp_vdev *vdev);
713 
714 	void
715 		(*reset_lro_stats)(struct cdp_vdev *vdev);
716 
717 	void
718 		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
719 				uint32_t cap);
720 	void
721 		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
722 				uint32_t data_len);
723 	void
724 		(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
725 					  uint16_t stats_id);
726 	struct cdp_peer_stats*
727 		(*txrx_get_peer_stats)(struct cdp_peer *peer);
728 	void
729 		(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
730 	void
731 		(*txrx_reset_peer_stats)(struct cdp_peer *peer);
732 	int
733 		(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
734 				       bool is_aggregate);
735 	int
736 		(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
737 						    void *data, uint32_t len,
738 						    uint32_t stats_id);
739 	int
740 		(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
741 					    void *buffer);
742 	void
743 		(*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf,
744 					  uint16_t stats_id);
745 };
746 
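/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * one possible enhanced-stats query sequence using the ops above.  Assumes
 * the cdp_ops table is reachable as soc->ops and peer_mac/cap come from the
 * caller; example_fw_peer_stats() is a hypothetical name.
 *
 *	static void example_fw_peer_stats(ol_txrx_soc_handle soc,
 *					  struct cdp_pdev *pdev,
 *					  uint8_t *peer_mac, uint32_t cap)
 *	{
 *		soc->ops->host_stats_ops->txrx_enable_enhanced_stats(pdev);
 *		soc->ops->host_stats_ops->get_fw_peer_stats(pdev, peer_mac, cap);
 *		soc->ops->host_stats_ops->txrx_disable_enhanced_stats(pdev);
 *	}
 */
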
747 struct cdp_wds_ops {
748 	void
749 		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
750 				u_int32_t val);
751 	void
752 		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
753 				int wds_tx_ucast, int wds_tx_mcast);
754 	int (*vdev_set_wds)(void *vdev, uint32_t val);
755 };
756 
757 struct cdp_raw_ops {
758 	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
759 
760 	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
761 				qdf_nbuf_t *pnbuf,
762 				struct cdp_raw_ast *raw_ast);
763 };
764 
765 #ifdef CONFIG_WIN
766 struct cdp_pflow_ops {
767 	uint32_t(*pflow_update_pdev_params)(void *,
768 			enum _ol_ath_param_t, uint32_t, void *);
769 };
770 #endif /* CONFIG_WIN */
771 
772 #define LRO_IPV4_SEED_ARR_SZ 5
773 #define LRO_IPV6_SEED_ARR_SZ 11
774 
775 /**
776  * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
777  * @lro_enable: indicates whether rx_offld is enabled
778  * @tcp_flag: If the TCP flags from the packet do not match
779  * the values in this field after masking with the TCP flags mask
780  * below, the packet is not rx_offld eligible
781  * @tcp_flag_mask: field for comparing the TCP values provided
782  * above with the TCP flags field in the received packet
783  * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
784  * 5-tuple toeplitz hash for ipv4 packets
785  * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
786  * 5-tuple toeplitz hash for ipv6 packets
787  */
788 struct cdp_lro_hash_config {
789 	uint32_t lro_enable;
790 	uint32_t tcp_flag:9,
791 		tcp_flag_mask:9;
792 	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
793 	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
794 };
795 
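/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * filling in the rx-offload hash parameters above before handing them to the
 * lro_hash_config callback declared in struct ol_if_ops below.  Assumes the
 * caller already has the ol_if_ops table and ctrl_psoc handle; the field
 * values and example_rx_offld_config() are hypothetical.
 *
 *	static QDF_STATUS example_rx_offld_config(struct ol_if_ops *ol_ops,
 *					struct wlan_objmgr_psoc *ctrl_psoc)
 *	{
 *		struct cdp_lro_hash_config cfg = {0};
 *
 *		cfg.lro_enable = 1;
 *		cfg.tcp_flag = 0;
 *		cfg.tcp_flag_mask = 0x1ff;	// all 9 TCP flag bits
 *		// toeplitz_hash_ipv4[]/toeplitz_hash_ipv6[] would be filled
 *		// with caller-chosen random seed words here.
 *
 *		return ol_ops->lro_hash_config(ctrl_psoc, &cfg);
 *	}
 */
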
796 struct ol_if_ops {
797 	void
798 	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
799 				    uint8_t *peer_macaddr, uint8_t vdev_id,
800 				    bool hash_based, uint8_t ring_num);
801 	QDF_STATUS
802 	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
803 				       uint8_t vdev_id, uint8_t *peer_mac,
804 				       qdf_dma_addr_t hw_qdesc, int tid,
805 				       uint16_t queue_num,
806 				       uint8_t ba_window_size_valid,
807 				       uint16_t ba_window_size);
808 	QDF_STATUS
809 	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
810 					uint8_t vdev_id, uint8_t *peer_macaddr,
811 					uint32_t tid_mask);
812 	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
813 			uint8_t *peer_macaddr);
814 	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
815 	int (*peer_add_wds_entry)(void *ol_soc_handle,
816 			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
817 			uint32_t flags);
818 	int (*peer_update_wds_entry)(void *ol_soc_handle,
819 			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
820 			uint32_t flags);
821 	void (*peer_del_wds_entry)(void *ol_soc_handle,
822 			uint8_t *wds_macaddr);
823 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
824 	void (*peer_del_wds_cp_ctx)(void *cp_ctx);
825 #endif
826 	QDF_STATUS
827 	(*lro_hash_config)(struct wlan_objmgr_psoc *ctrl_psoc,
828 			   struct cdp_lro_hash_config *rx_offld_hash);
829 	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
830 			uint8_t type);
831 #ifdef CONFIG_WIN
832 	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
833 #else
834 	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
835 #endif
836 	int  (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
837 			uint8_t vdev_id, uint8_t *peer_mac_addr,
838 			enum cdp_txrx_ast_entry_type peer_type,
839 			uint32_t tx_ast_hashidx);
840 	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);
841 
842 	int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num);
843 
844 	void (*rx_mic_error)(void *ol_soc_handle,
845 			 uint16_t vdev_id, void *wh);
846 	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
847 				   qdf_nbuf_t nbuf,
848 				   uint16_t hdr_space);
849 	uint8_t (*freq_to_channel)(void *ol_soc_handle,  uint16_t vdev_id);
850 
851 	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
852 				  u_int8_t *dstmac, bool active);
853 #ifdef ATH_SUPPORT_NAC_RSSI
854 	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
855 		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
856 		char *client_macaddr, uint8_t chan_num);
857 	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
858 		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid);
859 #endif
860 	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);
861 
862 	/**
863 	 * send_delba() - Send delba to peer
864 	 * @pdev_handle: DP pdev handle
865 	 * @ctrl_peer: Peer handle
866 	 * @peer_macaddr: Peer mac addr
867 	 * @tid: Tid number
868 	 *
869 	 * Return: 0 for success, non-zero for failure
870 	 */
871 	int (*send_delba)(void *pdev_handle,  void *ctrl_peer,
872 			  uint8_t *peer_macaddr, uint8_t tid, void *vdev_handle,
873 			  uint8_t reason_code);
874 	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
875 };
876 
877 #ifndef CONFIG_WIN
878 /* From here MCL specific OPs */
879 /**
880  * struct cdp_misc_ops - mcl ops not classified
881  * @set_ibss_vdev_heart_beat_timer:
882  * @bad_peer_txctl_set_setting:
883  * @bad_peer_txctl_update_threshold:
884  * @hl_tdls_flag_reset:
885  * @tx_non_std:
886  * @get_vdev_id:
887  * @set_wisa_mode:
888  * @txrx_data_stall_cb_register:
889  * @txrx_data_stall_cb_deregister:
890  * @txrx_post_data_stall_event:
891  * @runtime_suspend:
892  * @runtime_resume:
893  */
894 struct cdp_misc_ops {
895 	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
896 			uint16_t timer_value_sec);
897 	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
898 			struct ol_tx_wmm_param_t wmm_param);
899 	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
900 			int period, int txq_limit);
901 	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
902 		int level, int tput_thresh, int tx_limit);
903 	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
904 	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
905 		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
906 	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
907 	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
908 	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
909 	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
910 	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
911 	void (*txrx_post_data_stall_event)(
912 			enum data_stall_log_event_indicator indicator,
913 			enum data_stall_log_event_type data_stall_type,
914 			uint32_t pdev_id, uint32_t vdev_id_bitmap,
915 			enum data_stall_log_recovery_type recovery_type);
916 	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
917 	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
918 	int (*get_opmode)(struct cdp_vdev *vdev);
919 	void (*mark_first_wakeup_packet)(uint8_t value);
920 	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
921 	void (*flush_rx_frames)(void *peer, bool drop);
922 	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
923 		uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
924 	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
925 	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
926 };
927 
928 /**
929  * struct cdp_tx_delay_ops - mcl tx delay ops
930  * @tx_delay:
931  * @tx_delay_hist:
932  * @tx_packet_count:
933  * @tx_set_compute_interval:
934  */
935 struct cdp_tx_delay_ops {
936 	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
937 		uint32_t *tx_delay_microsec, int category);
938 	void (*tx_delay_hist)(struct cdp_pdev *pdev,
939 		uint16_t *bin_values, int category);
940 	void (*tx_packet_count)(struct cdp_pdev *pdev,
941 		uint16_t *out_packet_count,
942 		uint16_t *out_packet_loss_count, int category);
943 	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
944 		uint32_t interval);
945 };
946 
947 /**
948  * struct cdp_pmf_ops - mcl protected management frame ops
949  * @get_pn_info:
950  */
951 struct cdp_pmf_ops {
952 	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
953 			uint64_t **last_pn, uint32_t **rmf_pn_replays);
954 };
955 
956 /**
957  * struct cdp_cfg_ops - mcl configuration ops
958  * @set_cfg_rx_fwd_disabled:
959  * @set_cfg_packet_log_enabled:
960  * @cfg_attach:
961  * @vdev_rx_set_intrabss_fwd:
962  * @get_opmode:
963  * @is_rx_fwd_disabled:
964  * @tx_set_is_mgmt_over_wmi_enabled:
965  * @is_high_latency:
966  * @set_flow_control_parameters:
967  */
968 struct cdp_cfg_ops {
969 	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
970 		uint8_t disable_rx_fwd);
971 	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
972 		uint8_t val);
973 	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
974 	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
975 	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
976 	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
977 	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
978 	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
979 		void *param);
980 	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
981 	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
982 };
983 
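/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * attaching a config context and toggling one of the flags above.  Assumes
 * the cdp_ops table is reachable as soc->ops and cfg_param comes from the
 * caller; example_cfg_attach() is a hypothetical name.
 *
 *	static struct cdp_cfg *example_cfg_attach(ol_txrx_soc_handle soc,
 *						  qdf_device_t osdev,
 *						  void *cfg_param)
 *	{
 *		struct cdp_cfg *cfg_pdev;
 *
 *		cfg_pdev = soc->ops->cfg_ops->cfg_attach(osdev, cfg_param);
 *		if (cfg_pdev)
 *			soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, 1);
 *		return cfg_pdev;
 *	}
 */
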
984 /**
985  * struct cdp_flowctl_ops - mcl flow control
986  * @register_pause_cb:
987  * @set_desc_global_pool_size:
988  * @dump_flow_pool_info:
989  */
990 struct cdp_flowctl_ops {
991 	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
992 					    struct cdp_pdev *pdev,
993 					    uint8_t vdev_id);
994 	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
995 					struct cdp_pdev *pdev,
996 					uint8_t vdev_id);
997 	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
998 					tx_pause_callback);
999 	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
1000 
1001 	void (*dump_flow_pool_info)(void *);
1002 };
1003 
1004 /**
1005  * struct cdp_lflowctl_ops - mcl legacy flow control ops
1006  * @register_tx_flow_control:
1007  * @deregister_tx_flow_control_cb:
1008  * @flow_control_cb:
1009  * @get_tx_resource:
1010  * @ll_set_tx_pause_q_depth:
1011  * @vdev_flush:
1012  * @vdev_pause:
1013  * @vdev_unpause:
1014  */
1015 struct cdp_lflowctl_ops {
1016 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
1017 	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
1018 					tx_pause_callback flowcontrol);
1019 	int (*set_vdev_tx_desc_limit)(u8 vdev_id, u8 chan);
1020 	int (*set_vdev_os_queue_status)(u8 vdev_id,
1021 					enum netif_action_type action);
1022 #else
1023 	int (*register_tx_flow_control)(uint8_t vdev_id,
1024 		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
1025 		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
1026 #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
1027 	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
1028 	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
1029 	bool (*get_tx_resource)(uint8_t sta_id,
1030 			 unsigned int low_watermark,
1031 			 unsigned int high_watermark_offset);
1032 	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
1033 	void (*vdev_flush)(struct cdp_vdev *vdev);
1034 	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
1035 	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
1036 };
1037 
1038 #ifdef IPA_OFFLOAD
1039 /**
1040  * struct cdp_ipa_ops - mcl ipa data path ops
1041  * @ipa_get_resource:
1042  * @ipa_set_doorbell_paddr:
1043  * @ipa_set_active:
1044  * @ipa_op_response:
1045  * @ipa_register_op_cb:
1046  * @ipa_get_stat:
1047  * @ipa_tx_data_frame:
1048  */
1049 struct cdp_ipa_ops {
1050 	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
1051 	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
1052 	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
1053 		bool is_tx);
1054 	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
1055 	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
1056 		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
1057 		void *usr_ctxt);
1058 	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
1059 	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
1060 	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
1061 		uint32_t value);
1062 #ifdef FEATURE_METERING
1063 	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
1064 		uint8_t reset_stats);
1065 	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
1066 		uint64_t quota_bytes);
1067 #endif
1068 	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
1069 	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
1070 #ifdef CONFIG_IPA_WDI_UNIFIED_API
1071 	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
1072 		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
1073 		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
1074 		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
1075 		bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
1076 #else /* CONFIG_IPA_WDI_UNIFIED_API */
1077 	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
1078 		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
1079 		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
1080 		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
1081 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1082 	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
1083 		uint32_t rx_pipe_handle);
1084 	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
1085 		qdf_ipa_client_type_t prod_client,
1086 		qdf_ipa_client_type_t cons_client,
1087 		uint8_t session_id, bool is_ipv6_enabled);
1088 	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
1089 	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
1090 	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
1091 	QDF_STATUS (*ipa_set_perf_level)(int client,
1092 		uint32_t max_supported_bw_mbps);
1093 };
1094 #endif
1095 
1096 /**
1097  * struct cdp_bus_ops - mcl bus suspend/resume ops
1098  * @bus_suspend:
1099  * @bus_resume:
1100  */
1101 struct cdp_bus_ops {
1102 	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
1103 	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
1104 };
1105 
1106 /**
1107  * struct cdp_ocb_ops - mcl ocb ops
1108  * @set_ocb_chan_info:
1109  * @get_ocb_chan_info:
1110  */
1111 struct cdp_ocb_ops {
1112 	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
1113 			struct ol_txrx_ocb_set_chan ocb_set_chan);
1114 	struct ol_txrx_ocb_chan_info *
1115 		(*get_ocb_chan_info)(struct cdp_vdev *vdev);
1116 };
1117 
1118 /**
1119  * struct cdp_peer_ops - mcl peer related ops
1120  * @register_peer:
1121  * @clear_peer:
1122  * @cfg_attach:
1123  * @find_peer_by_addr:
1124  * @find_peer_by_addr_and_vdev:
1125  * @local_peer_id:
1126  * @peer_find_by_local_id:
1127  * @peer_state_update:
1128  * @get_vdevid:
1129  * @get_vdev_by_sta_id:
1130  * @register_ocb_peer:
1131  * @peer_get_peer_mac_addr:
1132  * @get_peer_state:
1133  * @get_vdev_for_peer:
1134  * @update_ibss_add_peer_num_of_vdev:
1135  * @remove_peers_for_vdev:
1136  * @remove_peers_for_vdev_no_lock:
1137  * @copy_mac_addr_raw:
1138  * @add_last_real_peer:
1139  * @is_vdev_restore_last_peer:
1140  * @update_last_real_peer:
1141  */
1142 struct cdp_peer_ops {
1143 	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
1144 			struct ol_txrx_desc_type *sta_desc);
1145 	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
1146 	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
1147 			enum ol_txrx_peer_state sta_state,
1148 			bool roam_synch_in_progress);
1149 	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
1150 				       u8 *peer_addr, uint8_t *peer_id,
1151 				       enum peer_debug_id_type debug_id);
1152 	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
1153 	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
1154 			uint8_t *peer_addr, uint8_t *peer_id);
1155 	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
1156 			struct cdp_vdev *vdev,
1157 			uint8_t *peer_addr, uint8_t *peer_id);
1158 	uint16_t (*local_peer_id)(void *peer);
1159 	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
1160 			uint8_t local_peer_id);
1161 	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
1162 			uint8_t *peer_addr,
1163 			enum ol_txrx_peer_state state);
1164 	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
1165 	struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
1166 			uint8_t sta_id);
1167 	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
1168 	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
1169 	int (*get_peer_state)(void *peer);
1170 	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
1171 	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
1172 			int16_t peer_num_delta);
1173 	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
1174 			ol_txrx_vdev_peer_remove_cb callback,
1175 			void *callback_context, bool remove_last_peer);
1176 	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
1177 			ol_txrx_vdev_peer_remove_cb callback,
1178 			void *callback_context);
1179 	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
1180 	void (*add_last_real_peer)(struct cdp_pdev *pdev,
1181 		struct cdp_vdev *vdev, uint8_t *peer_id);
1182 	bool (*is_vdev_restore_last_peer)(void *peer);
1183 	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
1184 			uint8_t *peer_id, bool restore_last_peer);
1185 	void (*peer_detach_force_delete)(void *peer);
1186 };
1187 
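/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * the reference-counted peer lookup pattern suggested by the ops above,
 * taking a reference by MAC address and releasing it when done.  Assumes the
 * cdp_ops table is reachable as soc->ops; example_peer_lookup() is a
 * hypothetical name.
 *
 *	static void example_peer_lookup(ol_txrx_soc_handle soc,
 *					struct cdp_pdev *pdev, uint8_t *mac,
 *					enum peer_debug_id_type dbg_id)
 *	{
 *		uint8_t peer_id;
 *		void *peer;
 *
 *		peer = soc->ops->peer_ops->peer_get_ref_by_addr(pdev, mac,
 *								&peer_id,
 *								dbg_id);
 *		if (!peer)
 *			return;
 *		// ... use the peer while the reference is held ...
 *		soc->ops->peer_ops->peer_release_ref(peer, dbg_id);
 *	}
 */
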
1188 /**
1189  * struct cdp_throttle_ops - mcl throttle ops
1190  * @throttle_init_period:
1191  * @throttle_set_level:
1192  */
1193 struct cdp_throttle_ops {
1194 	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
1195 			uint8_t *dutycycle_level);
1196 	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
1197 };
1198 
1199 /**
1200  * struct cdp_mob_stats_ops - mcl mobile stats ops
1201  * @clear_stats:
1202  * @stats:
1203  */
1204 struct cdp_mob_stats_ops {
1205 	void (*clear_stats)(uint16_t bitmap);
1206 	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
1207 };
1208 #endif /* CONFIG_WIN */
1209 
1210 #ifdef RECEIVE_OFFLOAD
1211 /**
1212  * struct cdp_rx_offld_ops - mcl receive offload ops
1213  * @register_rx_offld_flush_cb:
1214  * @deregister_rx_offld_flush_cb:
1215  */
1216 struct cdp_rx_offld_ops {
1217 	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
1218 	void (*deregister_rx_offld_flush_cb)(void);
1219 };
1220 #endif
1221 
1222 struct cdp_ops {
1223 	struct cdp_cmn_ops          *cmn_drv_ops;
1224 	struct cdp_ctrl_ops         *ctrl_ops;
1225 	struct cdp_me_ops           *me_ops;
1226 	struct cdp_mon_ops          *mon_ops;
1227 	struct cdp_host_stats_ops   *host_stats_ops;
1228 	struct cdp_wds_ops          *wds_ops;
1229 	struct cdp_raw_ops          *raw_ops;
1230 	struct cdp_pflow_ops        *pflow_ops;
1231 #ifndef CONFIG_WIN
1232 	struct cdp_misc_ops         *misc_ops;
1233 	struct cdp_cfg_ops          *cfg_ops;
1234 	struct cdp_flowctl_ops      *flowctl_ops;
1235 	struct cdp_lflowctl_ops     *l_flowctl_ops;
1236 #ifdef IPA_OFFLOAD
1237 	struct cdp_ipa_ops          *ipa_ops;
1238 #endif
1239 #ifdef RECEIVE_OFFLOAD
1240 	struct cdp_rx_offld_ops     *rx_offld_ops;
1241 #endif
1242 	struct cdp_bus_ops          *bus_ops;
1243 	struct cdp_ocb_ops          *ocb_ops;
1244 	struct cdp_peer_ops         *peer_ops;
1245 	struct cdp_throttle_ops     *throttle_ops;
1246 	struct cdp_mob_stats_ops    *mob_stats_ops;
1247 	struct cdp_tx_delay_ops     *delay_ops;
1248 	struct cdp_pmf_ops          *pmf_ops;
1249 #endif /* CONFIG_WIN */
1250 };
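
/*
 * Illustrative sketch, kept as a comment (not part of the upstream header):
 * callers normally reach the tables above through a soc handle and guard
 * against ops that a given data-path implementation leaves unpopulated.
 * Assumes the cdp_ops table is reachable as soc->ops;
 * example_flush_rx_cache() is a hypothetical name.
 *
 *	static void example_flush_rx_cache(ol_txrx_soc_handle soc)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->cmn_drv_ops ||
 *		    !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
 *			return;
 *		soc->ops->cmn_drv_ops->flush_cache_rx_queue();
 *	}
 */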
1251 #endif
1252