/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL             0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER      1

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/

struct cdp_cmn_ops {

	int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);

	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);

	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode);

	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);

	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);

	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);

	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);

	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);

	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
		 struct cdp_ctrl_objmgr_peer *ctrl_peer);

	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags);

	void (*txrx_peer_del_ast)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		uint8_t *mac_addr, uint32_t flags);

	void *(*txrx_peer_ast_hash_find)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr);

	uint8_t (*txrx_peer_ast_get_pdev_id)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	uint8_t (*txrx_peer_ast_get_next_hop)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	void (*txrx_peer_ast_set_type)
		(ol_txrx_soc_handle soc, void *ast_hdl,
		enum cdp_txrx_ast_entry_type type);

	enum cdp_txrx_ast_entry_type (*txrx_peer_ast_get_type)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);

	int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
			uint8_t smart_monitor);

	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);

	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
					       int16_t chan_noise_floor);

	void (*txrx_set_nac)(struct cdp_peer *peer);

	void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);

	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);

	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);

	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);

	void (*txrx_ath_getstats)(void *pdev,
			struct cdp_dev_stats *stats, uint8_t type);

	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
			u_int8_t *user_position);

	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);

	void (*txrx_if_mgmt_drain)(void *ni, int force);

	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);

	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/

	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
			void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
			struct ol_txrx_ops *txrx_ops);

	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type);

	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
			uint16_t chanfreq);

	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */

	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
				    ol_txrx_mgmt_tx_cb download_cb,
				    ol_txrx_mgmt_tx_cb ota_ack_cb,
				    void *ctxt);

	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);

	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */

	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
			ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/

	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			int max_subfrms_amsdu);

	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req,
			bool per_vdev, bool response_expected);

	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);

	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
			uint8_t cfg_stats_type, uint32_t cfg_val);

	void (*txrx_print_level_set)(unsigned level);

	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
	 * vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);

	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
				uint8_t vdev_id);

	void (*txrx_soc_detach)(void *soc);

	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
					int status);

	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
				   uint16_t tid, uint16_t batimeout,
				   uint16_t buffersize,
				   uint16_t startseqnum);

	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
		uint8_t *dialogtoken, uint16_t *statuscode,
		uint16_t *buffersize, uint16_t *batimeout);

	int (*delba_process)(void *peer_handle,
		int tid, uint16_t reasoncode);

	void (*set_addba_response)(void *peer_handle,
		uint8_t tid, uint16_t statuscode);

	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
			uint16_t peer_id, uint8_t *mac_addr);

	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
			uint8_t map_id);

	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
			uint8_t tos, uint8_t tid);

	int (*txrx_stats_request)(struct cdp_vdev *vdev,
			struct cdp_txrx_stats_req *req);

	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);

	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
		struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
		 uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
			struct cdp_config_params *params);

	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
			void *dp_txrx_hdl);

	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
			void *dp_txrx_handle);

	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl);

	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
					  void *vdev_hdl);

	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);

	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
			uint32_t num_peers);

	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
					struct cdp_ctrl_objmgr_pdev *ctrl_pdev);

	ol_txrx_tx_fp tx_send;
};
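
/*
 * Example (illustrative sketch only, not part of the converged API): a
 * typical bring-up sequence dispatched through cdp_cmn_ops. It assumes
 * ol_txrx_soc_handle points at a struct cdp_soc_t whose ops table was
 * registered by the DP attach code, and that ctrl_pdev, htc, osdev and the
 * MAC addresses were obtained by the caller elsewhere; in the driver this
 * dispatch is normally hidden behind the cdp_* inline wrappers.
 *
 *	struct cdp_pdev *pdev;
 *	struct cdp_vdev *vdev;
 *	void *peer;
 *
 *	pdev = soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
 *						       htc, osdev, 0);
 *	soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);
 *	vdev = soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, vdev_mac, vdev_id,
 *						       wlan_op_mode_sta);
 *	peer = soc->ops->cmn_drv_ops->txrx_peer_create(vdev, peer_mac,
 *						       ctrl_peer);
 *	soc->ops->cmn_drv_ops->txrx_peer_setup(vdev, peer);
 *
 *	soc->ops->cmn_drv_ops->txrx_peer_delete(peer,
 *						CDP_PEER_DELETE_NO_SPECIAL);
 *	soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev, NULL, NULL);
 */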

struct cdp_ctrl_ops {

	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
				struct cdp_vdev *vdev,
				uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * sending and receiving packets. It works like open AUTH mode: the HW
	 * treats all packets as non-encrypted frames because no key is
	 * installed. For fragmented rx frames, it bypasses all rx
	 * defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */

	void
		(*txrx_set_safemode)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When this flag is set, all unencrypted frames
	 * received over a secure connection are discarded.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
				struct cdp_vdev *vdev,
				u_int32_t val);


	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);

	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL? */
	/**
	 * @brief Update the authorized peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within the rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize the peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				u_int32_t authorize);

	bool
		(*txrx_set_inact_params)(struct cdp_pdev *pdev,
				u_int16_t inact_check_interval,
				u_int16_t inact_normal,
				u_int16_t inact_overload);
	bool
		(*txrx_start_inact_timer)(
				struct cdp_pdev *pdev,
				bool enable);


	/**
	 * @brief Set the overload status of the radio
	 * @details
	 *  Set the overload status of the radio, updating the inactivity
	 *  threshold and inactivity count for each node.
	 *
	 * @param pdev - the data physical device object
	 * @param overload - whether the radio is overloaded or not
	 */
	void (*txrx_set_overload)(
			struct cdp_pdev *pdev,
			bool overload);
	/**
	 * @brief Check the inactivity status of the peer/node
	 *
	 * @param peer - pointer to the node's object
	 * @return true if the node is inactive; otherwise return false
	 */
	bool
		(*txrx_peer_is_inact)(void *peer);

	/**
	 * @brief Mark inactivity status of the peer/node
	 * @details
	 *  If it becomes active, reset inactivity count to reload value;
	 *  if the inactivity status changed, notify umac band steering.
	 *
	 * @param peer - pointer to the node's object
	 * @param inactive - whether the node is inactive or not
	 */
	void (*txrx_mark_peer_inact)(
			void *peer,
			bool inactive);


	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);

	/**
	 * @brief set the mesh rx filter
	 * @details
	 *  Packets are dropped based on the bits enabled in the filter.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);

	void (*tx_flush_buffers)(struct cdp_vdev *vdev);

	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);

	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
			enum cdp_vdev_param_type param, uint32_t val);

	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 *  Set the reo destination ring number on which we will receive
	 *  pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
			struct cdp_pdev *pdev,
			enum cdp_host_reo_dest_ring reo_dest_ring_num);

	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 *  Get the reo destination ring number on which we will receive
	 *  pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
						struct cdp_pdev *pdev);

	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);

	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);

	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
			uint8_t subtype, uint8_t tx_power);

	void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
			enum cdp_pdev_param_type type, uint8_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);

#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
						   char *macaddr,
						   uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);
};
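
/*
 * Example (illustrative sketch only): configuring a vdev through
 * cdp_ctrl_ops once the handles are in hand. The encap/decap value used
 * here (htt_cmn_pkt_type_ethernet) is just one of the enum htt_cmn_pkt_type
 * choices and is picked purely for illustration.
 *
 *	struct cdp_ctrl_ops *ctrl = soc->ops->ctrl_ops;
 *
 *	ctrl->txrx_set_tx_encap_type(vdev, htt_cmn_pkt_type_ethernet);
 *	ctrl->txrx_set_vdev_rx_decap_type(vdev, htt_cmn_pkt_type_ethernet);
 *	ctrl->txrx_set_drop_unenc(vdev, 1);
 *	ctrl->txrx_peer_authorize(peer, 1);
 */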

struct cdp_me_ops {

	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);

	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
				struct cdp_pdev *pdev,
				u_int16_t buf_count);

	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);

	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);

	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
			uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 *     This notification is used to convert a
	 *     multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device that received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspection
	 */

	int (*mcast_notify)(struct cdp_pdev *pdev,
			u_int8_t vdev_id, qdf_nbuf_t msdu);
};

struct cdp_mon_ops {

	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);

	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);

	/* HK advance monitor filter support */
	int (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};

struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req);

	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);

	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);

	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
			void *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);

	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);

	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
			uint32_t *stats_base, uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);

	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);

	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);

	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);

	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);

	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);

	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);

	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);

	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req);

	void
		(*print_lro_stats)(struct cdp_vdev *vdev);

	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);

	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
				uint32_t cap);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
				uint32_t data_len);
	void
		(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
					  uint16_t stats_id);
	struct cdp_peer_stats *
		(*txrx_get_peer_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_stats)(struct cdp_peer *peer);
	int
		(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
				       bool is_aggregate);
	int
		(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
						    void *data, uint32_t len,
						    uint32_t stats_id);
	int
		(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
					    void *buffer);
};

struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
				u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
				int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};

struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);

	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
				qdf_nbuf_t *pnbuf,
				struct cdp_raw_ast *raw_ast);
};

#ifdef CONFIG_WIN
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
			enum _ol_ath_param_t, uint32_t, void *);
};
#endif /* CONFIG_WIN */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
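
/*
 * Example (illustrative sketch only): filling in a cdp_lro_hash_config
 * before handing it to the ol_if_ops->lro_hash_config() callback declared
 * below. The flag values and the seed arrays are placeholders supplied by
 * the caller in this sketch, not values mandated by this header.
 *
 *	struct cdp_lro_hash_config lro_cfg = {0};
 *	int i;
 *
 *	lro_cfg.lro_enable = 1;
 *	lro_cfg.tcp_flag = tcp_flag_value;
 *	lro_cfg.tcp_flag_mask = tcp_flag_mask_value;
 *	for (i = 0; i < LRO_IPV4_SEED_ARR_SZ; i++)
 *		lro_cfg.toeplitz_hash_ipv4[i] = ipv4_seed[i];
 *	for (i = 0; i < LRO_IPV6_SEED_ARR_SZ; i++)
 *		lro_cfg.toeplitz_hash_ipv6[i] = ipv6_seed[i];
 */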

struct ol_if_ops {
	void
	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
				    uint8_t *peer_macaddr, uint8_t vdev_id,
				    bool hash_based, uint8_t ring_num);
	QDF_STATUS
	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
				       uint8_t vdev_id, uint8_t *peer_mac,
				       qdf_dma_addr_t hw_qdesc, int tid,
				       uint16_t queue_num,
				       uint8_t ba_window_size_valid,
				       uint16_t ba_window_size);
	QDF_STATUS
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
					uint8_t vdev_id, uint8_t *peer_macaddr,
					uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
			uint8_t *peer_macaddr);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *ol_soc_handle,
			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
			uint8_t *wds_macaddr);
	QDF_STATUS
	(*lro_hash_config)(struct wlan_objmgr_psoc *ctrl_psoc,
			   struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
			uint8_t type);
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);

	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
			uint8_t vdev_id, uint8_t *peer_mac_addr,
			enum cdp_txrx_ast_entry_type peer_type);
	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);

	int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num);

	void (*rx_mic_error)(void *ol_soc_handle,
			 uint16_t vdev_id, void *wh);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
				   qdf_nbuf_t nbuf,
				   uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);

	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
				  u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
		char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);

	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};

#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
			uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
			int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
		int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
		uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};

/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
		uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
		uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
		uint16_t *out_packet_count,
		uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
		uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			uint64_t **last_pn, uint32_t **rmf_pn_replays);
};

/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled:
 * @set_cfg_packet_log_enabled:
 * @cfg_attach:
 * @vdev_rx_set_intrabss_fwd:
 * @get_opmode:
 * @is_rx_fwd_disabled:
 * @tx_set_is_mgmt_over_wmi_enabled:
 * @is_high_latency:
 * @set_flow_control_parameters:
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
		uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
		void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
					    struct cdp_pdev *pdev,
					    uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
					struct cdp_pdev *pdev,
					uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
					tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);

	void (*dump_flow_pool_info)(void *);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
					tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(u8 vdev_id, u8 chan);
	int (*set_vdev_os_queue_status)(u8 vdev_id,
					enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
			 unsigned int low_watermark,
			 unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
		bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
		uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
		uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
		uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
		bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
		uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
		qdf_ipa_client_type_t prod_client,
		qdf_ipa_client_type_t cons_client,
		uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
		uint32_t max_supported_bw_mbps);
};
#endif

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
			struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
		(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @get_last_mgmt_timestamp:
 * @update_last_mgmt_timestamp:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
			struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
			enum ol_txrx_peer_state sta_state,
			bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
				       u8 *peer_addr, uint8_t *peer_id,
				       enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
			uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
			uint8_t *peer_addr,
			enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
			uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
			int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev, uint8_t *peer_id);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
			uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
	bool (*get_last_mgmt_timestamp)(struct cdp_pdev *ppdev,
					u8 *peer_addr,
					u8 subtype,
					qdf_time_t *timestamp);
	bool (*update_last_mgmt_timestamp)(struct cdp_pdev *ppdev,
					   u8 *peer_addr,
					   qdf_time_t timestamp,
					   u8 subtype);
};
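
/*
 * Example (illustrative sketch only): a ref-counted peer lookup through the
 * MCL peer ops above. The debug id must be one of enum peer_debug_id_type;
 * PEER_DEBUG_ID_OL_INTERNAL and use_the_peer() are assumed here purely for
 * illustration.
 *
 *	uint8_t peer_id;
 *	void *peer;
 *
 *	peer = soc->ops->peer_ops->peer_get_ref_by_addr(pdev, peer_mac,
 *							&peer_id,
 *							PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		use_the_peer(peer);
 *		soc->ops->peer_ops->peer_release_ref(peer,
 *						     PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */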

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
			uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};

/**
 * struct cdp_mob_stats_ops - mcl mob stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* CONFIG_WIN */

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl receive offload ops
 * @register_rx_offld_flush_cb:
 * @deregister_rx_offld_flush_cb:
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif

struct cdp_ops {
	struct cdp_cmn_ops          *cmn_drv_ops;
	struct cdp_ctrl_ops         *ctrl_ops;
	struct cdp_me_ops           *me_ops;
	struct cdp_mon_ops          *mon_ops;
	struct cdp_host_stats_ops   *host_stats_ops;
	struct cdp_wds_ops          *wds_ops;
	struct cdp_raw_ops          *raw_ops;
	struct cdp_pflow_ops        *pflow_ops;
#ifndef CONFIG_WIN
	struct cdp_misc_ops         *misc_ops;
	struct cdp_cfg_ops          *cfg_ops;
	struct cdp_flowctl_ops      *flowctl_ops;
	struct cdp_lflowctl_ops     *l_flowctl_ops;
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops          *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops     *rx_offld_ops;
#endif
	struct cdp_bus_ops          *bus_ops;
	struct cdp_ocb_ops          *ocb_ops;
	struct cdp_peer_ops         *peer_ops;
	struct cdp_throttle_ops     *throttle_ops;
	struct cdp_mob_stats_ops    *mob_stats_ops;
	struct cdp_tx_delay_ops     *delay_ops;
	struct cdp_pmf_ops          *pmf_ops;
#endif /* CONFIG_WIN */
};
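
/*
 * Example (illustrative sketch only): how a caller typically dispatches
 * through the registered ops table. In the driver this pattern is wrapped
 * by the cdp_* inline helpers; the NULL checks reflect that every ops
 * pointer in struct cdp_ops is optional.
 *
 *	static inline int example_get_tx_pending(ol_txrx_soc_handle soc,
 *						 struct cdp_pdev *pdev)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->cmn_drv_ops ||
 *		    !soc->ops->cmn_drv_ops->txrx_get_tx_pending)
 *			return 0;
 *
 *		return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
 *	}
 */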

#endif