1 /* 2 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 #ifndef _DP_PEER_H_ 19 #define _DP_PEER_H_ 20 21 #include <qdf_types.h> 22 #include <qdf_lock.h> 23 #include "dp_types.h" 24 25 #define DP_INVALID_PEER_ID 0xffff 26 27 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000 28 /** 29 * __dp_peer_find_by_id() - Returns peer object given the peer id 30 * 31 * @soc : core DP soc context 32 * @peer_id : peer id from peer object can be retrieved 33 * 34 * Return: struct dp_peer*: Pointer to DP peer object 35 */ 36 static inline struct dp_peer * 37 __dp_peer_find_by_id(struct dp_soc *soc, 38 uint16_t peer_id) 39 { 40 struct dp_peer *peer; 41 42 /* TODO: Hold lock */ 43 peer = (peer_id >= soc->max_peers) ? 
NULL : 44 soc->peer_id_to_obj_map[peer_id]; 45 46 return peer; 47 } 48 49 #ifdef PEER_PROTECTED_ACCESS 50 /** 51 * dp_peer_find_by_id() - Returns peer object given the peer id 52 * if delete_in_progress in not set for peer 53 * 54 * @soc : core DP soc context 55 * @peer_id : peer id from peer object can be retrieved 56 * 57 * Return: struct dp_peer*: Pointer to DP peer object 58 */ 59 static inline 60 struct dp_peer *dp_peer_find_by_id(struct dp_soc *soc, 61 uint16_t peer_id) 62 { 63 struct dp_peer *peer; 64 65 qdf_spin_lock_bh(&soc->peer_ref_mutex); 66 peer = __dp_peer_find_by_id(soc, peer_id); 67 if (!peer || (peer && peer->delete_in_progress)) { 68 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 69 return NULL; 70 } 71 qdf_atomic_inc(&peer->ref_cnt); 72 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 73 74 return peer; 75 } 76 #else 77 static inline struct dp_peer * 78 dp_peer_find_by_id(struct dp_soc *soc, 79 uint16_t peer_id) 80 { 81 struct dp_peer *peer; 82 83 peer = __dp_peer_find_by_id (soc, peer_id); 84 if (peer && peer->delete_in_progress) { 85 return NULL; 86 } 87 88 return peer; 89 } 90 #endif /* PEER_LOCK_REF_PROTECT */ 91 92 #ifdef PEER_CACHE_RX_PKTS 93 /** 94 * dp_rx_flush_rx_cached() - flush cached rx frames 95 * @peer: peer 96 * @drop: set flag to drop frames 97 * 98 * Return: None 99 */ 100 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop); 101 #else 102 static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 103 { 104 } 105 #endif 106 107 static inline void 108 dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer) 109 { 110 qdf_spin_lock_bh(&peer->peer_info_lock); 111 peer->state = OL_TXRX_PEER_STATE_DISC; 112 qdf_spin_unlock_bh(&peer->peer_info_lock); 113 114 dp_rx_flush_rx_cached(peer, true); 115 } 116 117 void dp_print_ast_stats(struct dp_soc *soc); 118 void dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, 119 uint16_t hw_peer_id, uint8_t vdev_id, 120 uint8_t *peer_mac_addr, uint16_t ast_hash, 121 uint8_t 
is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

/* AST (address search table) entry management APIs */
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		    uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		    uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);


uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry);

/*
 * dp_peer_find_by_id_valid - check if peer exists for given id
 * @soc: core DP soc context
 * @peer_id: peer id from which peer object is retrieved
 *
 * Return: true if peer exists, false otherwise
 */
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);

/*
 * NOTE: this macro references a local/parameter named 'soc' from the
 * expansion site; it is only valid where such a dp_soc pointer is in
 * scope.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)

/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void
dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif /* QCA_MULTIPASS_SUPPORT */


#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif /* QCA_PEER_MULTIQ_SUPPORT */

/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *	   error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif /* WLAN_TX_PKT_CAPTURE_ENH */

#endif /* _DP_PEER_H_ */