/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"

#define DP_INVALID_VDEV_ID 0xFF

#define DP_TX_MAX_NUM_FRAGS 6

/*
 * DP_TX_DESC_FLAG_FRAG must always be defined as 0x1;
 * please do not change this flag's definition.
 */
#define DP_TX_DESC_FLAG_FRAG		0x1
#define DP_TX_DESC_FLAG_TO_FW		0x2
#define DP_TX_DESC_FLAG_SIMPLE		0x4
#define DP_TX_DESC_FLAG_RAW		0x8
#define DP_TX_DESC_FLAG_MESH		0x10
#define DP_TX_DESC_FLAG_QUEUED_TX	0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
#define DP_TX_DESC_FLAG_ME		0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
#define DP_TX_DESC_FLAG_ALLOCATED	0x200
#define DP_TX_DESC_FLAG_MESH_MODE	0x400

#define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1

#define DP_TX_FREE_SINGLE_BUF(soc, buf)                        \
do {                                                           \
	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);   \
	qdf_nbuf_free(buf);                                    \
} while (0)

#define OCB_HEADER_VERSION 1

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
#define DP_TX_QUEUE_MASK 0x3

/* Number of dwords in htt_tx_msdu_desc_ext2_t */
#define DP_TX_MSDU_INFO_META_DATA_DWORDS 7

/**
 * struct dp_tx_frag_info_s - Per-fragment buffer information
 * @vaddr: HLOS virtual address of the buffer
 * @paddr_lo: lower 32 bits of the physical address
 * @paddr_hi: upper bits of the physical address
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};

/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
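
/*
 * Illustrative sketch (not part of this header's API): walking a segment
 * chain built from the structures above to total its length. The helper
 * name seg_chain_total_len is hypothetical.
 *
 *	static inline uint32_t seg_chain_total_len(struct dp_tx_seg_info_s *seg)
 *	{
 *		uint32_t len = 0;
 *
 *		while (seg) {
 *			len += seg->total_len;
 *			seg = seg->next;
 *		}
 *		return len;
 *	}
 */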

/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to the current segment descriptor to be processed;
 *            head of the chain of descriptors for SG frames and
 *            multicast-unicast converted packets
 *
 * Used for SG (802.3 or Raw) frames and multicast-unicast converted frames
 * to carry fragmentation information.
 * Raw frames are handed over to the driver as an SKB chain with MPDU
 * boundaries indicated through flags in the SKB CB (first_msdu and
 * last_msdu). This chain is converted into a set of skb sg (nr_frags)
 * structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};

/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * A Tx queue identifies the software (descriptor pool) and hardware
 * (TCL ring id) resources to be used for a particular transmit queue
 * (obtained from skb_queue_mapping in the case of Linux).
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};

/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 *		(chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 *
 * This structure holds the complete MSDU information needed to program the
 * hardware TCL and MSDU extension descriptors for different frame types
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
};
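
/*
 * Illustrative sketch (hypothetical caller, based on the structure above):
 * minimal initialization of an MSDU descriptor for a regular (non-TSO,
 * non-SG) frame before handing it to the send path. The use of
 * HTT_TX_EXT_TID_INVALID as a "no TID override" value is an assumption
 * for illustration; vdev and nbuf come from the caller's context.
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	msdu_info.frm_type = dp_tx_frm_std;
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 */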

/**
 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
 * @soc: core txrx context
 * @index: index of ring to deinit
 *
 * Deinit one TCL and one WBM2SW release ring on an as-needed basis, using
 * the index of the respective TCL/WBM2SW release ring in the soc structure.
 * For example, if the index is 2 then &soc->tcl_data_ring[2]
 * and &soc->tx_comp_ring[2] will be deinitialized.
 *
 * Return: none
 */
void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);

QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);

void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc);
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc);

void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);

/**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: opaque DP handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: opaque DP handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);

QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev);

qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
				    qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
				qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);

qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
					      uint8_t vdev_id,
					      qdf_nbuf_t nbuf,
					      struct cdp_tx_exception_metadata *tx_exc);

qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf);

qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);

#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif

#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif
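
/*
 * Illustrative sketch (hypothetical caller): per the convention documented
 * for dp_tx_non_std() above, the dp_tx_send*() entry points return NULL
 * when the nbuf has been consumed and the original nbuf when the send
 * fails, so the caller owns any returned buffer. soc_hdl and vdev_id are
 * assumed to come from the caller's context.
 *
 *	qdf_nbuf_t ret;
 *
 *	ret = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *	if (ret)
 *		qdf_nbuf_free(ret);	(nbuf was not accepted by DP)
 */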

int
dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: no. of packets/descriptors that can be serviced in one loop
 *
 * This function collects hardware release ring element contents and
 * handles the descriptor contents. Based on the contents, it frees the
 * packet or handles error conditions.
 *
 * Return: number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_srng, uint8_t ring_id,
			    uint32_t quota);

QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifndef FEATURE_WDS
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif

#ifndef ATH_SUPPORT_IQUE
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}

#else
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info);

void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
#endif

/**
 * dp_tx_hw_to_qdf() - convert hw status to qdf status
 * @status: hw status
 *
 * Return: qdf tx rx status
 */
static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
{
	switch (status) {
	case HAL_TX_TQM_RR_FRAME_ACKED:
		return QDF_TX_RX_STATUS_OK;
	case HAL_TX_TQM_RR_REM_CMD_REM:
	case HAL_TX_TQM_RR_REM_CMD_TX:
	case HAL_TX_TQM_RR_REM_CMD_NOTX:
	case HAL_TX_TQM_RR_REM_CMD_AGED:
		return QDF_TX_RX_STATUS_FW_DISCARD;
	default:
		return QDF_TX_RX_STATUS_DEFAULT;
	}
}
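
/*
 * Illustrative sketch (hypothetical completion path): mapping a HAL TQM
 * release reason to a QDF status before reporting it upward. "ts" is
 * assumed to be a filled struct hal_tx_completion_status.
 *
 *	enum qdf_dp_tx_rx_status st = dp_tx_hw_to_qdf(ts->status);
 *
 *	if (st == QDF_TX_RX_STATUS_OK)
 *		(count the frame as acked)
 *	else if (st == QDF_TX_RX_STATUS_FW_DISCARD)
 *		(count the frame as dropped by FW/TQM)
 */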

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP virtual device handle
 * @nbuf: buffer pointer
 * @queue: queue ids container for nbuf
 *
 * A TX queue is identified by two IDs: the software descriptor pool id and
 * the DMA ring id. Depending on the TX feature set and hardware
 * configuration, the combination of queue IDs can differ.
 * For example:
 * With XPS enabled, all TX descriptor pools and DMA rings are assigned
 * per CPU id.
 * With no XPS and lock-based resource protection, descriptor pool ids are
 * different for each vdev, and the DMA ring id is the same as the single
 * pdev id.
 *
 * Return: none
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
				DP_TX_QUEUE_MASK;

	queue->desc_pool_id = queue_offset;
	queue->ring_id = qdf_get_cpu();

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s, pool_id:%d ring_id: %d",
		  __func__, queue->desc_pool_id, queue->ring_id);
}

/**
 * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
 * @soc: DP soc structure pointer
 * @ring_id: transmit queue/ring_id to be used when XPS is enabled
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return soc->tcl_cmd_credit_ring.hal_srng;

	return soc->tcl_data_ring[ring_id].hal_srng;
}

/**
 * dp_tx_get_rbm_id() - Get the RBM ID for data transmission completion
 * @soc: DP soc structure pointer
 * @ring_id: transmit queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to the given ring
 */
static inline uint8_t dp_tx_get_rbm_id(struct dp_soc *soc,
				       uint8_t ring_id)
{
	return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
		HAL_WBM_SW2_BM_ID);
}

#else /* QCA_OL_TX_MULTIQ_SUPPORT */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s, pool_id:%d ring_id: %d",
		  __func__, queue->desc_pool_id, queue->ring_id);
}

static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}

static inline uint8_t dp_tx_get_rbm_id(struct dp_soc *soc,
				       uint8_t ring_id)
{
	return (ring_id + HAL_WBM_SW0_BM_ID);
}
#endif

#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success, non-zero otherwise
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: none
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
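
/*
 * Illustrative pairing (hypothetical transmit path): the ring access
 * helpers are expected to bracket descriptor programming, whichever of
 * the lock-less variants above or the locked variants in the #else
 * branch below is compiled in.
 *
 *	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
 *		(ring busy or in bad state: drop or requeue the frame)
 *		return;
 *	}
 *	(reserve a TCL descriptor and program it)
 *	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
 */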

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: none
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	/* No-op in the lock-less configuration */
}

#else
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif

#ifdef FEATURE_PERPKT_INFO
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);

void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf);
#endif

void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);

#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type);

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
}
/* TODO TX_FEATURE_NOT_YET */

#ifndef WLAN_TX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_tx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  uint8_t value, uint8_t *peer_mac)
{
	return QDF_STATUS_SUCCESS;
}
#endif

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free);
#endif /* __DP_TX_H */