/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf_m.h
 *
 * This file provides platform specific nbuf API's.
 * Included by i_qdf_nbuf.h and should not be included
 * directly from other files.
 */

#ifndef _I_QDF_NBUF_M_H
#define _I_QDF_NBUF_M_H

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 * @u: union of rx and tx data
 * @u.rx: rx data
 * @u.rx.dev: union of priv_cb_w and priv_cb_m
 *
 * @u.rx.dev.priv_cb_w:
 * @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
 * @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
 * @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
 *                                   type
 * @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @u.rx.dev.priv_cb_m:
 * @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @u.rx.dev.priv_cb_m.flush_ind: flush indication
 * @u.rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @u.rx.dev.priv_cb_m.exc_frm: exception frame
 * @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *					     sw exception bit from ring desc
 * @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
 * @u.rx.dev.priv_cb_m.fr_ds: from DS bit in RX packet
 * @u.rx.dev.priv_cb_m.to_ds: to DS bit in RX packet
 * @u.rx.dev.priv_cb_m.logical_link_id: link id of RX packet
 * @u.rx.dev.priv_cb_m.reserved1: reserved bits
 * @u.rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @u.rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
 * @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
 * @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @u.rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
 * @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
 * @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @u.rx.dev.priv_cb_m.lro_ctx: LRO context
 *
 * @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @u.rx.tcp_proto: L4 protocol is TCP
 * @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @u.rx.ipv6_proto: L3 protocol is IPV6
 * @u.rx.ip_offset: offset to IP header
 * @u.rx.tcp_offset: offset to TCP header
 * @u.rx.rx_ctx_id: Rx context id
 * @u.rx.fcs_err: FCS error
 * @u.rx.is_raw_frame: RAW frame
 * @u.rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @u.rx.tcp_udp_chksum: L4 payload checksum
 * @u.rx.tcp_win: TCP window size
 *
 * @u.rx.flow_id: 32bit flow id
 *
 * @u.rx.flag_chfrag_start: first MSDU in an AMSDU
 * @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @u.rx.flag_chfrag_end: last MSDU in an AMSDU
 * @u.rx.flag_retry: flag to indicate MSDU is retried
 * @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @u.rx.flag_is_frag: flag to indicate skb has frag list
 *
 * @u.rx.trace: combined structure for DP and protocol trace
 * @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			     + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @u.rx.trace.dp_trace: flag (Datapath trace)
 * @u.rx.trace.packet_track: RX_DATA packet
 * @u.rx.trace.rsrvd: reserved bits
 *
 * @u.rx.vdev_id: vdev_id for RX pkt
 * @u.rx.tid_val: tid value
 * @u.rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @u.tx: tx data
 * @u.tx.dev: union of priv_cb_w and priv_cb_m
 *
 * @u.tx.dev.priv_cb_w:
 * @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 *
 * @u.tx.dev.priv_cb_m:
 * @u.tx.dev.priv_cb_m.ipa: IPA-specific data
 * @u.tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @u.tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *				 + (1) CE classification enablement bit
 *				 + (2) packet type (802.3 or Ethernet type II)
 *				 + (3) packet offset (usually length of HTC/HTT descr)
 * @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @u.tx.dev.priv_cb_m.dma_option: DMA options
 * @u.tx.dev.priv_cb_m.dma_option.mgmt_desc_id: mgmt descriptor for tx
 *					        completion cb
 * @u.tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @u.tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
 * @u.tx.dev.priv_cb_m.rsvd: reserved
 * @u.tx.dev.priv_cb_m.reserved: reserved
 *
 * @u.tx.ftype: mcast2ucast, TSO, SG, MESH
 * @u.tx.vdev_id: vdev (for protocol trace)
 * @u.tx.len: length of efrag pointed by the above pointers
 *
 * @u.tx.flags: union of flag representations
 * @u.tx.flags.bits: flags represent as individual bitmasks
 * @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
 * @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @u.tx.flags.bits.flag_ext_header: extended flags
 * @u.tx.flags.bits.is_critical: flag indicating a critical frame
 * @u.tx.flags.u8: flags as a single u8
 * @u.tx.trace: combined structure for DP and protocol trace
 * @u.tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			     + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @u.tx.trace.is_packet_priv:
 * @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @u.tx.trace.to_fw: Flag to indicate send this packet to FW
 * @u.tx.trace.htt2_frm: flag (high-latency path only)
 * @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *			  + (MGMT_ACTION)] - 4 bits
 * @u.tx.trace.dp_trace: flag (Datapath trace)
 * @u.tx.trace.is_bcast: flag (Broadcast packet)
 * @u.tx.trace.is_mcast: flag (Multicast packet)
 * @u.tx.trace.packet_type: flag (Packet type)
 * @u.tx.trace.print: enable packet logging
 *
 * @u.tx.vaddr: virtual address of ~
 * @u.tx.paddr: physical/DMA address of ~
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 ipa_smmu_map : 1;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind_or_sw_excpt:5,
						 lmac_id:2,
						 fr_ds:1,
						 to_ds:1,
						 logical_link_id:4,
						 reserved1:10;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				 tcp_proto:1,
				 tcp_pure_ack:1,
				 ipv6_proto:1,
				 ip_offset:7,
				 tcp_offset:7,
				 rx_ctx_id:4,
				 fcs_err:1,
				 is_raw_frame:1,
				 num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							 priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						is_critical:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					htt2_frm:1,
					proto_type:3;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:4,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

/* Hard guarantee: the control block must fit inside skb->cb (48 bytes). */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 * When used as R-values, they effectively function as "get" macros
 * When used as L-values, they effectively function as "set" macros
 */

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

/* Rx accessor macros (common rx overlay fields) */
#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_FROM_DS(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.dev.priv_cb_m.fr_ds)

#define QDF_NBUF_CB_RX_TO_DS(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.dev.priv_cb_m.to_ds)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)

#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)

#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/* Rx accessors for the mobile (priv_cb_m) overlay */
#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_seq_num)
#define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_ack_num)
#define QDF_NBUF_CB_RX_LRO_CTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.lro_ctx)

#define QDF_NBUF_CB_TX_IPA_OWNED(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.owned)
#define QDF_NBUF_CB_TX_IPA_PRIV(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.priv)
#define QDF_NBUF_CB_TX_DESC_ID(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.desc_id)
#define QDF_NBUF_CB_MGMT_TXRX_DESC_ID(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.mgmt_desc_id)
#define QDF_NBUF_CB_TX_DMA_BI_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
	dma_option.bi_map)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
	flag_notify_comp)

#define QDF_NBUF_CB_RX_PEER_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
	wifi3.peer_id)

#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
	wifi3.msdu_len)

#define QDF_NBUF_CB_RX_MAP_IDX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
	wifi2.map_index)

#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	peer_cached_buf_frm)

#define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.flush_ind)

#define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	packet_buf_pool)

#define QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	l3_hdr_pad)

#define QDF_NBUF_CB_RX_PACKET_EXC_FRAME(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	exc_frm)

#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	ipa_smmu_map)

#define QDF_NBUF_CB_RX_PACKET_REO_DEST_IND_OR_SW_EXCPT(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	reo_dest_ind_or_sw_excpt)

#define QDF_NBUF_CB_RX_PACKET_LMAC_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	lmac_id)

#define QDF_NBUF_CB_RX_LOGICAL_LINK_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	logical_link_id)

/* IPA ownership helpers: thin wrappers over the ipa.owned / ipa.priv bits */
#define __qdf_nbuf_ipa_owned_get(skb) \
	QDF_NBUF_CB_TX_IPA_OWNED(skb)

#define __qdf_nbuf_ipa_owned_set(skb) \
	(QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1)

#define __qdf_nbuf_ipa_owned_clear(skb) \
	(QDF_NBUF_CB_TX_IPA_OWNED(skb) = 0)

#define __qdf_nbuf_ipa_priv_get(skb) \
	QDF_NBUF_CB_TX_IPA_PRIV(skb)

#define __qdf_nbuf_ipa_priv_set(skb, priv) \
	(QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv))

/* NOTE(review): token-identical redefinition of the macro defined earlier in
 * this file; benign per the C standard, but a candidate for removal.
 */
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define __qdf_nbuf_data_attr_get(skb) \
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_map_nbytes_single() - map nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: QDF_STATUS
 */
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	/* Simulation host: no real DMA engine, so cache the data pointer
	 * itself as the "physical" address.
	 */
	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;
	QDF_STATUS ret;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	/* book-keep mapped bytes only on successful mappings */
	if (QDF_IS_STATUS_SUCCESS(ret))
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, true);
	return ret;
}
#endif

/**
 * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	/* nothing to do on the simulation host; map was a no-op */
}

#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);

	/* a zero paddr means the buffer was never mapped; skip unmap */
	if (qdf_likely(paddr)) {
		__qdf_record_nbuf_nbytes(
			__qdf_nbuf_get_end_offset(buf), dir, false);
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
		return;
	}
}
#endif

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	/* expand to full head/tail room, wipe payload, then shrink back */
	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

/**
 * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
 * @skb: skb pointer whose cb is updated with vdev id information
 * @vdev_id: vdev id to be updated in cb
 *
 * Return: void
 */
static inline void
qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id)
{
	QDF_NBUF_CB_RX_VDEV_ID(skb) = vdev_id;
}

/**
 * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_init_replenish_timer(void);

/**
 * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_deinit_replenish_timer(void);

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are frags
 * then it returns total length.
783 * 784 * Return: network buffer length 785 */ 786 static inline size_t __qdf_nbuf_len(struct sk_buff *skb) 787 { 788 int i, extra_frag_len = 0; 789 790 i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb); 791 if (i > 0) 792 extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb); 793 794 return extra_frag_len + skb->len; 795 } 796 797 /** 798 * __qdf_nbuf_num_frags_init() - init extra frags 799 * @skb: sk buffer 800 * 801 * Return: none 802 */ 803 static inline 804 void __qdf_nbuf_num_frags_init(struct sk_buff *skb) 805 { 806 QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0; 807 } 808 809 /** 810 * __qdf_nbuf_push_head() - Push data in the front 811 * @skb: Pointer to network buffer 812 * @size: size to be pushed 813 * 814 * Return: New data pointer of this buf after data has been pushed, 815 * or NULL if there is not enough room in this buf. 816 */ 817 static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size) 818 { 819 if (QDF_NBUF_CB_PADDR(skb)) 820 QDF_NBUF_CB_PADDR(skb) -= size; 821 822 return skb_push(skb, size); 823 } 824 825 826 /** 827 * __qdf_nbuf_pull_head() - pull data out from the front 828 * @skb: Pointer to network buffer 829 * @size: size to be popped 830 * 831 * Return: New data pointer of this buf after data has been popped, 832 * or NULL if there is not sufficient data to pull. 
833 */ 834 static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size) 835 { 836 if (QDF_NBUF_CB_PADDR(skb)) 837 QDF_NBUF_CB_PADDR(skb) += size; 838 839 return skb_pull(skb, size); 840 } 841 842 /** 843 * qdf_nbuf_is_intra_bss() - get intra bss bit 844 * @buf: Network buffer 845 * 846 * Return: integer value - 0/1 847 */ 848 static inline int qdf_nbuf_is_intra_bss(struct sk_buff *buf) 849 { 850 return 0; 851 } 852 853 /** 854 * qdf_nbuf_set_intra_bss() - set intra bss bit 855 * @buf: Network buffer 856 * @val: 0/1 857 * 858 * Return: void 859 */ 860 static inline void qdf_nbuf_set_intra_bss(struct sk_buff *buf, uint8_t val) 861 { 862 } 863 864 /** 865 * qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer 866 * 867 * This function initializes the nbuf alloc fail replenish timer. 868 * 869 * Return: void 870 */ 871 static inline void 872 qdf_nbuf_init_replenish_timer(void) 873 { 874 __qdf_nbuf_init_replenish_timer(); 875 } 876 877 /** 878 * qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer 879 * 880 * This function deinitializes the nbuf alloc fail replenish timer. 881 * 882 * Return: void 883 */ 884 static inline void 885 qdf_nbuf_deinit_replenish_timer(void) 886 { 887 __qdf_nbuf_deinit_replenish_timer(); 888 } 889 890 static inline void 891 __qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end) {} 892 893 static inline void 894 __qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end) {} 895 896 static inline void 897 __qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end) {} 898 899 static inline void 900 __qdf_dsb(void) {} 901 902 static inline void 903 __qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end) {} 904 905 #endif /*_I_QDF_NBUF_M_H */ 906