1 /* 2 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * DOC: qdf_nbuf.c 21 * QCA driver framework(QDF) network buffer management APIs 22 */ 23 24 #include <linux/hashtable.h> 25 #include <linux/kernel.h> 26 #include <linux/version.h> 27 #include <linux/skbuff.h> 28 #include <linux/module.h> 29 #include <linux/proc_fs.h> 30 #include <qdf_atomic.h> 31 #include <qdf_types.h> 32 #include <qdf_nbuf.h> 33 #include "qdf_flex_mem.h" 34 #include <qdf_mem.h> 35 #include <qdf_status.h> 36 #include <qdf_lock.h> 37 #include <qdf_trace.h> 38 #include <qdf_debugfs.h> 39 #include <net/ieee80211_radiotap.h> 40 #include <qdf_module.h> 41 #include <qdf_atomic.h> 42 #include <pld_common.h> 43 #include <qdf_module.h> 44 #include "qdf_str.h" 45 46 #if defined(FEATURE_TSO) 47 #include <net/ipv6.h> 48 #include <linux/ipv6.h> 49 #include <linux/tcp.h> 50 #include <linux/if_vlan.h> 51 #include <linux/ip.h> 52 #endif /* FEATURE_TSO */ 53 54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) 55 56 #define qdf_nbuf_users_inc atomic_inc 57 #define qdf_nbuf_users_dec atomic_dec 58 #define qdf_nbuf_users_set atomic_set 59 #define qdf_nbuf_users_read atomic_read 60 #else 61 #define qdf_nbuf_users_inc refcount_inc 
#define qdf_nbuf_users_dec refcount_dec
#define qdf_nbuf_users_set refcount_set
#define qdf_nbuf_users_read refcount_read
#endif /* KERNEL_VERSION(4, 13, 0) */

#define IEEE80211_RADIOTAP_VHT_BW_20 0
#define IEEE80211_RADIOTAP_VHT_BW_40 1
#define IEEE80211_RADIOTAP_VHT_BW_80 2
#define IEEE80211_RADIOTAP_VHT_BW_160 3

#define RADIOTAP_VHT_BW_20 0
#define RADIOTAP_VHT_BW_40 1
#define RADIOTAP_VHT_BW_80 4
#define RADIOTAP_VHT_BW_160 11

/* channel number to freq conversion */
#define CHANNEL_NUM_14 14
#define CHANNEL_NUM_15 15
#define CHANNEL_NUM_27 27
#define CHANNEL_NUM_35 35
#define CHANNEL_NUM_182 182
#define CHANNEL_NUM_197 197
#define CHANNEL_FREQ_2484 2484
#define CHANNEL_FREQ_2407 2407
#define CHANNEL_FREQ_2512 2512
#define CHANNEL_FREQ_5000 5000
#define CHANNEL_FREQ_4000 4000
#define FREQ_MULTIPLIER_CONST_5MHZ 5
#define FREQ_MULTIPLIER_CONST_20MHZ 20
#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
#define RADIOTAP_CCK_CHANNEL 0x0020
#define RADIOTAP_OFDM_CHANNEL 0x0040

#ifdef CONFIG_MCL
#include <qdf_mc_timer.h>

/* Tracks consecutive nbuf allocation failures; track_timer fires
 * QDF_NBUF_ALLOC_EXPIRE_TIMER_MS after the first failure so a persistent
 * failure streak can be reported (see qdf_replenish_expire_handler).
 */
struct qdf_track_timer {
	qdf_mc_timer_t track_timer;
	qdf_atomic_t alloc_fail_cnt;
};

static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS 5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD 50
#endif

/* Packet Counter */
static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
#ifdef QDF_NBUF_GLOBAL_COUNT
#define NBUF_DEBUGFS_NAME "nbuf_counters"
static qdf_atomic_t nbuf_count;
#endif

/**
 * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
 *
 * Each printed value is the number of packets currently held "inside" a
 * layer, computed as the difference between adjacent per-layer counters.
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_print("Current Snapshot of the Driver:\n");
	qdf_print("Data Packets:\n");
	qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		   nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		   nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_print(" HTC %d HIF %d CE %d TX_COMP %d\n",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_print("Mgmt Packets:\n");
	qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_display);

/**
 * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
 * @packet_type : packet type either mgmt/data
 * @current_state : layer at which the packet currently present
 *
 * Return: none
 */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
						 uint8_t current_state)
{
	switch (packet_type) {
	case QDF_NBUF_TX_PKT_MGMT_TRACK:
		nbuf_tx_mgmt[current_state]++;
		break;
	case QDF_NBUF_TX_PKT_DATA_TRACK:
		nbuf_tx_data[current_state]++;
		break;
	default:
		/* WMI and other untracked packet types are ignored */
		break;
	}
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_update);

/**
 * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_clear(void)
{
	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);

/**
 * qdf_nbuf_set_state() - Updates the packet state
 * @nbuf: network buffer
 * @current_state : layer at which the packet currently is
 *
 * This function updates the packet state to the layer at which the packet
 * currently is
 *
 * Return: none
 */
void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only Mgmt, Data Packets are tracked. WMI messages
	 * such as scan commands are not tracked
	 */
	uint8_t packet_type;

	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
	    (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
		return;
	}
	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(packet_type,
				      current_state);
}
qdf_export_symbol(qdf_nbuf_set_state);

#ifdef CONFIG_MCL
/**
 * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
 *
 * This function starts the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_start_replenish_timer(void)
{
	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
	/* start only once; subsequent failures just bump the counter */
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_start(&alloc_track_timer.track_timer,
				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
}

/**
 * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
 *
 * This function stops the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_stop_replenish_timer(void)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
		return;

	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
}

/**
 * qdf_replenish_expire_handler - Replenish expire handler
 *
 * This function triggers when the alloc fail replenish timer expires.
 *
 * Return: void
 */
static void qdf_replenish_expire_handler(void *arg)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));

		/* Error handling here */
	}
}

/**
 * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}

/**
 * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}
#else

static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}
#endif

/* globals do not need to be initialized to NULL/0 */
qdf_nbuf_trace_update_t qdf_trace_update_cb;
qdf_nbuf_free_t nbuf_free_cb;

#ifdef QDF_NBUF_GLOBAL_COUNT

/**
 * __qdf_nbuf_count_get() - get nbuf global count
 *
 * Return: nbuf global count
 */
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_get);

/**
 * __qdf_nbuf_count_inc() - increment nbuf global count
 * @nbuf: sk buff (unused; only the global counter is touched)
 *
 * Return: void
 */
void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	qdf_atomic_inc(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_inc);

/**
 * __qdf_nbuf_count_dec() - decrement nbuf global count
 * @nbuf: sk buff (unused; only the global counter is touched)
 *
 * Return: void
 */
void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_atomic_dec(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_dec);
#endif


/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 *
 * This allocates an nbuf aligns if needed and reserves some space in the front,
 * since the reserve is done after alignment the reserve value if being
 * unaligned will result in an unaligned address.
 * Return: nbuf or %NULL if no memory
 */
#if defined(QCA_WIFI_QCA8074) && defined (BUILD_X86)
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio)
{
	struct sk_buff *skb;
	unsigned long offset;
	uint32_t lowmem_alloc_tries = 0;

	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);

	if (skb)
		goto skb_alloc;

	/* fall back to the platform driver's pre-allocated nbuf pool */
	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		pr_info("ERROR:NBUF alloc failed\n");
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
	 * Though we are trying to reserve low memory upfront to prevent this,
	 * we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			/* NOTE(review): this path leaks the low-memory skb and
			 * does not bump the global nbuf count - appears
			 * intentional for the emulation-only build, confirm.
			 */
			qdf_print("%s Failed \n",__func__);
			return NULL;
		} else {
			/* Not freeing to make sure it
			 * will not get allocated again
			 */
			goto realloc;
		}
	}
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#else
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio)
{
	struct sk_buff *skb;
	unsigned long offset;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3 page.
		 *__netdev_alloc_skb has 4k page fallback option just in case of
		 * failing high order page allocation so we don't need to be
		 * hard. Make kcompactd rest in piece.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);

	if (skb)
		goto skb_alloc;

	/* fall back to the platform driver's pre-allocated nbuf pool */
	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		pr_err_ratelimited("ERROR:NBUF alloc failed, size = %zu\n",
				   size);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	} else {
		__qdf_nbuf_stop_replenish_timer();
	}

skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#endif
qdf_export_symbol(__qdf_nbuf_alloc);

/**
 * __qdf_nbuf_free() - free the nbuf its interrupt safe
 * @skb: Pointer to network buffer
 *
 * Return: none
 */

#ifdef CONFIG_MCL
void __qdf_nbuf_free(struct sk_buff *skb)
{
	/* skbs taken from the pre-alloc pool are returned there, not freed */
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	if (nbuf_free_cb)
		nbuf_free_cb(skb);
	else
		dev_kfree_skb_any(skb);
}
#else
void __qdf_nbuf_free(struct sk_buff *skb)
{
	/* skbs taken from the pre-alloc pool are returned there, not freed */
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	dev_kfree_skb_any(skb);
}
#endif

qdf_export_symbol(__qdf_nbuf_free);

#ifdef NBUF_MEMORY_DEBUG
/* event kinds recorded in the nbuf history ring buffer */
enum qdf_nbuf_event_type {
	QDF_NBUF_ALLOC,
	QDF_NBUF_FREE,
	QDF_NBUF_MAP,
	QDF_NBUF_UNMAP,
};

struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
};

#define QDF_NBUF_HISTORY_SIZE 4096
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];

/* Advance the shared ring-buffer index atomically; the index is pulled
 * back by size once it wraps so the counter stays bounded.
 */
static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}

static void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	/* record one alloc/free/map/unmap event in the global ring buffer */
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	event->nbuf = nbuf;
	qdf_str_lcopy(event->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
}

/* one entry per currently-mapped nbuf, keyed by nbuf pointer */
struct qdf_nbuf_map_metadata {
	struct hlist_node node;
	qdf_nbuf_t nbuf;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
};

DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
			 sizeof(struct qdf_nbuf_map_metadata), 0);
#define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
static qdf_spinlock_t qdf_nbuf_map_lock;

static void qdf_nbuf_map_tracking_init(void)
{
	qdf_flex_mem_init(&qdf_nbuf_map_pool);
	hash_init(qdf_nbuf_map_ht);
	qdf_spinlock_create(&qdf_nbuf_map_lock);
}

void qdf_nbuf_map_check_for_leaks(void)
{
	struct qdf_nbuf_map_metadata *meta;
	int bucket;
	uint32_t count = 0;
	bool is_empty;

	qdf_flex_mem_release(&qdf_nbuf_map_pool);

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	is_empty = hash_empty(qdf_nbuf_map_ht);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	if (is_empty)
		return;

	qdf_err("Nbuf map without unmap events detected!");
	qdf_err("------------------------------------------------------------");

	/* Hold the lock for the entire iteration for safe list/meta access. We
	 * are explicitly preferring the chance to watchdog on the print, over
	 * the posibility of invalid list/memory access. Since we are going to
	 * panic anyway, the worst case is loading up the crash dump to find out
	 * what was in the hash table.
636 */ 637 qdf_spin_lock_irqsave(&qdf_nbuf_map_lock); 638 hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) { 639 count++; 640 qdf_err("0x%pk @ %s:%u", 641 meta->nbuf, meta->file, meta->line); 642 } 643 qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock); 644 645 panic("%u fatal nbuf map without unmap events detected!", count); 646 } 647 648 static void qdf_nbuf_map_tracking_deinit(void) 649 { 650 qdf_nbuf_map_check_for_leaks(); 651 qdf_spinlock_destroy(&qdf_nbuf_map_lock); 652 qdf_flex_mem_deinit(&qdf_nbuf_map_pool); 653 } 654 655 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf) 656 { 657 struct qdf_nbuf_map_metadata *meta; 658 659 hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) { 660 if (meta->nbuf == nbuf) 661 return meta; 662 } 663 664 return NULL; 665 } 666 667 static QDF_STATUS 668 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line) 669 { 670 struct qdf_nbuf_map_metadata *meta; 671 672 QDF_BUG(nbuf); 673 if (!nbuf) { 674 qdf_err("Cannot map null nbuf"); 675 return QDF_STATUS_E_INVAL; 676 } 677 678 qdf_spin_lock_irqsave(&qdf_nbuf_map_lock); 679 meta = qdf_nbuf_meta_get(nbuf); 680 qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock); 681 if (meta) 682 QDF_DEBUG_PANIC( 683 "Double nbuf map detected @ %s:%u; last map from %s:%u", 684 kbasename(file), line, meta->file, meta->line); 685 686 meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool); 687 if (!meta) { 688 qdf_err("Failed to allocate nbuf map tracking metadata"); 689 return QDF_STATUS_E_NOMEM; 690 } 691 692 meta->nbuf = nbuf; 693 qdf_str_lcopy(meta->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE); 694 meta->line = line; 695 696 qdf_spin_lock_irqsave(&qdf_nbuf_map_lock); 697 hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf); 698 qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock); 699 700 qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP); 701 702 return QDF_STATUS_SUCCESS; 703 } 704 705 static void 706 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t 
line) 707 { 708 struct qdf_nbuf_map_metadata *meta; 709 710 QDF_BUG(nbuf); 711 if (!nbuf) { 712 qdf_err("Cannot unmap null nbuf"); 713 return; 714 } 715 716 qdf_spin_lock_irqsave(&qdf_nbuf_map_lock); 717 meta = qdf_nbuf_meta_get(nbuf); 718 719 if (!meta) 720 QDF_DEBUG_PANIC( 721 "Double nbuf unmap or unmap without map detected @ %s:%u", 722 kbasename(file), line); 723 724 hash_del(&meta->node); 725 qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock); 726 727 qdf_flex_mem_free(&qdf_nbuf_map_pool, meta); 728 729 qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP); 730 } 731 732 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev, 733 qdf_nbuf_t buf, 734 qdf_dma_dir_t dir, 735 const char *file, 736 uint32_t line) 737 { 738 QDF_STATUS status; 739 740 status = qdf_nbuf_track_map(buf, file, line); 741 if (QDF_IS_STATUS_ERROR(status)) 742 return status; 743 744 status = __qdf_nbuf_map(osdev, buf, dir); 745 if (QDF_IS_STATUS_ERROR(status)) 746 qdf_nbuf_untrack_map(buf, file, line); 747 748 return status; 749 } 750 751 qdf_export_symbol(qdf_nbuf_map_debug); 752 753 void qdf_nbuf_unmap_debug(qdf_device_t osdev, 754 qdf_nbuf_t buf, 755 qdf_dma_dir_t dir, 756 const char *file, 757 uint32_t line) 758 { 759 qdf_nbuf_untrack_map(buf, file, line); 760 __qdf_nbuf_unmap_single(osdev, buf, dir); 761 } 762 763 qdf_export_symbol(qdf_nbuf_unmap_debug); 764 765 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev, 766 qdf_nbuf_t buf, 767 qdf_dma_dir_t dir, 768 const char *file, 769 uint32_t line) 770 { 771 QDF_STATUS status; 772 773 status = qdf_nbuf_track_map(buf, file, line); 774 if (QDF_IS_STATUS_ERROR(status)) 775 return status; 776 777 status = __qdf_nbuf_map_single(osdev, buf, dir); 778 if (QDF_IS_STATUS_ERROR(status)) 779 qdf_nbuf_untrack_map(buf, file, line); 780 781 return status; 782 } 783 784 qdf_export_symbol(qdf_nbuf_map_single_debug); 785 786 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev, 787 qdf_nbuf_t buf, 788 qdf_dma_dir_t dir, 789 const char *file, 790 uint32_t 
line) 791 { 792 qdf_nbuf_untrack_map(buf, file, line); 793 __qdf_nbuf_unmap_single(osdev, buf, dir); 794 } 795 796 qdf_export_symbol(qdf_nbuf_unmap_single_debug); 797 798 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev, 799 qdf_nbuf_t buf, 800 qdf_dma_dir_t dir, 801 int nbytes, 802 const char *file, 803 uint32_t line) 804 { 805 QDF_STATUS status; 806 807 status = qdf_nbuf_track_map(buf, file, line); 808 if (QDF_IS_STATUS_ERROR(status)) 809 return status; 810 811 status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes); 812 if (QDF_IS_STATUS_ERROR(status)) 813 qdf_nbuf_untrack_map(buf, file, line); 814 815 return status; 816 } 817 818 qdf_export_symbol(qdf_nbuf_map_nbytes_debug); 819 820 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev, 821 qdf_nbuf_t buf, 822 qdf_dma_dir_t dir, 823 int nbytes, 824 const char *file, 825 uint32_t line) 826 { 827 qdf_nbuf_untrack_map(buf, file, line); 828 __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes); 829 } 830 831 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug); 832 833 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev, 834 qdf_nbuf_t buf, 835 qdf_dma_dir_t dir, 836 int nbytes, 837 const char *file, 838 uint32_t line) 839 { 840 QDF_STATUS status; 841 842 status = qdf_nbuf_track_map(buf, file, line); 843 if (QDF_IS_STATUS_ERROR(status)) 844 return status; 845 846 status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes); 847 if (QDF_IS_STATUS_ERROR(status)) 848 qdf_nbuf_untrack_map(buf, file, line); 849 850 return status; 851 } 852 853 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug); 854 855 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev, 856 qdf_nbuf_t buf, 857 qdf_dma_dir_t dir, 858 int nbytes, 859 const char *file, 860 uint32_t line) 861 { 862 qdf_nbuf_untrack_map(buf, file, line); 863 __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes); 864 } 865 866 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug); 867 #endif /* NBUF_MEMORY_DEBUG */ 868 869 /** 870 * __qdf_nbuf_map() - map a 
 * buffer to local bus address space
 * @osdev: OS device
 * @bmap: Bitmap
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#ifdef QDF_OS_DEBUG
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	struct skb_shared_info *sh = skb_shinfo(skb);

	qdf_assert((dir == QDF_DMA_TO_DEVICE)
		   || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's only a single fragment.
	 * To support multiple fragments, it would be necessary to change
	 * qdf_nbuf_t to be a separate object that stores meta-info
	 * (including the bus address for each fragment) and a pointer
	 * to the underlying sk_buff.
	 */
	qdf_assert(sh->nr_frags == 0);

	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);

#else
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);
#endif
/**
 * __qdf_nbuf_unmap() - to unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: dma direction
 *
 * Return: none
 */
void
__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
		 qdf_dma_dir_t dir)
{
	qdf_assert((dir == QDF_DMA_TO_DEVICE)
		   || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
	 */
	__qdf_nbuf_unmap_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_unmap);

/**
 * __qdf_nbuf_map_single() - map a single buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	/* simulation/USB/SDIO targets use the virtual address directly;
	 * the BUILD_BUG_ONs prove the cb field is wide enough to hold it
	 */
	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#else
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       skb_end_pointer(buf) - buf->data,
			       __qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr)
		? QDF_STATUS_E_FAILURE
		: QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#endif
/**
 * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
			     qdf_dma_dir_t dir)
{
}
#else
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
			     qdf_dma_dir_t dir)
{
	if (QDF_NBUF_CB_PADDR(buf))
		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
				 skb_end_pointer(buf) - buf->data,
				 __qdf_dma_dir_to_os(dir));
}
#endif
qdf_export_symbol(__qdf_nbuf_unmap_single);

/**
 * __qdf_nbuf_set_rx_cksum() - set rx checksum
 * @skb: Pointer to network buffer
 * @cksum: Pointer to checksum value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
{
	switch (cksum->l4_result) {
	case QDF_NBUF_RX_CKSUM_NONE:
		skb->ip_summed = CHECKSUM_NONE;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = cksum->val;
		break;
	default:
		pr_err("Unknown checksum type\n");
		qdf_assert(0);
		return QDF_STATUS_E_NOSUPPORT;
	}
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_set_rx_cksum);

/**
 * __qdf_nbuf_get_tx_cksum() - get tx checksum
 * @skb: Pointer to network buffer
 *
 * Return: TX checksum value
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
{
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		return QDF_NBUF_TX_CKSUM_NONE;
	case CHECKSUM_PARTIAL:
		return QDF_NBUF_TX_CKSUM_TCP_UDP;
	case CHECKSUM_COMPLETE:
		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
	default:
		return QDF_NBUF_TX_CKSUM_NONE;
	}
}
qdf_export_symbol(__qdf_nbuf_get_tx_cksum);

/**
 * __qdf_nbuf_get_tid() - get tid
 * @skb: Pointer to network buffer
 *
 * Return: tid
 */
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
{
	return skb->priority;
}
qdf_export_symbol(__qdf_nbuf_get_tid);

/**
 * __qdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value to store in skb->priority
 *
 * Return: none
 */
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
{
	skb->priority = tid;
}
qdf_export_symbol(__qdf_nbuf_set_tid);

/**
 * __qdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type (always QDF_NBUF_EXEMPT_NO_EXEMPTION here)
 */
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
}
qdf_export_symbol(__qdf_nbuf_get_exemption_type);

/**
 * __qdf_nbuf_reg_trace_cb() - register trace callback
 * @cb_func_ptr: Pointer to trace callback function
 *
 * Return: none
 */
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
{
	qdf_trace_update_cb = cb_func_ptr;
}
qdf_export_symbol(__qdf_nbuf_reg_trace_cb);

/**
 * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
 * of DHCP packet.
 * @data: Pointer to DHCP packet data buffer
 *
 * This func. returns the subtype of DHCP packet.
 *
 * Return: subtype of the DHCP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	/* only classify when option 53 (message type) is present with the
	 * expected length; otherwise report QDF_PROTO_INVALID
	 */
	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
	    (data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
	     QDF_DHCP_OPTION53_LENGTH)) {

		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
		case QDF_DHCP_DISCOVER:
			subtype = QDF_PROTO_DHCP_DISCOVER;
			break;
		case QDF_DHCP_REQUEST:
			subtype = QDF_PROTO_DHCP_REQUEST;
			break;
		case QDF_DHCP_OFFER:
			subtype = QDF_PROTO_DHCP_OFFER;
			break;
		case QDF_DHCP_ACK:
			subtype = QDF_PROTO_DHCP_ACK;
			break;
		case QDF_DHCP_NAK:
			subtype = QDF_PROTO_DHCP_NACK;
			break;
		case QDF_DHCP_RELEASE:
			subtype = QDF_PROTO_DHCP_RELEASE;
			break;
		case QDF_DHCP_INFORM:
			subtype = QDF_PROTO_DHCP_INFORM;
			break;
		case QDF_DHCP_DECLINE:
			subtype = QDF_PROTO_DHCP_DECLINE;
			break;
		default:
			break;
		}
	}

	return subtype;
}

/**
 * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
 * of EAPOL packet.
 * @data: Pointer to EAPOL packet data buffer
 *
 * This func. returns the subtype of EAPOL packet.
 *
 * Return: subtype of the EAPOL packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
{
	uint16_t eapol_key_info;
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
	uint16_t mask;

	eapol_key_info = (uint16_t)(*(uint16_t *)
			(data + EAPOL_KEY_INFO_OFFSET));

	/* classify the 4-way handshake message from the key-info bits */
	mask = eapol_key_info & EAPOL_MASK;
	switch (mask) {
	case EAPOL_M1_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M1;
		break;
	case EAPOL_M2_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M2;
		break;
	case EAPOL_M3_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M3;
		break;
	case EAPOL_M4_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M4;
		break;
	default:
		break;
	}

	return subtype;
}

/**
 * __qdf_nbuf_data_get_arp_subtype() - get the subtype
 * of ARP packet.
 * @data: Pointer to ARP packet data buffer
 *
 * This func. returns the subtype of ARP packet.
 *
 * Return: subtype of the ARP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_arp_subtype(uint8_t *data)
{
	uint16_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint16_t)(*(uint16_t *)
			(data + ARP_SUB_TYPE_OFFSET));

	/* ARP opcode is big-endian on the wire, hence the byte swap */
	switch (QDF_SWAP_U16(subtype)) {
	case ARP_REQUEST:
		proto_subtype = QDF_PROTO_ARP_REQ;
		break;
	case ARP_RESPONSE:
		proto_subtype = QDF_PROTO_ARP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
 * of IPV4 ICMP packet.
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This func. returns the subtype of ICMP packet.
 *
 * Return: subtype of the ICMP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMP_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMP_REQUEST:
		proto_subtype = QDF_PROTO_ICMP_REQ;
		break;
	case ICMP_RESPONSE:
		proto_subtype = QDF_PROTO_ICMP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
 * of IPV6 ICMPV6 packet.
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This func. returns the subtype of ICMPV6 packet.
 *
 * Return: subtype of the ICMPV6 packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMPV6_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMPV6_REQUEST:
		proto_subtype = QDF_PROTO_ICMPV6_REQ;
		break;
	case ICMPV6_RESPONSE:
		proto_subtype = QDF_PROTO_ICMPV6_RES;
		break;
	case ICMPV6_RS:
		proto_subtype = QDF_PROTO_ICMPV6_RS;
		break;
	case ICMPV6_RA:
		proto_subtype = QDF_PROTO_ICMPV6_RA;
		break;
	case ICMPV6_NS:
		proto_subtype = QDF_PROTO_ICMPV6_NS;
		break;
	case ICMPV6_NA:
		proto_subtype = QDF_PROTO_ICMPV6_NA;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
 * of IPV4 packet.
 * @data: Pointer to IPV4 packet data buffer
 *
 * This func. returns the proto type of IPV4 packet.
 *
 * Return: proto type of IPV4 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
 * of IPV6 packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. returns the proto type of IPV6 packet.
 *
 * Return: proto type of IPV6 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
 * @data: Pointer to network data
 *
 * This api is for Tx packets.
 *
 * Return: true if packet is ipv4 packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	/* ethertype is big-endian on the wire, hence the swapped compare */
	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
1357 * 1358 * Return: true if packet is DHCP packet 1359 * false otherwise 1360 */ 1361 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data) 1362 { 1363 uint16_t sport; 1364 uint16_t dport; 1365 1366 sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET + 1367 QDF_NBUF_TRAC_IPV4_HEADER_SIZE)); 1368 dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET + 1369 QDF_NBUF_TRAC_IPV4_HEADER_SIZE + 1370 sizeof(uint16_t))); 1371 1372 if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) && 1373 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) || 1374 ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) && 1375 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)))) 1376 return true; 1377 else 1378 return false; 1379 } 1380 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt); 1381 1382 /** 1383 * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet 1384 * @data: Pointer to network data buffer 1385 * 1386 * This api is for ipv4 packet. 1387 * 1388 * Return: true if packet is EAPOL packet 1389 * false otherwise. 1390 */ 1391 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data) 1392 { 1393 uint16_t ether_type; 1394 1395 ether_type = (uint16_t)(*(uint16_t *)(data + 1396 QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); 1397 1398 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)) 1399 return true; 1400 else 1401 return false; 1402 } 1403 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt); 1404 1405 /** 1406 * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet 1407 * @skb: Pointer to network buffer 1408 * 1409 * This api is for ipv4 packet. 1410 * 1411 * Return: true if packet is WAPI packet 1412 * false otherwise. 
1413 */ 1414 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb) 1415 { 1416 uint16_t ether_type; 1417 1418 ether_type = (uint16_t)(*(uint16_t *)(skb->data + 1419 QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); 1420 1421 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE)) 1422 return true; 1423 else 1424 return false; 1425 } 1426 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt); 1427 1428 /** 1429 * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet 1430 * @skb: Pointer to network buffer 1431 * 1432 * This api is for ipv4 packet. 1433 * 1434 * Return: true if packet is tdls packet 1435 * false otherwise. 1436 */ 1437 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb) 1438 { 1439 uint16_t ether_type; 1440 1441 ether_type = *(uint16_t *)(skb->data + 1442 QDF_NBUF_TRAC_ETH_TYPE_OFFSET); 1443 1444 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE)) 1445 return true; 1446 else 1447 return false; 1448 } 1449 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt); 1450 1451 /** 1452 * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet 1453 * @data: Pointer to network data buffer 1454 * 1455 * This api is for ipv4 packet. 1456 * 1457 * Return: true if packet is ARP packet 1458 * false otherwise. 1459 */ 1460 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data) 1461 { 1462 uint16_t ether_type; 1463 1464 ether_type = (uint16_t)(*(uint16_t *)(data + 1465 QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); 1466 1467 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE)) 1468 return true; 1469 else 1470 return false; 1471 } 1472 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt); 1473 1474 /** 1475 * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request 1476 * @data: Pointer to network data buffer 1477 * 1478 * This api is for ipv4 packet. 1479 * 1480 * Return: true if packet is ARP request 1481 * false otherwise. 
1482 */ 1483 bool __qdf_nbuf_data_is_arp_req(uint8_t *data) 1484 { 1485 uint16_t op_code; 1486 1487 op_code = (uint16_t)(*(uint16_t *)(data + 1488 QDF_NBUF_PKT_ARP_OPCODE_OFFSET)); 1489 1490 if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ)) 1491 return true; 1492 return false; 1493 } 1494 1495 /** 1496 * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response 1497 * @data: Pointer to network data buffer 1498 * 1499 * This api is for ipv4 packet. 1500 * 1501 * Return: true if packet is ARP response 1502 * false otherwise. 1503 */ 1504 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data) 1505 { 1506 uint16_t op_code; 1507 1508 op_code = (uint16_t)(*(uint16_t *)(data + 1509 QDF_NBUF_PKT_ARP_OPCODE_OFFSET)); 1510 1511 if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY)) 1512 return true; 1513 return false; 1514 } 1515 1516 /** 1517 * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP 1518 * @data: Pointer to network data buffer 1519 * 1520 * This api is for ipv4 packet. 1521 * 1522 * Return: ARP packet source IP value. 1523 */ 1524 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data) 1525 { 1526 uint32_t src_ip; 1527 1528 src_ip = (uint32_t)(*(uint32_t *)(data + 1529 QDF_NBUF_PKT_ARP_SRC_IP_OFFSET)); 1530 1531 return src_ip; 1532 } 1533 1534 /** 1535 * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP 1536 * @data: Pointer to network data buffer 1537 * 1538 * This api is for ipv4 packet. 1539 * 1540 * Return: ARP packet target IP value. 1541 */ 1542 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data) 1543 { 1544 uint32_t tgt_ip; 1545 1546 tgt_ip = (uint32_t)(*(uint32_t *)(data + 1547 QDF_NBUF_PKT_ARP_TGT_IP_OFFSET)); 1548 1549 return tgt_ip; 1550 } 1551 1552 /** 1553 * __qdf_nbuf_get_dns_domain_name() - get dns domain name 1554 * @data: Pointer to network data buffer 1555 * @len: length to copy 1556 * 1557 * This api is for dns domain name 1558 * 1559 * Return: dns domain name. 
1560 */ 1561 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len) 1562 { 1563 uint8_t *domain_name; 1564 1565 domain_name = (uint8_t *) 1566 (data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET); 1567 return domain_name; 1568 } 1569 1570 1571 /** 1572 * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query 1573 * @data: Pointer to network data buffer 1574 * 1575 * This api is for dns query packet. 1576 * 1577 * Return: true if packet is dns query packet. 1578 * false otherwise. 1579 */ 1580 bool __qdf_nbuf_data_is_dns_query(uint8_t *data) 1581 { 1582 uint16_t op_code; 1583 uint16_t tgt_port; 1584 1585 tgt_port = (uint16_t)(*(uint16_t *)(data + 1586 QDF_NBUF_PKT_DNS_DST_PORT_OFFSET)); 1587 /* Standard DNS query always happen on Dest Port 53. */ 1588 if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) { 1589 op_code = (uint16_t)(*(uint16_t *)(data + 1590 QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET)); 1591 if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) == 1592 QDF_NBUF_PKT_DNSOP_STANDARD_QUERY) 1593 return true; 1594 } 1595 return false; 1596 } 1597 1598 /** 1599 * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response 1600 * @data: Pointer to network data buffer 1601 * 1602 * This api is for dns query response. 1603 * 1604 * Return: true if packet is dns response packet. 1605 * false otherwise. 1606 */ 1607 bool __qdf_nbuf_data_is_dns_response(uint8_t *data) 1608 { 1609 uint16_t op_code; 1610 uint16_t src_port; 1611 1612 src_port = (uint16_t)(*(uint16_t *)(data + 1613 QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET)); 1614 /* Standard DNS response always comes on Src Port 53. 
*/ 1615 if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) { 1616 op_code = (uint16_t)(*(uint16_t *)(data + 1617 QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET)); 1618 1619 if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) == 1620 QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE) 1621 return true; 1622 } 1623 return false; 1624 } 1625 1626 /** 1627 * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn 1628 * @data: Pointer to network data buffer 1629 * 1630 * This api is for tcp syn packet. 1631 * 1632 * Return: true if packet is tcp syn packet. 1633 * false otherwise. 1634 */ 1635 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data) 1636 { 1637 uint8_t op_code; 1638 1639 op_code = (uint8_t)(*(uint8_t *)(data + 1640 QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); 1641 1642 if (op_code == QDF_NBUF_PKT_TCPOP_SYN) 1643 return true; 1644 return false; 1645 } 1646 1647 /** 1648 * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack 1649 * @data: Pointer to network data buffer 1650 * 1651 * This api is for tcp syn ack packet. 1652 * 1653 * Return: true if packet is tcp syn ack packet. 1654 * false otherwise. 1655 */ 1656 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data) 1657 { 1658 uint8_t op_code; 1659 1660 op_code = (uint8_t)(*(uint8_t *)(data + 1661 QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); 1662 1663 if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK) 1664 return true; 1665 return false; 1666 } 1667 1668 /** 1669 * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack 1670 * @data: Pointer to network data buffer 1671 * 1672 * This api is for tcp ack packet. 1673 * 1674 * Return: true if packet is tcp ack packet. 1675 * false otherwise. 
1676 */ 1677 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data) 1678 { 1679 uint8_t op_code; 1680 1681 op_code = (uint8_t)(*(uint8_t *)(data + 1682 QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); 1683 1684 if (op_code == QDF_NBUF_PKT_TCPOP_ACK) 1685 return true; 1686 return false; 1687 } 1688 1689 /** 1690 * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port 1691 * @data: Pointer to network data buffer 1692 * 1693 * This api is for tcp packet. 1694 * 1695 * Return: tcp source port value. 1696 */ 1697 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data) 1698 { 1699 uint16_t src_port; 1700 1701 src_port = (uint16_t)(*(uint16_t *)(data + 1702 QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET)); 1703 1704 return src_port; 1705 } 1706 1707 /** 1708 * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port 1709 * @data: Pointer to network data buffer 1710 * 1711 * This api is for tcp packet. 1712 * 1713 * Return: tcp destination port value. 1714 */ 1715 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data) 1716 { 1717 uint16_t tgt_port; 1718 1719 tgt_port = (uint16_t)(*(uint16_t *)(data + 1720 QDF_NBUF_PKT_TCP_DST_PORT_OFFSET)); 1721 1722 return tgt_port; 1723 } 1724 1725 /** 1726 * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request 1727 * @data: Pointer to network data buffer 1728 * 1729 * This api is for ipv4 req packet. 1730 * 1731 * Return: true if packet is icmpv4 request 1732 * false otherwise. 1733 */ 1734 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data) 1735 { 1736 uint8_t op_code; 1737 1738 op_code = (uint8_t)(*(uint8_t *)(data + 1739 QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET)); 1740 1741 if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ) 1742 return true; 1743 return false; 1744 } 1745 1746 /** 1747 * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res 1748 * @data: Pointer to network data buffer 1749 * 1750 * This api is for ipv4 res packet. 1751 * 1752 * Return: true if packet is icmpv4 response 1753 * false otherwise. 
1754 */ 1755 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data) 1756 { 1757 uint8_t op_code; 1758 1759 op_code = (uint8_t)(*(uint8_t *)(data + 1760 QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET)); 1761 1762 if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY) 1763 return true; 1764 return false; 1765 } 1766 1767 /** 1768 * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP 1769 * @data: Pointer to network data buffer 1770 * 1771 * This api is for ipv4 packet. 1772 * 1773 * Return: icmpv4 packet source IP value. 1774 */ 1775 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data) 1776 { 1777 uint32_t src_ip; 1778 1779 src_ip = (uint32_t)(*(uint32_t *)(data + 1780 QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET)); 1781 1782 return src_ip; 1783 } 1784 1785 /** 1786 * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP 1787 * @data: Pointer to network data buffer 1788 * 1789 * This api is for ipv4 packet. 1790 * 1791 * Return: icmpv4 packet target IP value. 1792 */ 1793 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data) 1794 { 1795 uint32_t tgt_ip; 1796 1797 tgt_ip = (uint32_t)(*(uint32_t *)(data + 1798 QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET)); 1799 1800 return tgt_ip; 1801 } 1802 1803 1804 /** 1805 * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet. 1806 * @data: Pointer to IPV6 packet data buffer 1807 * 1808 * This func. checks whether it is a IPV6 packet or not. 1809 * 1810 * Return: TRUE if it is a IPV6 packet 1811 * FALSE if not 1812 */ 1813 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data) 1814 { 1815 uint16_t ether_type; 1816 1817 ether_type = (uint16_t)(*(uint16_t *)(data + 1818 QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); 1819 1820 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) 1821 return true; 1822 else 1823 return false; 1824 } 1825 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt); 1826 1827 /** 1828 * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet 1829 * @data: Pointer to network data buffer 1830 * 1831 * This api is for ipv6 packet. 
1832 * 1833 * Return: true if packet is DHCP packet 1834 * false otherwise 1835 */ 1836 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data) 1837 { 1838 uint16_t sport; 1839 uint16_t dport; 1840 1841 sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET + 1842 QDF_NBUF_TRAC_IPV6_HEADER_SIZE); 1843 dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET + 1844 QDF_NBUF_TRAC_IPV6_HEADER_SIZE + 1845 sizeof(uint16_t)); 1846 1847 if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) && 1848 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) || 1849 ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) && 1850 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)))) 1851 return true; 1852 else 1853 return false; 1854 } 1855 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt); 1856 1857 /** 1858 * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet. 1859 * @data: Pointer to IPV4 packet data buffer 1860 * 1861 * This func. checks whether it is a IPV4 multicast packet or not. 1862 * 1863 * Return: TRUE if it is a IPV4 multicast packet 1864 * FALSE if not 1865 */ 1866 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data) 1867 { 1868 if (__qdf_nbuf_data_is_ipv4_pkt(data)) { 1869 uint32_t *dst_addr = 1870 (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET); 1871 1872 /* 1873 * Check first word of the IPV4 address and if it is 1874 * equal to 0xE then it represents multicast IP. 1875 */ 1876 if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) == 1877 QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK) 1878 return true; 1879 else 1880 return false; 1881 } else 1882 return false; 1883 } 1884 1885 /** 1886 * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet. 1887 * @data: Pointer to IPV6 packet data buffer 1888 * 1889 * This func. checks whether it is a IPV6 multicast packet or not. 
1890 * 1891 * Return: TRUE if it is a IPV6 multicast packet 1892 * FALSE if not 1893 */ 1894 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data) 1895 { 1896 if (__qdf_nbuf_data_is_ipv6_pkt(data)) { 1897 uint16_t *dst_addr; 1898 1899 dst_addr = (uint16_t *) 1900 (data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET); 1901 1902 /* 1903 * Check first byte of the IP address and if it 1904 * 0xFF00 then it is a IPV6 mcast packet. 1905 */ 1906 if (*dst_addr == 1907 QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR)) 1908 return true; 1909 else 1910 return false; 1911 } else 1912 return false; 1913 } 1914 1915 /** 1916 * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet. 1917 * @data: Pointer to IPV4 ICMP packet data buffer 1918 * 1919 * This func. checks whether it is a ICMP packet or not. 1920 * 1921 * Return: TRUE if it is a ICMP packet 1922 * FALSE if not 1923 */ 1924 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data) 1925 { 1926 if (__qdf_nbuf_data_is_ipv4_pkt(data)) { 1927 uint8_t pkt_type; 1928 1929 pkt_type = (uint8_t)(*(uint8_t *)(data + 1930 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); 1931 1932 if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE) 1933 return true; 1934 else 1935 return false; 1936 } else 1937 return false; 1938 } 1939 1940 /** 1941 * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. 1942 * @data: Pointer to IPV6 ICMPV6 packet data buffer 1943 * 1944 * This func. checks whether it is a ICMPV6 packet or not. 1945 * 1946 * Return: TRUE if it is a ICMPV6 packet 1947 * FALSE if not 1948 */ 1949 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data) 1950 { 1951 if (__qdf_nbuf_data_is_ipv6_pkt(data)) { 1952 uint8_t pkt_type; 1953 1954 pkt_type = (uint8_t)(*(uint8_t *)(data + 1955 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); 1956 1957 if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE) 1958 return true; 1959 else 1960 return false; 1961 } else 1962 return false; 1963 } 1964 1965 /** 1966 * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. 
1967 * @data: Pointer to IPV4 UDP packet data buffer 1968 * 1969 * This func. checks whether it is a IPV4 UDP packet or not. 1970 * 1971 * Return: TRUE if it is a IPV4 UDP packet 1972 * FALSE if not 1973 */ 1974 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data) 1975 { 1976 if (__qdf_nbuf_data_is_ipv4_pkt(data)) { 1977 uint8_t pkt_type; 1978 1979 pkt_type = (uint8_t)(*(uint8_t *)(data + 1980 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); 1981 1982 if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE) 1983 return true; 1984 else 1985 return false; 1986 } else 1987 return false; 1988 } 1989 1990 /** 1991 * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. 1992 * @data: Pointer to IPV4 TCP packet data buffer 1993 * 1994 * This func. checks whether it is a IPV4 TCP packet or not. 1995 * 1996 * Return: TRUE if it is a IPV4 TCP packet 1997 * FALSE if not 1998 */ 1999 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data) 2000 { 2001 if (__qdf_nbuf_data_is_ipv4_pkt(data)) { 2002 uint8_t pkt_type; 2003 2004 pkt_type = (uint8_t)(*(uint8_t *)(data + 2005 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); 2006 2007 if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE) 2008 return true; 2009 else 2010 return false; 2011 } else 2012 return false; 2013 } 2014 2015 /** 2016 * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. 2017 * @data: Pointer to IPV6 UDP packet data buffer 2018 * 2019 * This func. checks whether it is a IPV6 UDP packet or not. 2020 * 2021 * Return: TRUE if it is a IPV6 UDP packet 2022 * FALSE if not 2023 */ 2024 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data) 2025 { 2026 if (__qdf_nbuf_data_is_ipv6_pkt(data)) { 2027 uint8_t pkt_type; 2028 2029 pkt_type = (uint8_t)(*(uint8_t *)(data + 2030 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); 2031 2032 if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE) 2033 return true; 2034 else 2035 return false; 2036 } else 2037 return false; 2038 } 2039 2040 /** 2041 * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. 
2042 * @data: Pointer to IPV6 TCP packet data buffer 2043 * 2044 * This func. checks whether it is a IPV6 TCP packet or not. 2045 * 2046 * Return: TRUE if it is a IPV6 TCP packet 2047 * FALSE if not 2048 */ 2049 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data) 2050 { 2051 if (__qdf_nbuf_data_is_ipv6_pkt(data)) { 2052 uint8_t pkt_type; 2053 2054 pkt_type = (uint8_t)(*(uint8_t *)(data + 2055 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); 2056 2057 if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE) 2058 return true; 2059 else 2060 return false; 2061 } else 2062 return false; 2063 } 2064 2065 /** 2066 * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast 2067 * @nbuf - sk buff 2068 * 2069 * Return: true if packet is broadcast 2070 * false otherwise 2071 */ 2072 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf) 2073 { 2074 struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf); 2075 return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest); 2076 } 2077 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt); 2078 2079 #ifdef NBUF_MEMORY_DEBUG 2080 #define QDF_NET_BUF_TRACK_MAX_SIZE (1024) 2081 2082 /** 2083 * struct qdf_nbuf_track_t - Network buffer track structure 2084 * 2085 * @p_next: Pointer to next 2086 * @net_buf: Pointer to network buffer 2087 * @file_name: File name 2088 * @line_num: Line number 2089 * @size: Size 2090 */ 2091 struct qdf_nbuf_track_t { 2092 struct qdf_nbuf_track_t *p_next; 2093 qdf_nbuf_t net_buf; 2094 char file_name[QDF_MEM_FILE_NAME_SIZE]; 2095 uint32_t line_num; 2096 size_t size; 2097 }; 2098 2099 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE]; 2100 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK; 2101 2102 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE]; 2103 static struct kmem_cache *nbuf_tracking_cache; 2104 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list; 2105 static spinlock_t qdf_net_buf_track_free_list_lock; 2106 static uint32_t qdf_net_buf_track_free_list_count; 2107 static uint32_t 
qdf_net_buf_track_used_list_count; 2108 static uint32_t qdf_net_buf_track_max_used; 2109 static uint32_t qdf_net_buf_track_max_free; 2110 static uint32_t qdf_net_buf_track_max_allocated; 2111 2112 /** 2113 * update_max_used() - update qdf_net_buf_track_max_used tracking variable 2114 * 2115 * tracks the max number of network buffers that the wlan driver was tracking 2116 * at any one time. 2117 * 2118 * Return: none 2119 */ 2120 static inline void update_max_used(void) 2121 { 2122 int sum; 2123 2124 if (qdf_net_buf_track_max_used < 2125 qdf_net_buf_track_used_list_count) 2126 qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count; 2127 sum = qdf_net_buf_track_free_list_count + 2128 qdf_net_buf_track_used_list_count; 2129 if (qdf_net_buf_track_max_allocated < sum) 2130 qdf_net_buf_track_max_allocated = sum; 2131 } 2132 2133 /** 2134 * update_max_free() - update qdf_net_buf_track_free_list_count 2135 * 2136 * tracks the max number tracking buffers kept in the freelist. 2137 * 2138 * Return: none 2139 */ 2140 static inline void update_max_free(void) 2141 { 2142 if (qdf_net_buf_track_max_free < 2143 qdf_net_buf_track_free_list_count) 2144 qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count; 2145 } 2146 2147 /** 2148 * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan 2149 * 2150 * This function pulls from a freelist if possible and uses kmem_cache_alloc. 2151 * This function also ads fexibility to adjust the allocation and freelist 2152 * scheems. 2153 * 2154 * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed. 
2155 */ 2156 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void) 2157 { 2158 int flags = GFP_KERNEL; 2159 unsigned long irq_flag; 2160 QDF_NBUF_TRACK *new_node = NULL; 2161 2162 spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag); 2163 qdf_net_buf_track_used_list_count++; 2164 if (qdf_net_buf_track_free_list != NULL) { 2165 new_node = qdf_net_buf_track_free_list; 2166 qdf_net_buf_track_free_list = 2167 qdf_net_buf_track_free_list->p_next; 2168 qdf_net_buf_track_free_list_count--; 2169 } 2170 update_max_used(); 2171 spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag); 2172 2173 if (new_node != NULL) 2174 return new_node; 2175 2176 if (in_interrupt() || irqs_disabled() || in_atomic()) 2177 flags = GFP_ATOMIC; 2178 2179 return kmem_cache_alloc(nbuf_tracking_cache, flags); 2180 } 2181 2182 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */ 2183 #define FREEQ_POOLSIZE 2048 2184 2185 /** 2186 * qdf_nbuf_track_free() - free the nbuf tracking cookie. 2187 * 2188 * Matches calls to qdf_nbuf_track_alloc. 2189 * Either frees the tracking cookie to kernel or an internal 2190 * freelist based on the size of the freelist. 2191 * 2192 * Return: none 2193 */ 2194 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node) 2195 { 2196 unsigned long irq_flag; 2197 2198 if (!node) 2199 return; 2200 2201 /* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE 2202 * only shrink the freelist if it is bigger than twice the number of 2203 * nbufs in use. If the driver is stalling in a consistent bursty 2204 * fasion, this will keep 3/4 of thee allocations from the free list 2205 * while also allowing the system to recover memory as less frantic 2206 * traffic occurs. 
2207 */ 2208 2209 spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag); 2210 2211 qdf_net_buf_track_used_list_count--; 2212 if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE && 2213 (qdf_net_buf_track_free_list_count > 2214 qdf_net_buf_track_used_list_count << 1)) { 2215 kmem_cache_free(nbuf_tracking_cache, node); 2216 } else { 2217 node->p_next = qdf_net_buf_track_free_list; 2218 qdf_net_buf_track_free_list = node; 2219 qdf_net_buf_track_free_list_count++; 2220 } 2221 update_max_free(); 2222 spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag); 2223 } 2224 2225 /** 2226 * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist 2227 * 2228 * Removes a 'warmup time' characteristic of the freelist. Prefilling 2229 * the freelist first makes it performant for the first iperf udp burst 2230 * as well as steady state. 2231 * 2232 * Return: None 2233 */ 2234 static void qdf_nbuf_track_prefill(void) 2235 { 2236 int i; 2237 QDF_NBUF_TRACK *node, *head; 2238 2239 /* prepopulate the freelist */ 2240 head = NULL; 2241 for (i = 0; i < FREEQ_POOLSIZE; i++) { 2242 node = qdf_nbuf_track_alloc(); 2243 if (node == NULL) 2244 continue; 2245 node->p_next = head; 2246 head = node; 2247 } 2248 while (head) { 2249 node = head->p_next; 2250 qdf_nbuf_track_free(head); 2251 head = node; 2252 } 2253 2254 /* prefilled buffers should not count as used */ 2255 qdf_net_buf_track_max_used = 0; 2256 } 2257 2258 /** 2259 * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies 2260 * 2261 * This initializes the memory manager for the nbuf tracking cookies. Because 2262 * these cookies are all the same size and only used in this feature, we can 2263 * use a kmem_cache to provide tracking as well as to speed up allocations. 2264 * To avoid the overhead of allocating and freeing the buffers (including SLUB 2265 * features) a freelist is prepopulated here. 
2266 * 2267 * Return: None 2268 */ 2269 static void qdf_nbuf_track_memory_manager_create(void) 2270 { 2271 spin_lock_init(&qdf_net_buf_track_free_list_lock); 2272 nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache", 2273 sizeof(QDF_NBUF_TRACK), 2274 0, 0, NULL); 2275 2276 qdf_nbuf_track_prefill(); 2277 } 2278 2279 /** 2280 * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies 2281 * 2282 * Empty the freelist and print out usage statistics when it is no longer 2283 * needed. Also the kmem_cache should be destroyed here so that it can warn if 2284 * any nbuf tracking cookies were leaked. 2285 * 2286 * Return: None 2287 */ 2288 static void qdf_nbuf_track_memory_manager_destroy(void) 2289 { 2290 QDF_NBUF_TRACK *node, *tmp; 2291 unsigned long irq_flag; 2292 2293 spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag); 2294 node = qdf_net_buf_track_free_list; 2295 2296 if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4) 2297 qdf_print("%s: unexpectedly large max_used count %d", 2298 __func__, qdf_net_buf_track_max_used); 2299 2300 if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated) 2301 qdf_print("%s: %d unused trackers were allocated", 2302 __func__, 2303 qdf_net_buf_track_max_allocated - 2304 qdf_net_buf_track_max_used); 2305 2306 if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE && 2307 qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4) 2308 qdf_print("%s: check freelist shrinking functionality", 2309 __func__); 2310 2311 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, 2312 "%s: %d residual freelist size\n", 2313 __func__, qdf_net_buf_track_free_list_count); 2314 2315 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, 2316 "%s: %d max freelist size observed\n", 2317 __func__, qdf_net_buf_track_max_free); 2318 2319 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, 2320 "%s: %d max buffers used observed\n", 2321 __func__, qdf_net_buf_track_max_used); 2322 2323 
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, 2324 "%s: %d max buffers allocated observed\n", 2325 __func__, qdf_net_buf_track_max_allocated); 2326 2327 while (node) { 2328 tmp = node; 2329 node = node->p_next; 2330 kmem_cache_free(nbuf_tracking_cache, tmp); 2331 qdf_net_buf_track_free_list_count--; 2332 } 2333 2334 if (qdf_net_buf_track_free_list_count != 0) 2335 qdf_print("%s: %d unfreed tracking memory lost in freelist\n", 2336 __func__, qdf_net_buf_track_free_list_count); 2337 2338 if (qdf_net_buf_track_used_list_count != 0) 2339 qdf_print("%s: %d unfreed tracking memory still in use\n", 2340 __func__, qdf_net_buf_track_used_list_count); 2341 2342 spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag); 2343 kmem_cache_destroy(nbuf_tracking_cache); 2344 qdf_net_buf_track_free_list = NULL; 2345 } 2346 2347 /** 2348 * qdf_net_buf_debug_init() - initialize network buffer debug functionality 2349 * 2350 * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver 2351 * in a hash table and when driver is unloaded it reports about leaked SKBs. 2352 * WLAN driver module whose allocated SKB is freed by network stack are 2353 * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not 2354 * reported as memory leak. 
 *
 * Return: none
 */
void qdf_net_buf_debug_init(void)
{
	uint32_t i;

	qdf_atomic_set(&qdf_nbuf_history_index, -1);

	qdf_nbuf_map_tracking_init();
	qdf_nbuf_track_memory_manager_create();

	/* Reset every hash bucket and initialize its per-bucket lock */
	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
		gp_qdf_net_buf_track_tbl[i] = NULL;
		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
	}
}
qdf_export_symbol(qdf_net_buf_debug_init);

/**
 * qdf_net_buf_debug_exit() - exit network buffer debug functionality
 *
 * Exit network buffer tracking debug functionality and log SKB memory leaks
 * As part of exiting the functionality, free the leaked memory and
 * cleanup the tracking buffers.
 *
 * Return: none
 */
void qdf_net_buf_debug_exit(void)
{
	uint32_t i;
	uint32_t count = 0;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;
	QDF_NBUF_TRACK *p_prev;

	/* Walk every bucket; anything still tracked here is a leaked SKB */
	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
		p_node = gp_qdf_net_buf_track_tbl[i];
		while (p_node) {
			p_prev = p_node;
			p_node = p_node->p_next;
			count++;
			qdf_print("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK\n",
				  p_prev->file_name, p_prev->line_num,
				  p_prev->size, p_prev->net_buf);
			qdf_nbuf_track_free(p_prev);
		}
		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
	}

	qdf_nbuf_track_memory_manager_destroy();
	qdf_nbuf_map_tracking_deinit();

#ifdef CONFIG_HALT_KMEMLEAK
	if (count) {
		qdf_print("%d SKBs leaked .. please fix the SKB leak", count);
		QDF_BUG(0);
	}
#endif
}
qdf_export_symbol(qdf_net_buf_debug_exit);

/**
 * qdf_net_buf_debug_hash() - hash network buffer pointer
 * @net_buf: network buffer whose pointer value is hashed
 *
 * Folds two shifted views of the pointer and masks into the table size
 * (QDF_NET_BUF_TRACK_MAX_SIZE must be a power of two for the mask to work).
 *
 * Return: hash value
 */
static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
{
	uint32_t i;

	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);

	return i;
}

/**
 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer to search for
 *
 * Caller must hold the bucket lock for net_buf's hash slot.
 *
 * Return: If skb is found in hash table then return pointer to network buffer
 *	else return %NULL
 */
static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
{
	uint32_t i;
	QDF_NBUF_TRACK *p_node;

	i = qdf_net_buf_debug_hash(net_buf);
	p_node = gp_qdf_net_buf_track_tbl[i];

	while (p_node) {
		if (p_node->net_buf == net_buf)
			return p_node;
		p_node = p_node->p_next;
	}

	return NULL;
}

/**
 * qdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer to track
 * @size: allocation size recorded against the buffer
 * @file_name: allocating caller's file name
 * @line_num: allocating caller's line number
 *
 * Return: none
 */
void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
				uint8_t *file_name, uint32_t line_num)
{
	uint32_t i;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;
	QDF_NBUF_TRACK *new_node;

	/* Allocate the tracker outside the bucket lock */
	new_node = qdf_nbuf_track_alloc();

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_node = qdf_net_buf_debug_look_up(net_buf);

	if (p_node) {
		/* Same pointer tracked twice: report both call sites */
		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
			  p_node->net_buf, p_node->file_name, p_node->line_num,
			  net_buf, kbasename(file_name), line_num);
		qdf_nbuf_track_free(new_node);
	} else {
		p_node = new_node;
		if (p_node) {
			p_node->net_buf = net_buf;
			qdf_str_lcopy(p_node->file_name, kbasename(file_name),
				      QDF_MEM_FILE_NAME_SIZE);
			p_node->line_num = line_num;
			p_node->size = size;
			qdf_mem_skb_inc(size);
			/* Insert at the head of the bucket's chain */
			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
			gp_qdf_net_buf_track_tbl[i] = p_node;
		} else
			qdf_print(
				"Mem alloc failed ! Could not track skb from %s %d of size %zu",
				kbasename(file_name), line_num, size);
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
}
qdf_export_symbol(qdf_net_buf_debug_add_node);

/**
 * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer to stop tracking
 *
 * Logs and QDF_BUG()s if the buffer was never tracked (likely double free).
 *
 * Return: none
 */
void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
{
	uint32_t i;
	QDF_NBUF_TRACK *p_head;
	QDF_NBUF_TRACK *p_node = NULL;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_prev;

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_head = gp_qdf_net_buf_track_tbl[i];

	/* Unallocated SKB */
	if (!p_head)
		goto done;

	p_node = p_head;
	/* Found at head of the table */
	if (p_head->net_buf == net_buf) {
		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
		goto done;
	}

	/* Search in collision list; p_node stays NULL-equivalent on miss
	 * because the loop exits with p_node == NULL when not found
	 */
	while (p_node) {
		p_prev = p_node;
		p_node = p_node->p_next;
		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
			p_prev->p_next = p_node->p_next;
			break;
		}
	}

done:
	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);

	if (p_node) {
		/* Tracker freed outside the bucket lock */
		qdf_mem_skb_dec(p_node->size);
		qdf_nbuf_track_free(p_node);
	} else {
		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
			  net_buf);
		QDF_BUG(0);
	}
}
qdf_export_symbol(qdf_net_buf_debug_delete_node);

/**
 * qdf_net_buf_debug_acquire_skb() - track an skb the driver took ownership of
 * @net_buf: network buffer (head segment)
 * @file_name: caller's file name
 * @line_num: caller's line number
 *
 * Adds the head skb and every frag_list extension (jumbo) to the debug table
 * with size 0, since the allocation size is unknown here.
 *
 * Return: none
 */
void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
				   uint8_t *file_name, uint32_t line_num)
{
	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);

	while (ext_list) {
		/*
		 * Take care to add if it is Jumbo packet connected using
		 * frag_list
		 */
		qdf_nbuf_t next;

		next = qdf_nbuf_queue_next(ext_list);
		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
		ext_list = next;
	}
	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
}
qdf_export_symbol(qdf_net_buf_debug_acquire_skb);

/**
 * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
 * @net_buf: Network buf holding head segment (single)
 *
 * WLAN driver modules whose allocated SKB is freed by network stack are
 * supposed to call this API before returning SKB to network stack such
 * that the SKB is not reported as memory leak.
2583 * 2584 * Return: none 2585 */ 2586 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf) 2587 { 2588 qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf); 2589 2590 while (ext_list) { 2591 /* 2592 * Take care to free if it is Jumbo packet connected using 2593 * frag_list 2594 */ 2595 qdf_nbuf_t next; 2596 2597 next = qdf_nbuf_queue_next(ext_list); 2598 2599 if (qdf_nbuf_is_tso(ext_list) && 2600 qdf_nbuf_get_users(ext_list) > 1) { 2601 ext_list = next; 2602 continue; 2603 } 2604 2605 qdf_net_buf_debug_delete_node(ext_list); 2606 ext_list = next; 2607 } 2608 2609 if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1) 2610 return; 2611 2612 qdf_net_buf_debug_delete_node(net_buf); 2613 } 2614 qdf_export_symbol(qdf_net_buf_debug_release_skb); 2615 2616 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size, 2617 int reserve, int align, int prio, 2618 uint8_t *file, uint32_t line) 2619 { 2620 qdf_nbuf_t nbuf; 2621 2622 nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio); 2623 2624 /* Store SKB in internal QDF tracking table */ 2625 if (qdf_likely(nbuf)) { 2626 qdf_net_buf_debug_add_node(nbuf, size, file, line); 2627 qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC); 2628 } 2629 2630 return nbuf; 2631 } 2632 qdf_export_symbol(qdf_nbuf_alloc_debug); 2633 2634 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line) 2635 { 2636 if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1) 2637 goto free_buf; 2638 2639 /* Remove SKB from internal QDF tracking table */ 2640 if (qdf_likely(nbuf)) { 2641 struct qdf_nbuf_map_metadata *meta; 2642 2643 meta = qdf_nbuf_meta_get(nbuf); 2644 if (meta) 2645 QDF_DEBUG_PANIC( 2646 "Nbuf freed @ %s:%u while mapped from %s:%u", 2647 kbasename(file), line, meta->file, meta->line); 2648 2649 qdf_net_buf_debug_delete_node(nbuf); 2650 qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE); 2651 } 2652 2653 free_buf: 2654 __qdf_nbuf_free(nbuf); 2655 } 2656 
qdf_export_symbol(qdf_nbuf_free_debug);

#endif /* NBUF_MEMORY_DEBUG */

#if defined(FEATURE_TSO)

/**
 * struct qdf_tso_cmn_seg_info_t - TSO common info structure
 *
 * @ethproto: ethernet type of the msdu
 * @ip_tcp_hdr_len: ip + tcp length for the msdu
 * @l2_len: L2 length for the msdu
 * @eit_hdr: pointer to EIT header
 * @eit_hdr_len: EIT header length for the msdu
 * @eit_hdr_dma_map_addr: dma addr for EIT header
 * @tcphdr: pointer to tcp header
 * @ipv4_csum_en: ipv4 checksum enable
 * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
 * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
 * @ip_id: IP id
 * @tcp_seq_num: TCP sequence number
 *
 * This structure holds the TSO common info that is common
 * across all the TCP segments of the jumbo packet.
 */
struct qdf_tso_cmn_seg_info_t {
	uint16_t ethproto;
	uint16_t ip_tcp_hdr_len;
	uint16_t l2_len;
	uint8_t *eit_hdr;
	uint32_t eit_hdr_len;
	qdf_dma_addr_t eit_hdr_dma_map_addr;
	struct tcphdr *tcphdr;
	uint16_t ipv4_csum_en;
	uint16_t tcp_ipv4_csum_en;
	uint16_t tcp_ipv6_csum_en;
	uint16_t ip_id;
	uint32_t tcp_seq_num;
};

/**
 * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
 * information
 * @osdev: qdf device handle
 * @skb: skb buffer
 * @tso_info: Parameters common to all segments
 *
 * Get the TSO information that is common across all the TCP
 * segments of the jumbo packet. Also DMA-maps the combined
 * ethernet + IP + TCP (EIT) header for the device.
 *
 * Return: 0 - success, 1 - failure
 */
static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
	struct sk_buff *skb,
	struct qdf_tso_cmn_seg_info_t *tso_info)
{
	/* Get ethernet type and ethernet header length */
	tso_info->ethproto = vlan_get_protocol(skb);

	/* Determine whether this is an IPv4 or IPv6 packet */
	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
		/* for IPv4, get the IP ID and enable TCP and IP csum */
		struct iphdr *ipv4_hdr = ip_hdr(skb);

		tso_info->ip_id = ntohs(ipv4_hdr->id);
		tso_info->ipv4_csum_en = 1;
		tso_info->tcp_ipv4_csum_en = 1;
		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
			qdf_print("TSO IPV4 proto 0x%x not TCP\n",
				  ipv4_hdr->protocol);
			return 1;
		}
	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
		/* for IPv6, enable TCP csum. No IP ID or IP csum */
		tso_info->tcp_ipv6_csum_en = 1;
	} else {
		qdf_print("TSO: ethertype 0x%x is not supported!\n",
			  tso_info->ethproto);
		return 1;
	}
	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
	tso_info->tcphdr = tcp_hdr(skb);
	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
	/* get pointer to the ethernet + IP + TCP header and their length */
	tso_info->eit_hdr = skb->data;
	tso_info->eit_hdr_len = (skb_transport_header(skb)
		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
							tso_info->eit_hdr,
							tso_info->eit_hdr_len,
							DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(osdev->dev,
				       tso_info->eit_hdr_dma_map_addr))) {
		qdf_print("DMA mapping error!\n");
		qdf_assert(0);
		return 1;
	}

	if (tso_info->ethproto == htons(ETH_P_IP)) {
		/* include IPv4 header length for IPV4 (total length) */
		tso_info->ip_tcp_hdr_len =
			tso_info->eit_hdr_len - tso_info->l2_len;
	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
		/* exclude IPv6 header length for IPv6 (payload length) */
		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
	}
	/*
	 * The length of the payload (application layer data) is added to
	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
	 * descriptor.
	 */

	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u skb len %u\n", __func__,
		  tso_info->tcp_seq_num,
		  tso_info->eit_hdr_len,
		  tso_info->l2_len,
		  skb->len);
	return 0;
}


/**
 * __qdf_dmaaddr_to_32s - return high and low parts of dma_addr
 * @dmaaddr: DMA address to split
 * @lo: out: lower 32 bits
 * @hi: out: upper 32 bits (0 when qdf_dma_addr_t is 32-bit)
 *
 * Returns the high and low 32-bits of the DMA addr in the provided ptrs
 *
 * Return: N/A
 */
void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
			  uint32_t *lo, uint32_t *hi)
{
	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
		*lo = lower_32_bits(dmaaddr);
		*hi = upper_32_bits(dmaaddr);
	} else {
		*lo = dmaaddr;
		*hi = 0;
	}
}
qdf_export_symbol(__qdf_dmaaddr_to_32s);

/**
 * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
 *
 * @curr_seg: Segment whose contents are initialized
 * @tso_cmn_info: Parameters common to all segments
 *
 * Return: None
 */
static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
				struct qdf_tso_seg_elem_t *curr_seg,
				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
{
	/* Initialize the flags to 0 */
	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));

	/*
	 * The following fields remain the same across all segments of
	 * a jumbo packet
	 */
	curr_seg->seg.tso_flags.tso_enable = 1;
	curr_seg->seg.tso_flags.ipv4_checksum_en =
		tso_cmn_info->ipv4_csum_en;
	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
		tso_cmn_info->tcp_ipv6_csum_en;
	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
		tso_cmn_info->tcp_ipv4_csum_en;
	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;

	/* The following fields change for the segments */
	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
	/* each segment consumes one IP ID */
	tso_cmn_info->ip_id++;

	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;

	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;

	/*
	 * First fragment for each segment always contains the ethernet,
	 * IP and TCP header
	 */
	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;

	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
		  __func__, __LINE__, tso_cmn_info->eit_hdr,
		  tso_cmn_info->eit_hdr_len,
		  curr_seg->seg.tso_flags.tcp_seq_num,
		  curr_seg->seg.total_len);
	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
}

/**
 * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
 * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 *      TSO segments will be populated within this.
 *
 * This function fragments a TCP jumbo packet into smaller
 * segments to be transmitted by the driver. It chains the TSO
 * segments created into a list.
 *
 * Return: number of TSO segments
 */
uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
		struct qdf_tso_info_t *tso_info)
{
	/* common across all segments */
	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
	/* segment specific */
	void *tso_frag_vaddr;
	qdf_dma_addr_t tso_frag_paddr = 0;
	uint32_t num_seg = 0;
	struct qdf_tso_seg_elem_t *curr_seg;
	struct qdf_tso_num_seg_elem_t *total_num_seg;
	struct skb_frag_struct *frag = NULL;
	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
	int j = 0; /* skb fragment index */

	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));

	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
						skb, &tso_cmn_info))) {
		qdf_print("TSO: error getting common segment info\n");
		return 0;
	}

	total_num_seg = tso_info->tso_num_seg_list;
	curr_seg = tso_info->tso_seg_list;

	/* length of the first chunk of data in the skb */
	skb_frag_len = skb_headlen(skb);

	/* the 0th tso segment's 0th fragment always contains the EIT header */
	/* update the remaining skb fragment length and TSO segment length */
	skb_frag_len -= tso_cmn_info.eit_hdr_len;
	skb_proc -= tso_cmn_info.eit_hdr_len;

	/* get the address to the next tso fragment */
	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
	/* get the length of the next tso fragment */
	tso_frag_len = min(skb_frag_len, tso_seg_size);

	if (tso_frag_len != 0) {
		tso_frag_paddr = dma_map_single(osdev->dev,
				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(osdev->dev,
					tso_frag_paddr))) {
		qdf_print("%s:%d DMA mapping error!\n", __func__, __LINE__);
		qdf_assert(0);
		return 0;
	}
	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
		__LINE__, skb_frag_len, tso_frag_len);
	num_seg = tso_info->num_segs;
	tso_info->num_segs = 0;
	tso_info->is_tso = 1;
	total_num_seg->num_seg.tso_cmn_num_seg = 0;

	/* outer loop: one iteration per TSO segment produced */
	while (num_seg && curr_seg) {
		int i = 1; /* tso fragment index */
		uint8_t more_tso_frags = 1;

		curr_seg->seg.num_frags = 0;
		tso_info->num_segs++;
		total_num_seg->num_seg.tso_cmn_num_seg++;

		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
						 &tso_cmn_info);

		if (unlikely(skb_proc == 0))
			return tso_info->num_segs;

		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
		/* frag len is added to ip_len in while loop below*/

		/* frag 0 (the EIT header) was filled by the helper above */
		curr_seg->seg.num_frags++;

		/* inner loop: gather payload fragments for this segment */
		while (more_tso_frags) {
			if (tso_frag_len != 0) {
				curr_seg->seg.tso_frags[i].vaddr =
					tso_frag_vaddr;
				curr_seg->seg.tso_frags[i].length =
					tso_frag_len;
				curr_seg->seg.total_len += tso_frag_len;
				curr_seg->seg.tso_flags.ip_len += tso_frag_len;
				curr_seg->seg.num_frags++;
				skb_proc = skb_proc - tso_frag_len;

				/* increment the TCP sequence number */

				tso_cmn_info.tcp_seq_num += tso_frag_len;
				curr_seg->seg.tso_frags[i].paddr =
					tso_frag_paddr;
			}

			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
					__func__, __LINE__,
					i,
					tso_frag_len,
					curr_seg->seg.total_len,
					curr_seg->seg.tso_frags[i].vaddr);

			/* if there is no more data left in the skb */
			if (!skb_proc)
				return tso_info->num_segs;

			/* get the next payload fragment information */
			/* check if there are more fragments in this segment */
			if (tso_frag_len < tso_seg_size) {
				more_tso_frags = 1;
				if (tso_frag_len != 0) {
					tso_seg_size = tso_seg_size -
						tso_frag_len;
					i++;
					if (curr_seg->seg.num_frags ==
								FRAG_NUM_MAX) {
						more_tso_frags = 0;
						/*
						 * reset i and the tso
						 * payload size
						 */
						i = 1;
						tso_seg_size =
							skb_shinfo(skb)->
								gso_size;
					}
				}
			} else {
				more_tso_frags = 0;
				/* reset i and the tso payload size */
				i = 1;
				tso_seg_size = skb_shinfo(skb)->gso_size;
			}

			/* if the next fragment is contiguous */
			if ((tso_frag_len != 0) && (tso_frag_len < skb_frag_len)) {
				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
				skb_frag_len = skb_frag_len - tso_frag_len;
				tso_frag_len = min(skb_frag_len, tso_seg_size);

			} else { /* the next fragment is not contiguous */
				if (skb_shinfo(skb)->nr_frags == 0) {
					qdf_print("TSO: nr_frags == 0!\n");
					qdf_assert(0);
					return 0;
				}
				if (j >= skb_shinfo(skb)->nr_frags) {
					qdf_print("TSO: nr_frags %d j %d\n",
						  skb_shinfo(skb)->nr_frags, j);
					qdf_assert(0);
					return 0;
				}
				frag = &skb_shinfo(skb)->frags[j];
				skb_frag_len = skb_frag_size(frag);
				tso_frag_len = min(skb_frag_len, tso_seg_size);
				tso_frag_vaddr = skb_frag_address_safe(frag);
				j++;
			}

			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
				__func__, __LINE__, skb_frag_len, tso_frag_len,
				tso_seg_size);

			if (!(tso_frag_vaddr)) {
				TSO_DEBUG("%s: Fragment virtual addr is NULL",
						__func__);
				return 0;
			}

			tso_frag_paddr =
					 dma_map_single(osdev->dev,
						 tso_frag_vaddr,
						 tso_frag_len,
						 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(osdev->dev,
							tso_frag_paddr))) {
				qdf_print("%s:%d DMA mapping error!\n",
						__func__, __LINE__);
				qdf_assert(0);
				return 0;
			}
		}
		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
			curr_seg->seg.tso_flags.tcp_seq_num);
		num_seg--;
		/* if TCP FIN flag was set, set it in the last segment */
		if (!num_seg)
			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;

		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
		curr_seg = curr_seg->next;
	}
	return tso_info->num_segs;
}
qdf_export_symbol(__qdf_nbuf_get_tso_info);

/**
 * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
 *
 * @osdev: qdf device handle
 * @tso_seg: TSO segment element to be unmapped
 * @is_last_seg: whether this is last tso seg or not
 *
 * Frag 0 (the shared EIT header) is only unmapped when @is_last_seg is true,
 * since all segments of the packet reference the same header mapping.
 *
 * Return: none
 */
void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg)
{
	uint32_t num_frags = 0;

	if (tso_seg->seg.num_frags > 0)
		num_frags = tso_seg->seg.num_frags - 1;

	/* Num of frags in a tso seg cannot be less than 2 */
	if (num_frags < 1) {
		qdf_assert(0);
		qdf_print("ERROR: num of frags in a tso segment is %d\n",
			  (num_frags + 1));
		return;
	}

	while (num_frags) {
		/* Do dma unmap the tso seg except the 0th frag */
		if (0 == tso_seg->seg.tso_frags[num_frags].paddr) {
			qdf_print("ERROR: TSO seg frag %d mapped physical address is NULL\n",
				  num_frags);
			qdf_assert(0);
			return;
		}
		dma_unmap_single(osdev->dev,
				 tso_seg->seg.tso_frags[num_frags].paddr,
				 tso_seg->seg.tso_frags[num_frags].length,
				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
		/* clear the paddr so a double unmap is detectable */
		tso_seg->seg.tso_frags[num_frags].paddr = 0;
		num_frags--;
		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
	}

	if (is_last_seg) {
		/* Do dma unmap for the tso seg 0th frag */
		if (0 == tso_seg->seg.tso_frags[0].paddr) {
			qdf_print("ERROR: TSO seg frag 0 mapped physical address is NULL\n");
			qdf_assert(0);
			return;
		}
		dma_unmap_single(osdev->dev,
				 tso_seg->seg.tso_frags[0].paddr,
				 tso_seg->seg.tso_frags[0].length,
				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
		tso_seg->seg.tso_frags[0].paddr = 0;
		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
	}
}
qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);

/**
 * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
 * for a jumbo nbuf
 * @skb: network buffer to be segmented
 *
 * Computes how many TSO segments __qdf_nbuf_get_tso_info() will need,
 * accounting for the per-segment fragment limit (FRAG_NUM_MAX, with one
 * fragment always reserved for the EIT header).
 *
 * Return: number of TSO segments (0 on error)
 */
#ifndef BUILD_X86
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
	uint32_t remainder, num_segs = 0;
	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
	uint8_t frags_per_tso = 0;
	uint32_t skb_frag_len = 0;
	uint32_t eit_hdr_len = (skb_transport_header(skb)
		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
	struct skb_frag_struct *frag = NULL;
	int j = 0;
	uint32_t temp_num_seg = 0;

	/* length of the first chunk of data in the skb minus eit header*/
	skb_frag_len = skb_headlen(skb) - eit_hdr_len;

	/* Calculate num of segs for skb's first chunk of data*/
	remainder = skb_frag_len % tso_seg_size;
	num_segs = skb_frag_len / tso_seg_size;
	/*
	 * Remainder non-zero and nr_frags zero implies end of skb data.
	 * In that case, one more tso seg is required to accommodate
	 * remaining data, hence num_segs++. If nr_frags is non-zero,
	 * then remaining data will be accommodated while doing the calculation
	 * for nr_frags data. Hence, frags_per_tso++.
	 */
	if (remainder) {
		if (!skb_nr_frags)
			num_segs++;
		else
			frags_per_tso++;
	}

	while (skb_nr_frags) {
		if (j >= skb_shinfo(skb)->nr_frags) {
			qdf_print("TSO: nr_frags %d j %d\n",
				  skb_shinfo(skb)->nr_frags, j);
			qdf_assert(0);
			return 0;
		}
		/*
		 * Calculate the number of tso seg for nr_frags data:
		 * Get the length of each frag in skb_frag_len, add to
		 * remainder. Get the number of segments by dividing it to
		 * tso_seg_size and calculate the new remainder.
		 * Decrement the nr_frags value and keep
		 * looping all the skb_fragments.
		 */
		frag = &skb_shinfo(skb)->frags[j];
		skb_frag_len = skb_frag_size(frag);
		temp_num_seg = num_segs;
		remainder += skb_frag_len;
		num_segs += remainder / tso_seg_size;
		remainder = remainder % tso_seg_size;
		skb_nr_frags--;
		if (remainder) {
			if (num_segs > temp_num_seg)
				frags_per_tso = 0;
			/*
			 * increment the tso per frags whenever remainder is
			 * positive. If frags_per_tso reaches the (max-1),
			 * [First frags always have EIT header, therefore max-1]
			 * increment the num_segs as no more data can be
			 * accommodated in the curr tso seg. Reset the remainder
			 * and frags per tso and keep looping.
			 */
			frags_per_tso++;
			if (frags_per_tso == FRAG_NUM_MAX - 1) {
				num_segs++;
				frags_per_tso = 0;
				remainder = 0;
			}
			/*
			 * If this is the last skb frag and still remainder is
			 * non-zero(frags_per_tso is not reached to the max-1)
			 * then increment the num_segs to take care of the
			 * remaining length.
			 */
			if (!skb_nr_frags && remainder) {
				num_segs++;
				frags_per_tso = 0;
			}
		} else {
			/* Whenever remainder is 0, reset the frags_per_tso. */
			frags_per_tso = 0;
		}
		j++;
	}

	return num_segs;
}
#else
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	uint32_t i, gso_size, tmp_len, num_segs = 0;
	struct skb_frag_struct *frag = NULL;

	/*
	 * Check if the head SKB or any of frags are allocated in < 0x50000000
	 * region which cannot be accessed by Target
	 */
	if (virt_to_phys(skb->data) < 0x50000040) {
		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
			  __func__, __LINE__, skb_shinfo(skb)->nr_frags,
			  virt_to_phys(skb->data));
		goto fail;

	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		if (!frag)
			goto fail;

		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
			goto fail;
	}


	gso_size = skb_shinfo(skb)->gso_size;
	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
			+ tcp_hdrlen(skb));
	while (tmp_len) {
		num_segs++;
		if (tmp_len > gso_size)
			tmp_len -= gso_size;
		else
			break;
	}

	return num_segs;

	/*
	 * Do not free this frame, just do socket level accounting
	 * so that this is not reused.
	 */
fail:
	if (skb->sk)
		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));

	return 0;
}
#endif
qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);

#endif /* FEATURE_TSO */

/* Take an additional reference on @skb and return it */
struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
{
	qdf_nbuf_users_inc(&skb->users);
	return skb;
}
qdf_export_symbol(__qdf_nbuf_inc_users);

/* Return the current user (reference) count of @skb */
int __qdf_nbuf_get_users(struct sk_buff *skb)
{
	return qdf_nbuf_users_read(&skb->users);
}
qdf_export_symbol(__qdf_nbuf_get_users);

/**
 * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
 * @skb: sk_buff handle
 *
 * Return: none
 */

void __qdf_nbuf_ref(struct sk_buff *skb)
{
	skb_get(skb);
}
qdf_export_symbol(__qdf_nbuf_ref);

/**
 * __qdf_nbuf_shared() - Check whether the buffer is shared
 *  @skb: sk_buff buffer
 *
 *  Return: true if more than one person has a reference to this buffer.
 */
int __qdf_nbuf_shared(struct sk_buff *skb)
{
	return skb_shared(skb);
}
qdf_export_symbol(__qdf_nbuf_shared);

/**
 * __qdf_nbuf_dmamap_create() - create a DMA map.
 * @osdev: qdf device handle
 * @dmap: dma map handle
 *
 * This can later be used to map networking buffers. They :
 * - need space in adf_drv's software descriptor
 * - are typically created during adf_drv_create
 * - need to be created before any API(qdf_nbuf_map) that uses them
 *
 * Return: QDF STATUS
 */
QDF_STATUS
__qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
{
	QDF_STATUS error = QDF_STATUS_SUCCESS;
	/*
	 * driver can tell its SG capability, it must be handled.
	 * Bounce buffers if they are there
	 */
	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
	if (!(*dmap))
		error = QDF_STATUS_E_NOMEM;

	return error;
}
qdf_export_symbol(__qdf_nbuf_dmamap_create);
/**
 * __qdf_nbuf_dmamap_destroy() - delete a dma map
 * @osdev: qdf device handle
 * @dmap: dma map handle
 *
 * Return: none
 */
void
__qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
{
	kfree(dmap);
}
qdf_export_symbol(__qdf_nbuf_dmamap_destroy);

/**
 * __qdf_nbuf_map_nbytes_single() - map nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: QDF_STATUS
 */
#ifdef A_SIMOS_DEVHOST
QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		 qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	/*
	 * NOTE(review): simulation host stores the virtual data pointer as
	 * the "DMA address" — presumably no real DMA occurs on this target;
	 * confirm before reusing this path elsewhere.
	 */
	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
#else
QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		 qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
#endif
/**
 * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST)
void
__qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
{
}
qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);

#else
void
__qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
{
	if (0 == QDF_NBUF_CB_PADDR(buf)) {
		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
		return;
	}
	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
			 nbytes, __qdf_dma_dir_to_os(dir));
}
qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
#endif
/**
 * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
 * @osdev: os device
 * @skb: skb handle
 * @dir: dma direction
 * @nbytes: number of bytes to be mapped
 *
 * Return: QDF_STATUS
 */
#ifdef QDF_OS_DEBUG
QDF_STATUS
__qdf_nbuf_map_nbytes(
	qdf_device_t osdev,
	struct sk_buff *skb,
	qdf_dma_dir_t dir,
	int nbytes)
{
	struct skb_shared_info *sh = skb_shinfo(skb);

	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's only a single fragment.
	 * To support multiple fragments, it would be necessary to change
	 * adf_nbuf_t to be a separate object that stores meta-info
	 * (including the bus address for each fragment) and a pointer
	 * to the underlying sk_buff.
	 */
	qdf_assert(sh->nr_frags == 0);

	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
}
qdf_export_symbol(__qdf_nbuf_map_nbytes);
#else
QDF_STATUS
__qdf_nbuf_map_nbytes(
	qdf_device_t osdev,
	struct sk_buff *skb,
	qdf_dma_dir_t dir,
	int nbytes)
{
	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
}
qdf_export_symbol(__qdf_nbuf_map_nbytes);
#endif
/**
 * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
 * @osdev: OS device
 * @skb: skb handle
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
void
__qdf_nbuf_unmap_nbytes(
	qdf_device_t osdev,
	struct sk_buff *skb,
	qdf_dma_dir_t dir,
	int nbytes)
{
	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
	 */
	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
}
qdf_export_symbol(__qdf_nbuf_unmap_nbytes);

/**
 * __qdf_nbuf_dma_map_info() - return the dma map info
 * @bmap: dma map
 * @sg: dma map info
 *
 * Copies the mapped segment array out of @bmap; @bmap must already be mapped.
 *
 * Return: none
 */
void
__qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
{
	qdf_assert(bmap->mapped);
	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);

	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
	       sizeof(struct __qdf_segment));
	sg->nsegs = bmap->nsegs;
}
qdf_export_symbol(__qdf_nbuf_dma_map_info);
/**
 * __qdf_nbuf_frag_info() - return the frag data & len, where frag no.
is 3527 * specified by the index 3528 * @skb: sk buff 3529 * @sg: scatter/gather list of all the frags 3530 * 3531 * Return: none 3532 */ 3533 #if defined(__QDF_SUPPORT_FRAG_MEM) 3534 void 3535 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) 3536 { 3537 qdf_assert(skb != NULL); 3538 sg->sg_segs[0].vaddr = skb->data; 3539 sg->sg_segs[0].len = skb->len; 3540 sg->nsegs = 1; 3541 3542 for (int i = 1; i <= sh->nr_frags; i++) { 3543 skb_frag_t *f = &sh->frags[i - 1]; 3544 3545 sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) + 3546 f->page_offset); 3547 sg->sg_segs[i].len = f->size; 3548 3549 qdf_assert(i < QDF_MAX_SGLIST); 3550 } 3551 sg->nsegs += i; 3552 3553 } 3554 qdf_export_symbol(__qdf_nbuf_frag_info); 3555 #else 3556 #ifdef QDF_OS_DEBUG 3557 void 3558 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) 3559 { 3560 3561 struct skb_shared_info *sh = skb_shinfo(skb); 3562 3563 qdf_assert(skb != NULL); 3564 sg->sg_segs[0].vaddr = skb->data; 3565 sg->sg_segs[0].len = skb->len; 3566 sg->nsegs = 1; 3567 3568 qdf_assert(sh->nr_frags == 0); 3569 } 3570 qdf_export_symbol(__qdf_nbuf_frag_info); 3571 #else 3572 void 3573 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) 3574 { 3575 sg->sg_segs[0].vaddr = skb->data; 3576 sg->sg_segs[0].len = skb->len; 3577 sg->nsegs = 1; 3578 } 3579 qdf_export_symbol(__qdf_nbuf_frag_info); 3580 #endif 3581 #endif 3582 /** 3583 * __qdf_nbuf_get_frag_size() - get frag size 3584 * @nbuf: sk buffer 3585 * @cur_frag: current frag 3586 * 3587 * Return: frag size 3588 */ 3589 uint32_t 3590 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag) 3591 { 3592 struct skb_shared_info *sh = skb_shinfo(nbuf); 3593 const skb_frag_t *frag = sh->frags + cur_frag; 3594 3595 return skb_frag_size(frag); 3596 } 3597 qdf_export_symbol(__qdf_nbuf_get_frag_size); 3598 3599 /** 3600 * __qdf_nbuf_frag_map() - dma map frag 3601 * @osdev: os device 3602 * @nbuf: sk buff 3603 * @offset: offset 3604 * @dir: direction 3605 * 
@cur_frag: current fragment 3606 * 3607 * Return: QDF status 3608 */ 3609 #ifdef A_SIMOS_DEVHOST 3610 QDF_STATUS __qdf_nbuf_frag_map( 3611 qdf_device_t osdev, __qdf_nbuf_t nbuf, 3612 int offset, qdf_dma_dir_t dir, int cur_frag) 3613 { 3614 int32_t paddr, frag_len; 3615 3616 QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data; 3617 return QDF_STATUS_SUCCESS; 3618 } 3619 qdf_export_symbol(__qdf_nbuf_frag_map); 3620 #else 3621 QDF_STATUS __qdf_nbuf_frag_map( 3622 qdf_device_t osdev, __qdf_nbuf_t nbuf, 3623 int offset, qdf_dma_dir_t dir, int cur_frag) 3624 { 3625 dma_addr_t paddr, frag_len; 3626 struct skb_shared_info *sh = skb_shinfo(nbuf); 3627 const skb_frag_t *frag = sh->frags + cur_frag; 3628 3629 frag_len = skb_frag_size(frag); 3630 3631 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr = 3632 skb_frag_dma_map(osdev->dev, frag, offset, frag_len, 3633 __qdf_dma_dir_to_os(dir)); 3634 return dma_mapping_error(osdev->dev, paddr) ? 3635 QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS; 3636 } 3637 qdf_export_symbol(__qdf_nbuf_frag_map); 3638 #endif 3639 /** 3640 * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map 3641 * @dmap: dma map 3642 * @cb: callback 3643 * @arg: argument 3644 * 3645 * Return: none 3646 */ 3647 void 3648 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg) 3649 { 3650 return; 3651 } 3652 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb); 3653 3654 3655 /** 3656 * __qdf_nbuf_sync_single_for_cpu() - nbuf sync 3657 * @osdev: os device 3658 * @buf: sk buff 3659 * @dir: direction 3660 * 3661 * Return: none 3662 */ 3663 #if defined(A_SIMOS_DEVHOST) 3664 static void __qdf_nbuf_sync_single_for_cpu( 3665 qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) 3666 { 3667 return; 3668 } 3669 #else 3670 static void __qdf_nbuf_sync_single_for_cpu( 3671 qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) 3672 { 3673 if (0 == QDF_NBUF_CB_PADDR(buf)) { 3674 qdf_print("ERROR: NBUF mapped physical address is NULL\n"); 3675 return; 3676 } 3677 
dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf), 3678 skb_end_offset(buf) - skb_headroom(buf), 3679 __qdf_dma_dir_to_os(dir)); 3680 } 3681 #endif 3682 /** 3683 * __qdf_nbuf_sync_for_cpu() - nbuf sync 3684 * @osdev: os device 3685 * @skb: sk buff 3686 * @dir: direction 3687 * 3688 * Return: none 3689 */ 3690 void 3691 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, 3692 struct sk_buff *skb, qdf_dma_dir_t dir) 3693 { 3694 qdf_assert( 3695 (dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); 3696 3697 /* 3698 * Assume there's a single fragment. 3699 * If this is not true, the assertion in __adf_nbuf_map will catch it. 3700 */ 3701 __qdf_nbuf_sync_single_for_cpu(osdev, skb, dir); 3702 } 3703 qdf_export_symbol(__qdf_nbuf_sync_for_cpu); 3704 3705 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 3706 /** 3707 * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags 3708 * @rx_status: Pointer to rx_status. 3709 * @rtap_buf: Buf to which VHT info has to be updated. 3710 * @rtap_len: Current length of radiotap buffer 3711 * 3712 * Return: Length of radiotap after VHT flags updated. 3713 */ 3714 static unsigned int qdf_nbuf_update_radiotap_vht_flags( 3715 struct mon_rx_status *rx_status, 3716 int8_t *rtap_buf, 3717 uint32_t rtap_len) 3718 { 3719 uint16_t vht_flags = 0; 3720 3721 /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */ 3722 vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | 3723 IEEE80211_RADIOTAP_VHT_KNOWN_GI | 3724 IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM | 3725 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED | 3726 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH | 3727 IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID; 3728 put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]); 3729 rtap_len += 2; 3730 3731 rtap_buf[rtap_len] |= 3732 (rx_status->is_stbc ? 3733 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) | 3734 (rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) | 3735 (rx_status->ldpc ? 
3736 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) | 3737 (rx_status->beamformed ? 3738 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0); 3739 rtap_len += 1; 3740 switch (rx_status->vht_flag_values2) { 3741 case IEEE80211_RADIOTAP_VHT_BW_20: 3742 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20; 3743 break; 3744 case IEEE80211_RADIOTAP_VHT_BW_40: 3745 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40; 3746 break; 3747 case IEEE80211_RADIOTAP_VHT_BW_80: 3748 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80; 3749 break; 3750 case IEEE80211_RADIOTAP_VHT_BW_160: 3751 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160; 3752 break; 3753 } 3754 rtap_len += 1; 3755 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]); 3756 rtap_len += 1; 3757 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]); 3758 rtap_len += 1; 3759 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]); 3760 rtap_len += 1; 3761 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]); 3762 rtap_len += 1; 3763 rtap_buf[rtap_len] = (rx_status->vht_flag_values4); 3764 rtap_len += 1; 3765 rtap_buf[rtap_len] = (rx_status->vht_flag_values5); 3766 rtap_len += 1; 3767 put_unaligned_le16(rx_status->vht_flag_values6, 3768 &rtap_buf[rtap_len]); 3769 rtap_len += 2; 3770 3771 return rtap_len; 3772 } 3773 3774 /** 3775 * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status 3776 * @rx_status: Pointer to rx_status. 3777 * @rtap_buf: buffer to which radiotap has to be updated 3778 * @rtap_len: radiotap length 3779 * 3780 * API update high-efficiency (11ax) fields in the radiotap header 3781 * 3782 * Return: length of rtap_len updated. 
 */
static unsigned int
qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				  int8_t *rtap_buf, uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
	 * Enable all "known" HE radiotap flags for now
	 */
	/* serialize the six pre-formatted HE data words, little-endian */
	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
	rtap_len += 2;
	/* NOTE(review): per-packet info-level log on the monitor rx path —
	 * presumably acceptable throughput-wise, but confirm it should not
	 * be at debug level.
	 */
	qdf_info("he data %x %x %x %x %x %x",
		 rx_status->he_data1,
		 rx_status->he_data2, rx_status->he_data3,
		 rx_status->he_data4, rx_status->he_data5,
		 rx_status->he_data6);
	return rtap_len;
}


/**
 * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * API update HE-MU fields in the radiotap header
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
				     int8_t *rtap_buf, uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
	 * Enable all "known" he-mu radiotap flags for now
	 */
	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	/* four per-channel resource-unit assignment bytes */
	rtap_buf[rtap_len] = rx_status->he_RU[0];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[1];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[2];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[3];
	rtap_len += 1;
	/* NOTE(review): per-packet info-level log; see he_flags variant */
	qdf_info("he_flags %x %x he-RU %x %x %x %x",
		 rx_status->he_flags1,
		 rx_status->he_flags2, rx_status->he_RU[0],
		 rx_status->he_RU[1], rx_status->he_RU[2],
		 rx_status->he_RU[3]);

	return rtap_len;
}

/**
 * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * API update he-mu-other fields in the radiotap header
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
					   int8_t *rtap_buf, uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
	 * Enable all "known" he-mu-other radiotap flags for now
	 */
	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	rtap_buf[rtap_len] = rx_status->he_per_user_position;
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_per_user_known;
	rtap_len += 1;
	/* NOTE(review): per-packet info-level log; see he_flags variant */
	qdf_info("he_per_user %x %x pos %x knwn %x",
		 rx_status->he_per_user_1,
		 rx_status->he_per_user_2, rx_status->he_per_user_position,
		 rx_status->he_per_user_known);
	return rtap_len;
}

/* offset applied to rssi_comb (dB) to produce a dBm antenna signal value */
#define NORMALIZED_TO_NOISE_FLOOR (-96)

/* This is the length for radiotap, combined length
 * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
 * cannot be more than available headroom_sz.
 * increase this when we add more radiotap elements.
 */

/* worst-case serialized size of each optional radiotap element */
#define RADIOTAP_VHT_FLAGS_LEN 12
#define RADIOTAP_HE_FLAGS_LEN 12
#define RADIOTAP_HE_MU_FLAGS_LEN 8
#define RADIOTAP_HE_MU_OTHER_FLAGS_LEN 18
#define RADIOTAP_FIXED_HEADER_LEN 16
#define RADIOTAP_HT_FLAGS_LEN 3
#define RADIOTAP_AMPDU_STATUS_LEN 8
#define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
				RADIOTAP_FIXED_HEADER_LEN + \
				RADIOTAP_HT_FLAGS_LEN + \
				RADIOTAP_VHT_FLAGS_LEN + \
				RADIOTAP_AMPDU_STATUS_LEN + \
				RADIOTAP_HE_FLAGS_LEN + \
				RADIOTAP_HE_MU_FLAGS_LEN + \
				RADIOTAP_HE_MU_OTHER_FLAGS_LEN)

/* it_present bit numbers for HE elements (may predate kernel definitions) */
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

/**
 * radiotap_num_to_freq() - Get frequency from chan number
 * @chan_num - Input channel number
 *
 * Handles 2.4 GHz (1-14), 2.512+ GHz (15-26), 4.9 GHz (183-196) and
 * 5 GHz (everything else) channel numbering schemes.
 *
 * Return - Channel frequency in Mhz
 */
static uint16_t radiotap_num_to_freq (uint16_t chan_num)
{
	/* channel 14 is a special case: 2484 MHz, not on the 5 MHz grid */
	if (chan_num == CHANNEL_NUM_14)
		return CHANNEL_FREQ_2484;
	if (chan_num < CHANNEL_NUM_14)
		return CHANNEL_FREQ_2407 +
			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);

	if (chan_num < CHANNEL_NUM_27)
		return CHANNEL_FREQ_2512 +
			((chan_num - CHANNEL_NUM_15) *
			 FREQ_MULTIPLIER_CONST_20MHZ);

	/* 4.9 GHz public-safety band */
	if (chan_num > CHANNEL_NUM_182 &&
	    chan_num < CHANNEL_NUM_197)
		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
			CHANNEL_FREQ_4000);

	return CHANNEL_FREQ_5000 +
		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
}

/**
 * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buf to which AMPDU info has to be updated.
 * @rtap_len: Current length of radiotap buffer
 *
 * Return: Length of radiotap after AMPDU flags updated.
 */
static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
					struct mon_rx_status *rx_status,
					uint8_t *rtap_buf,
					uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
	 * First 32 bits of AMPDU represents the reference number
	 */

	uint32_t ampdu_reference_num = rx_status->ppdu_id;
	/* no AMPDU flag/delimiter info available: both fields emitted as 0 */
	uint16_t ampdu_flags = 0;
	uint16_t ampdu_reserved_flags = 0;

	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;

	return rtap_len;
}

/**
 * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
 * @rx_status: Pointer to rx_status.
 * @nbuf: nbuf pointer to which radiotap has to be updated
 * @headroom_sz: Available headroom size.
 *
 * Return: length of rtap_len updated.
 */
unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
{
	/* build the header in a stack buffer, then push it onto the nbuf */
	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *)rtap_buf;
	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
	uint32_t rtap_len = rtap_hdr_len;
	/* 'length' marks the start of each element for overflow checks */
	uint8_t length = rtap_len;

	/* IEEE80211_RADIOTAP_TSFT __le64 microseconds */
	rthdr->it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
	rtap_len += 8;

	/* IEEE80211_RADIOTAP_FLAGS u8 */
	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS);

	if (rx_status->rs_fcs_err)
		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_buf[rtap_len] = rx_status->rtap_flags;
	rtap_len += 1;

	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
	/* legacy rate is only meaningful for non-HT/VHT/HE frames */
	if (!rx_status->ht_flags && !rx_status->vht_flags &&
	    !rx_status->he_flags) {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		rtap_buf[rtap_len] = rx_status->rate;
	} else
		rtap_buf[rtap_len] = 0;
	rtap_len += 1;

	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
	rtap_len += 2;
	/* Channel flags. */
	if (rx_status->chan_num > CHANNEL_NUM_35)
		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
	else
		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
	if (rx_status->cck_flag)
		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
	if (rx_status->ofdm_flag)
		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from one milliwatt
	 * (dBm)
	 */
	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	/*
	 * rssi_comb is int dB, need to convert it to dBm.
	 * normalize value to noise floor of -96 dBm
	 */
	rtap_buf[rtap_len] = rx_status->rssi_comb +
		NORMALIZED_TO_NOISE_FLOOR;
	rtap_len += 1;

	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA);
	rtap_buf[rtap_len] = rx_status->nr_ant;
	rtap_len += 1;

	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
		return 0;
	}

	if (rx_status->ht_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
					IEEE80211_RADIOTAP_MCS_HAVE_GI;
		rtap_len += 1;

		if (rx_status->sgi)
			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
		if (rx_status->bw)
			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
		else
			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
		rtap_len += 1;

		rtap_buf[rtap_len] = rx_status->mcs;
		rtap_len += 1;

		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
								rtap_buf,
								rtap_len);
	}

	if (rx_status->vht_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
								rtap_buf,
								rtap_len);

		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->he_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_HE */
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
								rtap_buf,
								rtap_len);

		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->he_mu_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_HE-MU */
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
								rtap_buf,
								rtap_len);

		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->he_mu_other_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
		rtap_len =
			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
								rtap_buf,
								rtap_len);

		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
			return 0;
		}
	}

	rthdr->it_len = cpu_to_le16(rtap_len);

	/* checked only now: on failure nothing has been pushed to the nbuf */
	if (headroom_sz < rtap_len) {
		qdf_print("ERROR: not enough space to update radiotap\n");
		return 0;
	}
	qdf_nbuf_push_head(nbuf, rtap_len);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
	return rtap_len;
}
#else
static unsigned int qdf_nbuf_update_radiotap_vht_flags(
					struct mon_rx_status *rx_status,
					int8_t *rtap_buf,
					uint32_t rtap_len)
{
	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}

/* NOTE(review): unlike its siblings this stub is not 'static' — verify
 * against the header whether external linkage is intended here.
 */
unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				      int8_t *rtap_buf, uint32_t rtap_len)
{
	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}

static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
					struct mon_rx_status *rx_status,
					uint8_t *rtap_buf,
					uint32_t rtap_len)
{
	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}

unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
{
	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}
#endif
qdf_export_symbol(qdf_nbuf_update_radiotap);

/**
 * __qdf_nbuf_reg_free_cb() - register nbuf free callback
 * @cb_func_ptr: function pointer to the nbuf free callback
 *
 * This function registers a callback function for nbuf free.
 *
 * Return: none
 */
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
{
	/* NOTE(review): unsynchronized write to a file-scope callback —
	 * presumably registered once at init before frees occur; confirm.
	 */
	nbuf_free_cb = cb_func_ptr;
}

/**
 * qdf_nbuf_classify_pkt() - classify packet
 * @skb - sk buff
 *
 * Tags the nbuf cb area with bcast/mcast bits and an ARP/DHCP/EAPOL/WAPI
 * packet type based on the ethernet header at skb->data.
 *
 * Return: none
 */
void qdf_nbuf_classify_pkt(struct sk_buff *skb)
{
	/* assumes skb->data points at an ethernet header (h_dest first) */
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	/* check destination mac address is broadcast/multicast */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_SET_BCAST(skb);
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_SET_MCAST(skb);

	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
}
qdf_export_symbol(qdf_nbuf_classify_pkt);

/* reset an skb to a pristine single-user state with default headroom */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_users_set(&nbuf->users, 1);
	nbuf->data = nbuf->head + NET_SKB_PAD;
	skb_reset_tail_pointer(nbuf);
}
qdf_export_symbol(__qdf_nbuf_init);

#ifdef WLAN_FEATURE_FASTPATH
/* fastpath twin of __qdf_nbuf_init(); kept separate for the hot path */
void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
{
	qdf_nbuf_users_set(&nbuf->users, 1);
	nbuf->data = nbuf->head + NET_SKB_PAD;
	skb_reset_tail_pointer(nbuf);
}
qdf_export_symbol(qdf_nbuf_init_fast);
#endif /* WLAN_FEATURE_FASTPATH */


#ifdef QDF_NBUF_GLOBAL_COUNT
/**
 * __qdf_nbuf_mod_init() - Initialization routine for the qdf_nbuf module
 *
 * Resets the global nbuf counter and exposes it via a read-only
 * debugfs entry.
 *
 * Return void
 */
void __qdf_nbuf_mod_init(void)
{
	qdf_atomic_init(&nbuf_count);
	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
}

/**
 * __qdf_nbuf_mod_exit() - Uninitialization routine for the qdf_nbuf module
 *
 * Return void
 */
void __qdf_nbuf_mod_exit(void)
{
	/* NOTE(review): __qdf_nbuf_mod_init() creates a debugfs entry for
	 * nbuf_count that is not removed here — verify the debugfs layer
	 * tears it down, otherwise this leaks on module unload.
	 */
}
#endif