Selected lines from include/linux/netdevice.h (code-search result listing)

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
54 #include <net/dropreason-core.h>
102 * - qdisc return codes
103 * - driver transmit return codes
104 * - errno values
108 * the driver transmit return codes though - when qdiscs are used, the actual
115 /* qdisc ->enqueue() return codes. */
125 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
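The layering described above (qdisc enqueue codes vs. errno values) is what net_xmit_errno() bridges: callers that get a positive NET_XMIT_* code back from the qdisc path convert it before reporting an error to the socket layer. A minimal hedged sketch, with my_send_one() as a hypothetical helper:

	/* Hedged sketch: map a positive NET_XMIT_* code from dev_queue_xmit()
	 * to an errno; NET_XMIT_CN is not treated as a hard error.
	 * my_send_one() is a hypothetical wrapper, not a kernel API.
	 */
	static int my_send_one(struct sk_buff *skb)
	{
		int rc = dev_queue_xmit(skb);	/* may return NET_XMIT_* (positive) */

		if (rc > 0)
			rc = net_xmit_errno(rc);	/* DROP -> -ENOBUFS, CN -> 0 */
		return rc;
	}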
145 * - successful transmission (rc == NETDEV_TX_OK) in dev_xmit_complete()
146 * - error while transmitting (rc < 0) in dev_xmit_complete()
147 * - error while queueing to a different device (rc & NET_XMIT_MASK) in dev_xmit_complete()
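The three cases listed above are exactly what dev_xmit_complete() collapses into one question: did the driver consume the skb, or must the caller keep ownership? A hedged sketch of the caller-side pattern (my_requeue() is hypothetical):

	/* Hedged sketch: if dev_xmit_complete(rc) is true the driver owns (or
	 * already freed) the skb; otherwise the caller must keep/requeue it.
	 */
	static netdev_tx_t my_try_xmit(struct sk_buff *skb, struct net_device *dev,
				       struct netdev_queue *txq)
	{
		netdev_tx_t rc = netdev_start_xmit(skb, dev, txq, false);

		if (!dev_xmit_complete(rc))
			my_requeue(skb);	/* hypothetical: driver did not take it */
		return rc;
	}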
156 * Compute the worst-case header length according to the protocols
217 /* per-cpu stats, allocated on demand.
258 #define netdev_hw_addr_list_count(l) ((l)->count)
261 list_for_each_entry(ha, &(l)->list, list)
263 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
264 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
266 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
269 if ((_ha)->sync_cnt)
271 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
272 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
274 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
277 if ((_ha)->sync_cnt)
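The unicast/multicast list iterators above are typically walked from a driver's .ndo_set_rx_mode hook. A minimal hedged sketch; my_hw_add_mc_filter() is a hypothetical hardware hook:

	static void my_set_rx_mode(struct net_device *dev)
	{
		struct netdev_hw_addr *ha;

		if (netdev_mc_empty(dev))
			return;

		netdev_for_each_mc_addr(ha, dev)
			my_hw_add_mc_filter(dev, ha->addr);	/* hypothetical HW filter write */
	}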
286 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
288 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
292 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
294 * dev->hard_header_len ? (dev->hard_header_len +
295 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
301 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
302 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
304 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
305 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
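LL_RESERVED_SPACE()/LL_RESERVED_SPACE_EXTRA() above are normally used to size link-layer headroom when a packet is built by hand (as the ARP/ND paths do). A hedged sketch:

	/* Hedged sketch: reserve aligned link-layer headroom before building a frame. */
	static struct sk_buff *my_alloc_frame(struct net_device *dev, unsigned int len)
	{
		struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);

		if (!skb)
			return NULL;
		skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for dev_hard_header() */
		skb->dev = dev;
		return skb;
	}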
352 * to the per-CPU poll_list, and whoever clears that bit
376 /* control-path-only fields follow */
386 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
390 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing */
418 * enum rx_handler_result - Possible return values for rx_handlers.
422 * case skb->dev was changed by rx_handler.
430 * to register a second rx_handler will return -EBUSY.
443 * If the rx_handler changed skb->dev, to divert the skb to another
449 * are registered on exact device (ptype->dev == skb->dev).
451 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
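The RX_HANDLER_* semantics described above can be illustrated with a hedged sketch of an rx_handler that steers frames to an upper device, in the style of bonding/macvlan-type drivers; my_upper_dev() is a hypothetical lookup, and the handler would be installed with netdev_rx_handler_register():

	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;
		struct net_device *upper = my_upper_dev(skb->dev);	/* hypothetical lookup */

		if (!upper)
			return RX_HANDLER_PASS;		/* deliver normally on skb->dev */

		skb->dev = upper;
		return RX_HANDLER_ANOTHER;		/* re-run RX processing on the new skb->dev */
	}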
472 return test_bit(NAPI_STATE_DISABLE, &n->state); in napi_disable_pending()
477 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); in napi_prefer_busy_poll()
481 * napi_is_scheduled - test if NAPI is scheduled
484 * This check is "best-effort". With no locking implemented,
500 return test_bit(NAPI_STATE_SCHED, &n->state); in napi_is_scheduled()
506 * napi_schedule - schedule NAPI poll
526 * napi_schedule_irqoff - schedule NAPI poll
538 * napi_complete_done - NAPI processing complete
557 * napi_disable - prevent NAPI from scheduling
568 * napi_synchronize - wait until NAPI is not running
578 while (test_bit(NAPI_STATE_SCHED, &n->state)) in napi_synchronize()
585 * napi_if_scheduled_mark_missed - if napi is running, set the
596 val = READ_ONCE(n->state); in napi_if_scheduled_mark_missed()
605 } while (!try_cmpxchg(&n->state, &val, new)); in napi_if_scheduled_mark_missed()
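The napi_schedule()/napi_complete_done() pair documented above forms the usual IRQ-to-poll handshake. A hedged sketch; struct my_priv and the my_*_irq()/my_clean_rx() helpers are hypothetical:

	static irqreturn_t my_irq(int irq, void *data)
	{
		struct my_priv *priv = data;		/* hypothetical driver private */

		my_disable_rx_irq(priv);		/* hypothetical: mask device interrupts */
		napi_schedule(&priv->napi);		/* defer the work to softirq context */
		return IRQ_HANDLED;
	}

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int work = my_clean_rx(priv, budget);	/* hypothetical: process up to budget packets */

		if (work < budget && napi_complete_done(napi, work))
			my_enable_rx_irq(priv);		/* re-arm interrupts only when truly done */
		return work;
	}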
638 * read-mostly part
662 * write-mostly part
677 * slow- / control-path part
721 return q->numa_node; in netdev_queue_numa_node_read()
730 q->numa_node = node; in netdev_queue_numa_node_write()
739 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
759 - sizeof(struct xps_map)) / sizeof(u16))
765 * in nr_ids. This helps avoid accessing out-of-bounds memory.
769 * not crossing its upper bound, as the original dev->num_tc can be updated in
825 return a->id_len == b->id_len && in netdev_phys_item_id_same()
826 memcmp(a->id, b->id, a->id_len) == 0; in netdev_phys_item_id_same()
1036 * corner cases, but the stack really does a non-trivial amount
1073 * Old-style ioctl entry point. This is used internally by the
1097 * for dev->watchdog ticks.
1104 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1107 * (which should normally be dev->stats) and return a pointer to
1110 * 3. Update dev->stats asynchronously and atomically, and define
1131 * SR-IOV management functions.
1165 * so the underlying device can perform whatever needed clean-ups to
1192 * FC-GS Fabric Device Management Information (FDMI) specification.
1223 * Adjusts the requested feature flags according to device-specific
1230 * Must return >0 or -errno if it changed dev->features itself.
1272 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1276 * network cables) or protocol-dependent mechanisms (eg
1303 * Called when a user wants to set a max-rate limitation of specific
1327 * no frames were xmit'ed and core-caller will free all frames.
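Option 1 of the statistics scheme described above (define @ndo_get_stats64 to fill a zero-initialised rtnl_link_stats64) commonly sums per-ring counters. A hedged sketch; the counter layout in struct my_priv is hypothetical:

	static void my_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats)
	{
		struct my_priv *priv = netdev_priv(dev);
		int i;

		/* 'stats' arrives zeroed; just accumulate into it. */
		for (i = 0; i < priv->num_rings; i++) {
			stats->rx_packets += priv->ring[i].rx_packets;
			stats->rx_bytes   += priv->ring[i].rx_bytes;
			stats->tx_packets += priv->ring[i].tx_packets;
			stats->tx_bytes   += priv->ring[i].tx_bytes;
		}
	}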
1609 * enum netdev_priv_flags - &struct net_device priv_flags
1625 * release skb->dst
1627 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1715 * struct net_device - The DEVICE structure.
1718 * data with strictly "high-level" data, and it has to know about
1744 * @ptype_all: Device-specific packet handlers for all protocols
1745 * @ptype_specific: Device-specific, protocol-specific packet handlers
1749 * @hw_features: User-changeable features
1751 * @wanted_features: User-requested features
1785 * discovery handling. Necessary for e.g. 6LoWPAN.
1794 * @priv_len: Size of the ->priv flexible array
1848 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1851 * @mctp_ptr: MCTP specific data
1872 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1912 * @ml_priv: Mid-layer private
1913 * @ml_priv_type: Mid-layer private type
1917 * means the driver is handling statistics allocation/
1932 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1934 * @stat_ops: Optional ops for queue-aware statistics
1958 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1974 * @net_notifier_list: List of per-net netdev notifier block
1987 * dev->addr_list_lock.
2022 /* TX read-mostly hotpath */
2037 /* Note : dev->mtu is often read without holding a lock.
2056 /* TXRX read-mostly hotpath */
2070 /* RX read-mostly hotpath */
2121 /* Read-mostly cache-line for fast-path access */
2210 /* Protocol-specific pointers */
2249 * and shinfo->gso_segs is a 16bit field.
2277 /* These may be needed for future network-power-down code. */
2306 /* mid-layer private */
2335 * and shinfo->gso_segs is a 16bit field.
2414 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
2415 ((dev)->devlink_port = (port)); \
2420 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) in netif_elide_gro()
2430 return dev->prio_tc_map[prio & TC_BITMASK]; in netdev_get_prio_tc_map()
2436 if (tc >= dev->num_tc) in netdev_set_prio_tc_map()
2437 return -EINVAL; in netdev_set_prio_tc_map()
2439 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; in netdev_set_prio_tc_map()
2451 return dev->num_tc; in netdev_get_num_tc()
2478 return max_t(int, -dev->num_tc, 0); in netdev_get_sb_channel()
2485 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); in netdev_get_tx_queue()
2486 return &dev->_tx[index]; in netdev_get_tx_queue()
2503 for (i = 0; i < dev->num_tx_queues; i++) in netdev_for_each_tx_queue()
2504 f(dev, &dev->_tx[i], arg); in netdev_for_each_tx_queue()
2514 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2515 lockdep_set_class(&(dev)->addr_list_lock, \
2517 for (i = 0; i < (dev)->num_tx_queues; i++) \
2518 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2533 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; in netdev_get_fwd_headroom()
2538 if (dev->netdev_ops->ndo_set_rx_headroom) in netdev_set_rx_headroom()
2539 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); in netdev_set_rx_headroom()
2545 netdev_set_rx_headroom(dev, -1); in netdev_reset_rx_headroom()
2551 if (dev->ml_priv_type != type) in netdev_get_ml_priv()
2554 return dev->ml_priv; in netdev_get_ml_priv()
2561 WARN(dev->ml_priv_type && dev->ml_priv_type != type, in netdev_set_ml_priv()
2563 dev->ml_priv_type, type); in netdev_set_ml_priv()
2564 WARN(!dev->ml_priv_type && dev->ml_priv, in netdev_set_ml_priv()
2567 dev->ml_priv = ml_priv; in netdev_set_ml_priv()
2568 dev->ml_priv_type = type; in netdev_set_ml_priv()
2577 return read_pnet(&dev->nd_net); in dev_net()
2583 write_pnet(&dev->nd_net, net); in dev_net_set()
2587 * netdev_priv - access network device private data
2594 return (void *)dev->priv; in netdev_priv()
2600 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2603 * fine-grained identification of different network device types. For
2606 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
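netdev_priv() and SET_NETDEV_DEV() above are usually paired at probe time: the core allocates the flexible ->priv area, and the driver ties the netdev to its parent bus device for sysfs. A hedged PCI-style sketch; struct my_priv is hypothetical:

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct net_device *ndev;
		struct my_priv *priv;
		int err;

		ndev = alloc_etherdev(sizeof(*priv));	/* private area sized by the core */
		if (!ndev)
			return -ENOMEM;

		SET_NETDEV_DEV(ndev, &pdev->dev);	/* sysfs parent = the PCI device */
		priv = netdev_priv(ndev);		/* the flexible ->priv area */
		priv->ndev = ndev;			/* hypothetical back-pointer */

		err = register_netdev(ndev);
		if (err)
			free_netdev(ndev);
		return err;
	}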
2614 napi->irq = irq; in netif_napi_set_irq()
2626 * netif_napi_add() - initialize a NAPI context
2632 * *any* of the other NAPI-related functions.
2647 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); in netif_napi_add_tx_weight()
2652 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2669 * __netif_napi_del - remove a NAPI context
2679 * netif_napi_del - remove a NAPI context
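A NAPI context added with netif_napi_add() (see above) is then enabled and disabled from the device's open/stop paths; a hedged sketch, again with struct my_priv hypothetical and my_poll as in the earlier poll-function sketch:

	static int my_open(struct net_device *ndev)
	{
		struct my_priv *priv = netdev_priv(ndev);

		/* priv->napi was set up at probe time with
		 * netif_napi_add(ndev, &priv->napi, my_poll) (recent 3-argument form).
		 */
		napi_enable(&priv->napi);
		netif_start_queue(ndev);
		return 0;
	}

	static int my_stop(struct net_device *ndev)
	{
		struct my_priv *priv = netdev_priv(ndev);

		netif_stop_queue(ndev);
		napi_disable(&priv->napi);	/* waits for any in-flight poll to finish */
		return 0;
	}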
2724 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2753 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_rx_add()
2755 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_rx_add()
2756 u64_stats_add(&tstats->rx_bytes, len); in dev_sw_netstats_rx_add()
2757 u64_stats_inc(&tstats->rx_packets); in dev_sw_netstats_rx_add()
2758 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_rx_add()
2765 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_tx_add()
2767 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_tx_add()
2768 u64_stats_add(&tstats->tx_bytes, len); in dev_sw_netstats_tx_add()
2769 u64_stats_add(&tstats->tx_packets, packets); in dev_sw_netstats_tx_add()
2770 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_tx_add()
2775 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); in dev_lstats_add()
2777 u64_stats_update_begin(&lstats->syncp); in dev_lstats_add()
2778 u64_stats_add(&lstats->bytes, len); in dev_lstats_add()
2779 u64_stats_inc(&lstats->packets); in dev_lstats_add()
2780 u64_stats_update_end(&lstats->syncp); in dev_lstats_add()
2791 u64_stats_init(&stat->syncp); \
2808 u64_stats_init(&stat->syncp); \
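The per-CPU tstats helpers above are what software/virtual devices call on their datapath, assuming dev->tstats (struct pcpu_sw_netstats) has been allocated at setup time. A hedged sketch of a toy device:

	static void my_rx_one(struct net_device *dev, struct sk_buff *skb)
	{
		dev_sw_netstats_rx_add(dev, skb->len);	/* lockless, per-CPU accounting */
		netif_rx(skb);
	}

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev_sw_netstats_tx_add(dev, 1, skb->len);
		dev_kfree_skb(skb);			/* this toy device just drops the frame */
		return NETDEV_TX_OK;
	}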
2855 - we can use this e.g. to kick tcp sessions
2983 info->dev = dev; in netdev_notifier_info_init()
2984 info->extack = NULL; in netdev_notifier_info_init()
2990 return info->dev; in netdev_notifier_info_to_dev()
2996 return info->extack; in netdev_notifier_info_to_extack()
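netdev_notifier_info_to_dev() above is the standard way a notifier callback recovers the affected device. A hedged sketch; the callback body is purely illustrative:

	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_UP)
			netdev_info(dev, "came up\n");	/* illustration only */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
	/* registered elsewhere with register_netdevice_notifier(&my_nb) */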
3004 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
3006 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
3008 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
3010 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
3012 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3014 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3017 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3024 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
3033 lh = dev->dev_list.next; in next_net_device()
3034 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device()
3043 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); in next_net_device_rcu()
3044 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device_rcu()
3049 return list_empty(&net->dev_base_head) ? NULL : in first_net_device()
3050 net_device_entry(net->dev_base_head.next); in first_net_device()
3055 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); in first_net_device_rcu()
3057 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in first_net_device_rcu()
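The iteration helpers above walk a namespace's device list; the plain iterator requires the RTNL (or dev_base) lock, while for_each_netdev_rcu() may be used under rcu_read_lock(). A hedged sketch:

	static void my_dump_devices(struct net *net)
	{
		struct net_device *dev;

		rtnl_lock();
		for_each_netdev(net, dev)
			pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
		rtnl_unlock();
	}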
3145 if (!dev->header_ops || !dev->header_ops->create) in dev_hard_header()
3148 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); in dev_hard_header()
3154 const struct net_device *dev = skb->dev; in dev_parse_header()
3156 if (!dev->header_ops || !dev->header_ops->parse) in dev_parse_header()
3158 return dev->header_ops->parse(skb, haddr); in dev_parse_header()
3163 const struct net_device *dev = skb->dev; in dev_parse_header_protocol()
3165 if (!dev->header_ops || !dev->header_ops->parse_protocol) in dev_parse_header_protocol()
3167 return dev->header_ops->parse_protocol(skb); in dev_parse_header_protocol()
3174 if (likely(len >= dev->hard_header_len)) in dev_validate_header()
3176 if (len < dev->min_header_len) in dev_validate_header()
3180 memset(ll_header + len, 0, dev->hard_header_len - len); in dev_validate_header()
3184 if (dev->header_ops && dev->header_ops->validate) in dev_validate_header()
3185 return dev->header_ops->validate(ll_header, len); in dev_validate_header()
3192 return dev->header_ops && dev->header_ops->create; in dev_has_header()
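dev_hard_header() above is how a caller asks the device's header_ops to build the link-layer header (the packet-socket path does exactly this), typically into headroom reserved with LL_RESERVED_SPACE() as in the earlier sketch. A hedged sketch; my_build_header() is a hypothetical wrapper:

	/* 'daddr' points at a destination hardware address of dev->addr_len bytes. */
	static int my_build_header(struct sk_buff *skb, struct net_device *dev,
				   const void *daddr, __be16 proto, unsigned int len)
	{
		if (!dev_has_header(dev))
			return -EINVAL;
		/* NULL saddr means "use the device's own address". */
		return dev_hard_header(skb, dev, ntohs(proto), daddr, NULL, len);
	}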
3196 * Incoming packets are placed on per-CPU queues
3260 return current->net_xmit.recursion; in dev_recursion_level()
3272 for (i = 0; i < dev->num_tx_queues; i++) in netif_tx_schedule_all()
3278 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_start_queue()
3282 * netif_start_queue - allow transmit
3296 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_start_all_queues()
3305 * netif_wake_queue - restart transmit
3320 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_wake_all_queues()
3329 WRITE_ONCE(dev_queue->trans_start, jiffies); in netif_tx_stop_queue()
3335 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_stop_queue()
3339 * netif_stop_queue - stop transmitting packets
3354 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_queue_stopped()
3358 * netif_queue_stopped - test if transmit queue is flow-blocked
3370 return dev_queue->state & QUEUE_STATE_ANY_XOFF; in netif_xmit_stopped()
3376 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; in netif_xmit_frozen_or_stopped()
3382 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; in netif_xmit_frozen_or_drv_stopped()
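The stop/wake helpers above implement the usual "ring full" flow control: stop the queue from the xmit path when no descriptors remain, wake it from TX completion once there is room again. A hedged sketch for a single-queue device; the ring helpers (my_post_to_ring(), my_ring_full(), my_ring_room(), my_reclaim_descriptors()) are hypothetical:

	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		my_post_to_ring(priv, skb);		/* hypothetical descriptor write */
		if (my_ring_full(priv))
			netif_stop_queue(dev);		/* back-pressure the stack */
		return NETDEV_TX_OK;
	}

	static void my_tx_clean(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		my_reclaim_descriptors(priv);		/* hypothetical completion handling */
		if (netif_queue_stopped(dev) && my_ring_room(priv) > MAX_SKB_FRAGS + 1)
			netif_wake_queue(dev);
	}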
3386 * netdev_queue_set_dql_min_limit - set dql minimum limit
3399 dev_queue->dql.min_limit = min_limit; in netdev_queue_set_dql_min_limit()
3406 /* Non-BQL migrated drivers will return 0, too. */ in netdev_queue_dql_avail()
3407 return dql_avail(&txq->dql); in netdev_queue_dql_avail()
3414 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3423 prefetchw(&dev_queue->dql.num_queued); in netdev_txq_bql_enqueue_prefetchw()
3428 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3437 prefetchw(&dev_queue->dql.limit); in netdev_txq_bql_complete_prefetchw()
3442 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3455 dql_queued(&dev_queue->dql, bytes); in netdev_tx_sent_queue()
3457 if (likely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3461 WRITE_ONCE(dev_queue->trans_start, jiffies); in netdev_tx_sent_queue()
3466 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3476 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3477 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3493 dql_queued(&dev_queue->dql, bytes); in __netdev_tx_sent_queue()
3502 * netdev_sent_queue - report the number of bytes queued to hardware
3525 * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3540 dql_completed(&dev_queue->dql, bytes); in netdev_tx_completed_queue()
3549 if (unlikely(dql_avail(&dev_queue->dql) < 0)) in netdev_tx_completed_queue()
3552 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) in netdev_tx_completed_queue()
3558 * netdev_completed_queue - report bytes and packets completed by device
3576 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); in netdev_tx_reset_queue()
3577 dql_reset(&q->dql); in netdev_tx_reset_queue()
3582 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
3593 * netdev_reset_queue - reset the packets and bytes count of a network device
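The BQL helpers documented above are paired across the xmit path, TX completion, and queue reset. A hedged sketch for a single-queue device, reusing the hypothetical my_post_to_ring() from the previous sketch:

	static netdev_tx_t my_bql_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		unsigned int len = skb->len;

		my_post_to_ring(netdev_priv(dev), skb);	/* hypothetical descriptor write */
		netdev_sent_queue(dev, len);		/* account bytes handed to the hardware */
		return NETDEV_TX_OK;
	}

	static void my_tx_complete(struct net_device *dev,
				   unsigned int pkts, unsigned int bytes)
	{
		netdev_completed_queue(dev, pkts, bytes);	/* may re-enable the stack queue */
	}

	/* netdev_reset_queue(dev) would be called when the ring is flushed, e.g. on stop. */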
3605 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3614 if (unlikely(queue_index >= dev->real_num_tx_queues)) { in netdev_cap_txqueue()
3616 dev->name, queue_index, in netdev_cap_txqueue()
3617 dev->real_num_tx_queues); in netdev_cap_txqueue()
3625 * netif_running - test if up
3632 return test_bit(__LINK_STATE_START, &dev->state); in netif_running()
3643 * netif_start_subqueue - allow sending packets on subqueue
3657 * netif_stop_subqueue - stop sending packets on subqueue
3670 * __netif_subqueue_stopped - test status of subqueue
3685 * netif_subqueue_stopped - test status of subqueue
3698 * netif_wake_subqueue - allow sending packets on subqueue
3718 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3734 * netif_attr_test_online - Test for online CPU/Rx queue
3754 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3764 /* -1 is a legal arg here. */ in netif_attrmask_next()
3765 if (n != -1) in netif_attrmask_next()
3775 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3787 /* -1 is a legal arg here. */ in netif_attrmask_next_and()
3788 if (n != -1) in netif_attrmask_next_and()
3817 * netif_is_multiqueue - test if device has multiple transmit queues
3824 return dev->num_tx_queues > 1; in netif_is_multiqueue()
3835 dev->real_num_rx_queues = rxqs; in netif_set_real_num_rx_queues()
3905 kfree_skb(napi->skb); in napi_free_frags()
3906 napi->skb = NULL; in napi_free_frags()
3983 if (!(dev->flags & IFF_UP)) in __is_skb_forwardable()
3989 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; in __is_skb_forwardable()
3990 if (skb->len <= len) in __is_skb_forwardable()
4027 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); in DEV_CORE_STATS_INC()
4028 skb->priority = 0; in DEV_CORE_STATS_INC()
4039 this_cpu_dec(*dev->pcpu_refcnt); in __dev_put()
4041 refcount_dec(&dev->dev_refcnt); in __dev_put()
4050 this_cpu_inc(*dev->pcpu_refcnt); in __dev_hold()
4052 refcount_inc(&dev->dev_refcnt); in __dev_hold()
4062 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); in __netdev_tracker_alloc()
4073 refcount_dec(&dev->refcnt_tracker.no_tracker); in netdev_tracker_alloc()
4082 ref_tracker_free(&dev->refcnt_tracker, tracker); in netdev_tracker_free()
4105 * dev_hold - get reference to device
4117 * dev_put - release reference to device
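dev_hold()/dev_put() above pin a device while something else still references it, for example deferred work. A hedged sketch; struct my_work is a hypothetical container (a work_struct plus a device pointer) assumed to have been set up with INIT_WORK() elsewhere:

	static void my_start_work(struct my_work *w, struct net_device *dev)
	{
		dev_hold(dev);		/* reference held across the asynchronous work */
		w->dev = dev;
		schedule_work(&w->work);
	}

	static void my_work_fn(struct work_struct *work)
	{
		struct my_work *w = container_of(work, struct my_work, work);

		netdev_info(w->dev, "work ran\n");
		dev_put(w->dev);	/* release once the work no longer needs the device */
	}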
4156 * linkwatch_sync_dev - sync linkwatch for the given device
4165 * netif_carrier_ok - test if carrier present
4172 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); in netif_carrier_ok()
4184 * netif_dormant_on - mark device as dormant.
4191 * in a "pending" state, waiting for some external event. For "on-
4197 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_on()
4202 * netif_dormant_off - set device as not dormant.
4209 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_off()
4214 * netif_dormant - test if device is dormant
4221 return test_bit(__LINK_STATE_DORMANT, &dev->state); in netif_dormant()
4226 * netif_testing_on - mark device as under test.
4237 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_on()
4242 * netif_testing_off - set device as not under test.
4249 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_off()
4254 * netif_testing - test if device is under test
4261 return test_bit(__LINK_STATE_TESTING, &dev->state); in netif_testing()
4266 * netif_oper_up - test if device is operational
4273 unsigned int operstate = READ_ONCE(dev->operstate); in netif_oper_up()
4280 * netif_device_present - is device available or removed
4287 return test_bit(__LINK_STATE_PRESENT, &dev->state); in netif_device_present()
4342 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4343 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4344 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4345 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4346 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4347 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4348 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4349 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4350 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4351 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4352 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4353 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4354 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4355 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4356 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4366 return (1U << debug_value) - 1; in netif_msg_init()
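The netif_msg_*() tests and netif_msg_init() above are normally driven by a module parameter. A hedged sketch; struct my_priv (with msg_enable and ndev members) is hypothetical:

	static int debug = -1;				/* -1 selects the driver's default mask */
	module_param(debug, int, 0644);

	#define MY_DEFAULT_MSG	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

	static void my_init_msg_level(struct my_priv *priv)
	{
		priv->msg_enable = netif_msg_init(debug, MY_DEFAULT_MSG);
	}

	static void my_link_change(struct my_priv *priv, bool up)
	{
		if (netif_msg_link(priv))		/* tests priv->msg_enable */
			netdev_info(priv->ndev, "link %s\n", up ? "up" : "down");
	}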
4371 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
4373 WRITE_ONCE(txq->xmit_lock_owner, cpu); in __netif_tx_lock()
4378 __acquire(&txq->_xmit_lock); in __netif_tx_acquire()
4384 __release(&txq->_xmit_lock); in __netif_tx_release()
4389 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
4391 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); in __netif_tx_lock_bh()
4396 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
4400 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); in __netif_tx_trylock()
4408 WRITE_ONCE(txq->xmit_lock_owner, -1); in __netif_tx_unlock()
4409 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
4415 WRITE_ONCE(txq->xmit_lock_owner, -1); in __netif_tx_unlock_bh()
4416 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
4420 * txq->trans_start can be read locklessly from dev_watchdog()
4424 if (txq->xmit_lock_owner != -1) in txq_trans_update()
4425 WRITE_ONCE(txq->trans_start, jiffies); in txq_trans_update()
4432 if (READ_ONCE(txq->trans_start) != now) in txq_trans_cond_update()
4433 WRITE_ONCE(txq->trans_start, now); in txq_trans_cond_update()
4436 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4445 * netif_tx_lock - grab network device transmit lock
4467 if (!(dev)->lltx) { \
4475 (!(dev)->lltx ? \
4480 if (!(dev)->lltx) { \
4494 spin_lock(&dev->tx_global_lock); in netif_tx_disable()
4495 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_disable()
4502 spin_unlock(&dev->tx_global_lock); in netif_tx_disable()
4511 nest_level = dev->nested_level; in netif_addr_lock()
4513 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock()
4521 nest_level = dev->nested_level; in netif_addr_lock_bh()
4524 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock_bh()
4529 spin_unlock(&dev->addr_list_lock); in netif_addr_unlock()
4534 spin_unlock_bh(&dev->addr_list_lock); in netif_addr_unlock_bh()
4542 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4551 /* Support for loadable net-drivers */
4568 /* General hardware address lists handling functions */
4594 /* Functions used for device addresses handling */
4606 __dev_addr_set(dev, addr, dev->addr_len); in dev_addr_set()
4614 /* Functions used for unicast addresses handling */
4625 * __dev_uc_sync - Synchronize device's unicast list
4639 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); in __dev_uc_sync()
4643 * __dev_uc_unsync - Remove synchronized addresses from device
4653 __hw_addr_unsync_dev(&dev->uc, dev, unsync); in __dev_uc_unsync()
4656 /* Functions used for multicast addresses handling */
4669 * __dev_mc_sync - Synchronize device's multicast list
4683 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); in __dev_mc_sync()
4687 * __dev_mc_unsync - Remove synchronized addresses from device
4697 __hw_addr_unsync_dev(&dev->mc, dev, unsync); in __dev_mc_unsync()
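The __dev_uc_sync()/__dev_mc_sync() helpers above are usually driven from .ndo_set_rx_mode, with the two callbacks programming the hardware filter. A hedged sketch; my_hw_filter_add()/my_hw_filter_del() are hypothetical:

	static int my_addr_sync(struct net_device *dev, const unsigned char *addr)
	{
		return my_hw_filter_add(netdev_priv(dev), addr);
	}

	static int my_addr_unsync(struct net_device *dev, const unsigned char *addr)
	{
		return my_hw_filter_del(netdev_priv(dev), addr);
	}

	static void my_sync_rx_mode(struct net_device *dev)
	{
		__dev_uc_sync(dev, my_addr_sync, my_addr_unsync);
		__dev_mc_sync(dev, my_addr_sync, my_addr_unsync);
	}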
4740 for (iter = &(dev)->adj_list.upper, \
4761 for (iter = (dev)->adj_list.lower.next, \
4767 for (iter = &(dev)->adj_list.lower, \
4776 for (iter = (dev)->adj_list.lower.next, \
4893 const struct net_device_ops *ops = dev->netdev_ops; in netdev_get_tstamp()
4895 if (ops->ndo_get_tstamp) in netdev_get_tstamp()
4896 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); in netdev_get_tstamp()
4898 return hwtstamps->hwtstamp; in netdev_get_tstamp()
4914 current->net_xmit.more = more; in netdev_xmit_set_more()
4919 return current->net_xmit.more; in netdev_xmit_more()
4928 return ops->ndo_start_xmit(skb, dev); in __netdev_start_xmit()
4934 const struct net_device_ops *ops = dev->netdev_ops; in netdev_start_xmit()
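netdev_xmit_more() above lets a driver batch its doorbell writes across the skbs queued in one xmit burst. A hedged sketch; my_post_to_ring() and my_ring_doorbell() are hypothetical:

	static netdev_tx_t my_xmit_more(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		struct my_priv *priv = netdev_priv(dev);

		my_post_to_ring(priv, skb);			/* hypothetical descriptor write */

		/* Only kick the hardware at the end of the burst, or if the stack
		 * has stopped this queue and no further skbs will follow.
		 */
		if (!netdev_xmit_more() || netif_xmit_stopped(txq))
			my_ring_doorbell(priv);			/* hypothetical MMIO kick */
		return NETDEV_TX_OK;
	}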
4969 return (dev->features & ~dev->hw_features) | dev->wanted_features; in netdev_get_wanted_features()
5027 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && in skb_gso_ok()
5035 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && in netif_needs_gso()
5036 (skb->ip_summed != CHECKSUM_UNNECESSARY))); in netif_needs_gso()
5048 return skb->protocol == htons(ETH_P_IPV6) ? in netif_get_gro_max_size()
5049 READ_ONCE(dev->gro_max_size) : in netif_get_gro_max_size()
5050 READ_ONCE(dev->gro_ipv4_max_size); in netif_get_gro_max_size()
5057 return skb->protocol == htons(ETH_P_IPV6) ? in netif_get_gso_max_size()
5058 READ_ONCE(dev->gso_max_size) : in netif_get_gso_max_size()
5059 READ_ONCE(dev->gso_ipv4_max_size); in netif_get_gso_max_size()
5064 return dev->priv_flags & IFF_MACSEC; in netif_is_macsec()
5069 return dev->priv_flags & IFF_MACVLAN; in netif_is_macvlan()
5074 return dev->priv_flags & IFF_MACVLAN_PORT; in netif_is_macvlan_port()
5079 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; in netif_is_bond_master()
5084 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; in netif_is_bond_slave()
5089 return dev->priv_flags & IFF_SUPP_NOFCS; in netif_supports_nofcs()
5094 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; in netif_has_l3_rx_handler()
5099 return dev->priv_flags & IFF_L3MDEV_MASTER; in netif_is_l3_master()
5104 return dev->priv_flags & IFF_L3MDEV_SLAVE; in netif_is_l3_slave()
5111 return dev->ifindex; in dev_sdif()
5118 return dev->priv_flags & IFF_EBRIDGE; in netif_is_bridge_master()
5123 return dev->priv_flags & IFF_BRIDGE_PORT; in netif_is_bridge_port()
5128 return dev->priv_flags & IFF_OPENVSWITCH; in netif_is_ovs_master()
5133 return dev->priv_flags & IFF_OVS_DATAPATH; in netif_is_ovs_port()
5148 return dev->priv_flags & IFF_TEAM; in netif_is_team_master()
5153 return dev->priv_flags & IFF_TEAM_PORT; in netif_is_team_port()
5168 return dev->priv_flags & IFF_RXFH_CONFIGURED; in netif_is_rxfh_configured()
5173 return dev->priv_flags & IFF_FAILOVER; in netif_is_failover()
5178 return dev->priv_flags & IFF_FAILOVER_SLAVE; in netif_is_failover_slave()
5184 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); in netif_keep_dst()
5202 if (!dev->name[0] || strchr(dev->name, '%')) in netdev_name()
5204 return dev->name; in netdev_name()
5209 u8 reg_state = READ_ONCE(dev->reg_state); in netdev_reg_state()
5220 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state); in netdev_reg_state()
5225 MODULE_ALIAS("netdev-" device)
5260 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
5266 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
5267 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5269 atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5270 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)