/linux-6.12.1/fs/reiserfs/ |
D | do_balan.c |
      19  static inline void buffer_info_init_left(struct tree_balance *tb,    in buffer_info_init_left() argument
      22  bi->tb = tb;    in buffer_info_init_left()
      23  bi->bi_bh = tb->L[0];    in buffer_info_init_left()
      24  bi->bi_parent = tb->FL[0];    in buffer_info_init_left()
      25  bi->bi_position = get_left_neighbor_position(tb, 0);    in buffer_info_init_left()
      28  static inline void buffer_info_init_right(struct tree_balance *tb,    in buffer_info_init_right() argument
      31  bi->tb = tb;    in buffer_info_init_right()
      32  bi->bi_bh = tb->R[0];    in buffer_info_init_right()
      33  bi->bi_parent = tb->FR[0];    in buffer_info_init_right()
      34  bi->bi_position = get_right_neighbor_position(tb, 0);    in buffer_info_init_right()
      [all …]
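In these reiserfs hits, `tb` is the `struct tree_balance` describing a pending rebalance, and the helpers simply point a `struct buffer_info` view at the left or right neighbor buffers recorded in it. Below is a minimal, self-contained userspace sketch of that initialization pattern; the struct layouts and the constant position are invented stand-ins, not the real reiserfs definitions.

```c
/* Hypothetical, simplified analogue of buffer_info_init_left() above. */
#include <stdio.h>

struct buffer_head { int blocknr; };

struct tree_balance {
	struct buffer_head *L[2];   /* left neighbors, per tree level (sketch) */
	struct buffer_head *FL[2];  /* parents of the left neighbors (sketch) */
};

struct buffer_info {
	struct tree_balance *tb;
	struct buffer_head *bi_bh;
	struct buffer_head *bi_parent;
	int bi_position;
};

static void buffer_info_init_left(struct tree_balance *tb, struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->L[0];
	bi->bi_parent = tb->FL[0];
	bi->bi_position = 0;        /* stands in for get_left_neighbor_position() */
}

int main(void)
{
	struct buffer_head l = { .blocknr = 42 }, fl = { .blocknr = 7 };
	struct tree_balance tb = { .L = { &l }, .FL = { &fl } };
	struct buffer_info bi;

	buffer_info_init_left(&tb, &bi);
	printf("left neighbor block %d, parent block %d\n",
	       bi.bi_bh->blocknr, bi.bi_parent->blocknr);
	return 0;
}
```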
|
D | fix_node.c |
      51  static void create_virtual_node(struct tree_balance *tb, int h)    in create_virtual_node() argument
      54  struct virtual_node *vn = tb->tb_vn;    in create_virtual_node()
      58  Sh = PATH_H_PBUFFER(tb->tb_path, h);    in create_virtual_node()
      62  MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];    in create_virtual_node()
      76  vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);    in create_virtual_node()
     115  op_create_vi(vn, vi, is_affected, tb->insert_size[0]);    in create_virtual_node()
     116  if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)    in create_virtual_node()
     117  reiserfs_panic(tb->tb_sb, "vs-8030",    in create_virtual_node()
     125  vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];    in create_virtual_node()
     137  vi->vi_item_len = tb->insert_size[0];    in create_virtual_node()
      [all …]
|
D | ibalance.c |
      28  struct tree_balance *tb,    in internal_define_dest_src_infos() argument
      41  src_bi->tb = tb;    in internal_define_dest_src_infos()
      42  src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);    in internal_define_dest_src_infos()
      43  src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);    in internal_define_dest_src_infos()
      44  src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);    in internal_define_dest_src_infos()
      45  dest_bi->tb = tb;    in internal_define_dest_src_infos()
      46  dest_bi->bi_bh = tb->L[h];    in internal_define_dest_src_infos()
      47  dest_bi->bi_parent = tb->FL[h];    in internal_define_dest_src_infos()
      48  dest_bi->bi_position = get_left_neighbor_position(tb, h);    in internal_define_dest_src_infos()
      49  *d_key = tb->lkey[h];    in internal_define_dest_src_infos()
      [all …]
|
D | prints.c |
     640  void store_print_tb(struct tree_balance *tb)    in store_print_tb() argument
     646  if (!tb)    in store_print_tb()
     654  REISERFS_SB(tb->tb_sb)->s_do_balance,    in store_print_tb()
     655  tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),    in store_print_tb()
     656  tb->tb_path->pos_in_item);    in store_print_tb()
     658  for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {    in store_print_tb()
     659  if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=    in store_print_tb()
     660  tb->tb_path->path_length    in store_print_tb()
     661  && PATH_H_PATH_OFFSET(tb->tb_path,    in store_print_tb()
     663  tbSh = PATH_H_PBUFFER(tb->tb_path, h);    in store_print_tb()
      [all …]
|
/linux-6.12.1/drivers/thunderbolt/ |
D | domain.c |
     123  struct tb *tb = container_of(dev, struct tb, dev);    in boot_acl_show() local
     128  uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);    in boot_acl_show()
     132  pm_runtime_get_sync(&tb->dev);    in boot_acl_show()
     134  if (mutex_lock_interruptible(&tb->lock)) {    in boot_acl_show()
     138  ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);    in boot_acl_show()
     140  mutex_unlock(&tb->lock);    in boot_acl_show()
     143  mutex_unlock(&tb->lock);    in boot_acl_show()
     145  for (ret = 0, i = 0; i < tb->nboot_acl; i++) {    in boot_acl_show()
     149  ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");    in boot_acl_show()
     153  pm_runtime_mark_last_busy(&tb->dev);    in boot_acl_show()
      [all …]
|
D | icm.c |
     105  bool (*is_supported)(struct tb *tb);
     106  int (*cio_reset)(struct tb *tb);
     107  int (*get_mode)(struct tb *tb);
     108  int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
     109  void (*save_devices)(struct tb *tb);
     110  int (*driver_ready)(struct tb *tb,
     113  void (*set_uuid)(struct tb *tb);
     114  void (*device_connected)(struct tb *tb,
     116  void (*device_disconnected)(struct tb *tb,
     118  void (*xdomain_connected)(struct tb *tb,
      [all …]
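Here `tb` appears only as the parameter of a table of operation callbacks: icm.c keeps a per-hardware-generation ops struct whose members all take the Thunderbolt domain (`struct tb`). A hedged, compilable sketch of that ops-table pattern, with made-up callbacks standing in for the real ICM operations:

```c
/* Sketch only: demonstrates the function-pointer table shape, not the driver. */
#include <stdio.h>

struct tb;	/* the domain stays opaque in this sketch */

struct icm_ops {
	int  (*is_supported)(struct tb *tb);
	int  (*get_mode)(struct tb *tb);
	void (*save_devices)(struct tb *tb);
};

static int always_supported(struct tb *tb) { (void)tb; return 1; }
static int fixed_mode(struct tb *tb)       { (void)tb; return 0; }
static void save_nothing(struct tb *tb)    { (void)tb; }

/* One such table would exist per controller generation. */
static const struct icm_ops demo_ops = {
	.is_supported = always_supported,
	.get_mode     = fixed_mode,
	.save_devices = save_nothing,
};

int main(void)
{
	if (demo_ops.is_supported(NULL))
		printf("mode %d\n", demo_ops.get_mode(NULL));
	return 0;
}
```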
|
D | tb.c |
      66  static inline struct tb *tcm_to_tb(struct tb_cm *tcm)    in tcm_to_tb()
      68  return ((void *)tcm - sizeof(struct tb));    in tcm_to_tb()
      73  struct tb *tb;    member
      81  static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)    in tb_queue_hotplug() argument
      89  ev->tb = tb;    in tb_queue_hotplug()
      94  queue_work(tb->wq, &ev->work);    in tb_queue_hotplug()
     101  struct tb_cm *tcm = tb_priv(sw->tb);    in tb_add_dp_resources()
     128  struct tb_cm *tcm = tb_priv(sw->tb);    in tb_remove_dp_resources()
     145  static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)    in tb_discover_dp_resource() argument
     147  struct tb_cm *tcm = tb_priv(tb);    in tb_discover_dp_resource()
      [all …]
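The `tcm_to_tb()` hit shows the layout trick the connection manager relies on: its private data sits directly after `struct tb`, so subtracting `sizeof(struct tb)` from the private pointer recovers the domain. A small userspace sketch of that layout; the field contents are invented and `tb_priv()` is simplified from the driver's accessor.

```c
/* Sketch of "private data appended after the main struct" and the reverse lookup. */
#include <stdio.h>
#include <stdlib.h>

struct tb    { int index; /* ... */ };
struct tb_cm { int hotplug_active; /* ... */ };

static void *tb_priv(struct tb *tb)
{
	/* private area begins right after struct tb (allocated together below) */
	return (char *)tb + sizeof(struct tb);
}

static struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	/* step back over struct tb to recover the containing domain */
	return (struct tb *)((char *)tcm - sizeof(struct tb));
}

int main(void)
{
	struct tb *tb = calloc(1, sizeof(struct tb) + sizeof(struct tb_cm));

	if (!tb)
		return 1;
	tb->index = 3;

	struct tb_cm *tcm = tb_priv(tb);
	printf("recovered domain index %d\n", tcm_to_tb(tcm)->index);
	free(tb);
	return 0;
}
```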
|
D | tb.h |
     175  struct tb *tb;    member
     234  struct tb *tb;    member
     336  struct tb *tb;    member
     426  struct tb *tb;    member
     502  int (*driver_ready)(struct tb *tb);
     503  int (*start)(struct tb *tb, bool reset);
     504  void (*stop)(struct tb *tb);
     505  void (*deinit)(struct tb *tb);
     506  int (*suspend_noirq)(struct tb *tb);
     507  int (*resume_noirq)(struct tb *tb);
      [all …]
|
D | debugfs.c |
     179  struct tb *tb = sw->tb;    in regs_write() local
     190  if (mutex_lock_interruptible(&tb->lock)) {    in regs_write()
     208  mutex_unlock(&tb->lock);    in regs_write()
     323  struct tb *tb = sw->tb;    in port_sb_regs_write() local
     333  if (mutex_lock_interruptible(&tb->lock)) {    in port_sb_regs_write()
     341  mutex_unlock(&tb->lock);    in port_sb_regs_write()
     356  struct tb *tb = rt->tb;    in retimer_sb_regs_write() local
     366  if (mutex_lock_interruptible(&tb->lock)) {    in retimer_sb_regs_write()
     374  mutex_unlock(&tb->lock);    in retimer_sb_regs_write()
     509  struct tb *tb = margining->port->sw->tb;    in margining_ber_level_write() local
      [all …]
|
/linux-6.12.1/tools/bpf/bpftool/ |
D | netlink_dumper.c |
      14  static void xdp_dump_prog_id(struct nlattr **tb, int attr,    in xdp_dump_prog_id() argument
      18  if (!tb[attr])    in xdp_dump_prog_id()
      24  NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr]))    in xdp_dump_prog_id()
      32  struct nlattr *tb[IFLA_XDP_MAX + 1];    in do_xdp_dump_one() local
      35  if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0)    in do_xdp_dump_one()
      38  if (!tb[IFLA_XDP_ATTACHED])    in do_xdp_dump_one()
      41  mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]);    in do_xdp_dump_one()
      55  xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true);    in do_xdp_dump_one()
      56  xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true);    in do_xdp_dump_one()
      57  xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true);    in do_xdp_dump_one()
      [all …]
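In this and the following netlink-related hits, `tb` is the conventional name for an attribute table: parsing fills `tb[type]` with a pointer to each attribute present in the message, and later code simply tests and reads the slots it cares about. A toy analogue of that indexing scheme, with invented attribute types and no real libbpf/netlink calls:

```c
/* Sketch: what a tb[] attribute table holds after a nested parse. */
#include <stdio.h>

enum { ATTR_UNSPEC, ATTR_ID, ATTR_MODE, ATTR_MAX = ATTR_MODE };

struct attr { int type; unsigned int value; };

/* File each attribute into the slot named by its type, like nla_parse_nested(). */
static void parse_attrs(const struct attr *stream, int n,
			const struct attr *tb[ATTR_MAX + 1])
{
	for (int i = 0; i < n; i++)
		if (stream[i].type <= ATTR_MAX)
			tb[stream[i].type] = &stream[i];
}

int main(void)
{
	const struct attr msg[] = { { ATTR_ID, 17 }, { ATTR_MODE, 2 } };
	const struct attr *tb[ATTR_MAX + 1] = { NULL };

	parse_attrs(msg, 2, tb);
	if (tb[ATTR_ID])	/* mirrors the !tb[IFLA_XDP_ATTACHED] style checks */
		printf("id %u\n", tb[ATTR_ID]->value);
	return 0;
}
```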
|
/linux-6.12.1/net/bridge/ |
D | br_cfm_netlink.c |
      93  struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_CREATE_MAX + 1];    in br_mep_create_parse() local
      98  err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_CREATE_MAX, attr,    in br_mep_create_parse()
     103  if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]) {    in br_mep_create_parse()
     107  if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]) {    in br_mep_create_parse()
     111  if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]) {    in br_mep_create_parse()
     115  if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]) {    in br_mep_create_parse()
     122  instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]);    in br_mep_create_parse()
     123  create.domain = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]);    in br_mep_create_parse()
     124  create.direction = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]);    in br_mep_create_parse()
     125  create.ifindex = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]);    in br_mep_create_parse()
      [all …]
|
D | br_mrp_netlink.c |
      34  struct nlattr *tb[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1];    in br_mrp_instance_parse() local
      38  err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_INSTANCE_MAX, attr,    in br_mrp_instance_parse()
      43  if (!tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] ||    in br_mrp_instance_parse()
      44  !tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] ||    in br_mrp_instance_parse()
      45  !tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]) {    in br_mrp_instance_parse()
      53  inst.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID]);    in br_mrp_instance_parse()
      54  inst.p_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX]);    in br_mrp_instance_parse()
      55  inst.s_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]);    in br_mrp_instance_parse()
      58  if (tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO])    in br_mrp_instance_parse()
      59  inst.prio = nla_get_u16(tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO]);    in br_mrp_instance_parse()
      [all …]
|
/linux-6.12.1/tools/testing/selftests/powerpc/pmu/ebb/ |
D | trace.c |
      17  struct trace_buffer *tb;    in trace_buffer_allocate() local
      19  if (size < sizeof(*tb)) {    in trace_buffer_allocate()
      24  tb = mmap(NULL, size, PROT_READ | PROT_WRITE,    in trace_buffer_allocate()
      26  if (tb == MAP_FAILED) {    in trace_buffer_allocate()
      31  tb->size = size;    in trace_buffer_allocate()
      32  tb->tail = tb->data;    in trace_buffer_allocate()
      33  tb->overflow = false;    in trace_buffer_allocate()
      35  return tb;    in trace_buffer_allocate()
      38  static bool trace_check_bounds(struct trace_buffer *tb, void *p)    in trace_check_bounds() argument
      40  return p < ((void *)tb + tb->size);    in trace_check_bounds()
      [all …]
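This selftest's `tb` is an mmap-backed trace buffer. Below is a runnable userspace sketch of `trace_buffer_allocate()` as shown in the snippet, using an anonymous mapping; the field names follow the snippet, but the struct definition here is assumed rather than copied from the selftest header.

```c
/* Sketch of allocating a trace buffer with mmap and pointing the tail at data[]. */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/mman.h>

struct trace_buffer {
	size_t size;
	bool   overflow;
	void  *tail;
	unsigned char data[];   /* records are appended here */
};

static struct trace_buffer *trace_buffer_allocate(size_t size)
{
	struct trace_buffer *tb;

	if (size < sizeof(*tb))
		return NULL;

	/* anonymous private mapping, as in the selftest */
	tb = mmap(NULL, size, PROT_READ | PROT_WRITE,
		  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tb == MAP_FAILED)
		return NULL;

	tb->size = size;
	tb->tail = tb->data;
	tb->overflow = false;
	return tb;
}

int main(void)
{
	struct trace_buffer *tb = trace_buffer_allocate(64 * 1024);

	if (tb)
		printf("trace buffer of %zu bytes mapped\n", tb->size);
	return 0;
}
```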
|
/linux-6.12.1/include/drm/ |
D | task_barrier.h |
      55  static inline void task_barrier_init(struct task_barrier *tb)    in task_barrier_init() argument
      57  tb->n = 0;    in task_barrier_init()
      58  atomic_set(&tb->count, 0);    in task_barrier_init()
      59  sema_init(&tb->enter_turnstile, 0);    in task_barrier_init()
      60  sema_init(&tb->exit_turnstile, 0);    in task_barrier_init()
      63  static inline void task_barrier_add_task(struct task_barrier *tb)    in task_barrier_add_task() argument
      65  tb->n++;    in task_barrier_add_task()
      68  static inline void task_barrier_rem_task(struct task_barrier *tb)    in task_barrier_rem_task() argument
      70  tb->n--;    in task_barrier_rem_task()
      78  static inline void task_barrier_enter(struct task_barrier *tb)    in task_barrier_enter() argument
      [all …]
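Here `task_barrier` is a reusable two-turnstile barrier: the last task to arrive opens a turnstile for everyone, once on entry and once on exit. A userspace sketch of the same idea follows, using POSIX semaphores and a mutex instead of the kernel's atomics and `struct semaphore`, so the details deliberately differ from the header above.

```c
/* Sketch of a reusable two-turnstile barrier (compile with -pthread). */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct task_barrier {
	int n;                  /* participating tasks */
	int count;              /* tasks arrived in the current phase */
	pthread_mutex_t lock;
	sem_t enter_turnstile;
	sem_t exit_turnstile;
};

static void task_barrier_init(struct task_barrier *tb, int n)
{
	tb->n = n;
	tb->count = 0;
	pthread_mutex_init(&tb->lock, NULL);
	sem_init(&tb->enter_turnstile, 0, 0);
	sem_init(&tb->exit_turnstile, 0, 0);
}

static void turnstile_wait(struct task_barrier *tb, sem_t *turnstile)
{
	pthread_mutex_lock(&tb->lock);
	if (++tb->count == tb->n) {
		tb->count = 0;
		for (int i = 0; i < tb->n; i++)   /* last one in releases everybody */
			sem_post(turnstile);
	}
	pthread_mutex_unlock(&tb->lock);
	sem_wait(turnstile);
}

static void task_barrier_enter(struct task_barrier *tb)
{
	turnstile_wait(tb, &tb->enter_turnstile);
}

static void task_barrier_exit(struct task_barrier *tb)
{
	turnstile_wait(tb, &tb->exit_turnstile);
}

static struct task_barrier barrier;

static void *worker(void *arg)
{
	task_barrier_enter(&barrier);   /* everyone reaches this point ... */
	task_barrier_exit(&barrier);    /* ... before anyone moves past this one */
	printf("worker %ld done\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	task_barrier_init(&barrier, 4);
	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}
```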
|
/linux-6.12.1/net/netfilter/ |
D | nfnetlink_cthelper.c |
      76  struct nlattr *tb[NFCTH_TUPLE_MAX+1];    in nfnl_cthelper_parse_tuple() local
      78  err = nla_parse_nested_deprecated(tb, NFCTH_TUPLE_MAX, attr,    in nfnl_cthelper_parse_tuple()
      83  if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])    in nfnl_cthelper_parse_tuple()
      89  tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));    in nfnl_cthelper_parse_tuple()
      90  tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);    in nfnl_cthelper_parse_tuple()
     141  struct nlattr *tb[NFCTH_POLICY_MAX+1];    in nfnl_cthelper_expect_policy() local
     143  err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_MAX, attr,    in nfnl_cthelper_expect_policy()
     148  if (!tb[NFCTH_POLICY_NAME] ||    in nfnl_cthelper_expect_policy()
     149  !tb[NFCTH_POLICY_EXPECT_MAX] ||    in nfnl_cthelper_expect_policy()
     150  !tb[NFCTH_POLICY_EXPECT_TIMEOUT])    in nfnl_cthelper_expect_policy()
      [all …]
|
D | nft_tunnel.c |
      76  const struct nlattr * const tb[])    in nft_tunnel_get_init() argument
      81  if (!tb[NFTA_TUNNEL_KEY] ||    in nft_tunnel_get_init()
      82  !tb[NFTA_TUNNEL_DREG])    in nft_tunnel_get_init()
      85  priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));    in nft_tunnel_get_init()
      97  if (tb[NFTA_TUNNEL_MODE]) {    in nft_tunnel_get_init()
      98  priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));    in nft_tunnel_get_init()
     106  return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,    in nft_tunnel_get_init()
     195  struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];    in nft_tunnel_obj_ip_init() local
     198  err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,    in nft_tunnel_obj_ip_init()
     203  if (!tb[NFTA_TUNNEL_KEY_IP_DST])    in nft_tunnel_obj_ip_init()
      [all …]
|
/linux-6.12.1/net/ethtool/ |
D | bitset.c |
     330  struct nlattr *tb[ARRAY_SIZE(bitset_policy)];    in ethnl_bitset_is_compact() local
     333  ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, bitset,    in ethnl_bitset_is_compact()
     338  if (tb[ETHTOOL_A_BITSET_BITS]) {    in ethnl_bitset_is_compact()
     339  if (tb[ETHTOOL_A_BITSET_VALUE] || tb[ETHTOOL_A_BITSET_MASK])    in ethnl_bitset_is_compact()
     344  if (!tb[ETHTOOL_A_BITSET_SIZE] || !tb[ETHTOOL_A_BITSET_VALUE])    in ethnl_bitset_is_compact()
     382  struct nlattr *tb[ARRAY_SIZE(bit_policy)];    in ethnl_parse_bit() local
     385  ret = nla_parse_nested(tb, ARRAY_SIZE(bit_policy) - 1, bit_attr,    in ethnl_parse_bit()
     390  if (tb[ETHTOOL_A_BITSET_BIT_INDEX]) {    in ethnl_parse_bit()
     393  idx = nla_get_u32(tb[ETHTOOL_A_BITSET_BIT_INDEX]);    in ethnl_parse_bit()
     396  tb[ETHTOOL_A_BITSET_BIT_INDEX],    in ethnl_parse_bit()
      [all …]
|
D | rings.c |
     140  struct nlattr **tb = info->attrs;    in ethnl_set_rings_validate() local
     142  if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&    in ethnl_set_rings_validate()
     145  tb[ETHTOOL_A_RINGS_RX_BUF_LEN],    in ethnl_set_rings_validate()
     150  if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&    in ethnl_set_rings_validate()
     153  tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],    in ethnl_set_rings_validate()
     158  if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&    in ethnl_set_rings_validate()
     161  tb[ETHTOOL_A_RINGS_CQE_SIZE],    in ethnl_set_rings_validate()
     166  if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&    in ethnl_set_rings_validate()
     169  tb[ETHTOOL_A_RINGS_TX_PUSH],    in ethnl_set_rings_validate()
     174  if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&    in ethnl_set_rings_validate()
      [all …]
|
/linux-6.12.1/arch/sparc/mm/ |
D | tlb.c |
      25  struct tlb_batch *tb = &get_cpu_var(tlb_batch);    in flush_tlb_pending() local
      26  struct mm_struct *mm = tb->mm;    in flush_tlb_pending()
      28  if (!tb->tlb_nr)    in flush_tlb_pending()
      31  flush_tsb_user(tb);    in flush_tlb_pending()
      34  if (tb->tlb_nr == 1) {    in flush_tlb_pending()
      35  global_flush_tlb_page(mm, tb->vaddrs[0]);    in flush_tlb_pending()
      38  smp_flush_tlb_pending(tb->mm, tb->tlb_nr,    in flush_tlb_pending()
      39  &tb->vaddrs[0]);    in flush_tlb_pending()
      41  __flush_tlb_pending(CTX_HWBITS(tb->mm->context),    in flush_tlb_pending()
      42  tb->tlb_nr, &tb->vaddrs[0]);    in flush_tlb_pending()
      [all …]
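On sparc, `tb` is a per-CPU `struct tlb_batch`: addresses are queued up and either flushed one at a time or handed to a single batched flush. A stripped-down sketch of that batch-then-flush shape, with printouts standing in for the real TLB/TSB flush primitives and an invented batch size:

```c
/* Sketch of the batching idea only; all kernel specifics are omitted. */
#include <stdio.h>

#define TLB_BATCH_NR 8	/* illustrative capacity */

struct tlb_batch {
	unsigned int tlb_nr;
	unsigned long vaddrs[TLB_BATCH_NR];
};

static void flush_one(unsigned long vaddr)
{
	printf("flush single %#lx\n", vaddr);
}

static void flush_many(unsigned long *vaddrs, unsigned int nr)
{
	printf("flush %u addresses starting at %#lx in one call\n", nr, vaddrs[0]);
}

static void flush_tlb_pending(struct tlb_batch *tb)
{
	if (!tb->tlb_nr)
		return;
	if (tb->tlb_nr == 1)		/* cheap single-page path */
		flush_one(tb->vaddrs[0]);
	else				/* batched path */
		flush_many(tb->vaddrs, tb->tlb_nr);
	tb->tlb_nr = 0;
}

static void tlb_batch_add(struct tlb_batch *tb, unsigned long vaddr)
{
	tb->vaddrs[tb->tlb_nr++] = vaddr;
	if (tb->tlb_nr == TLB_BATCH_NR)	/* batch full: flush now */
		flush_tlb_pending(tb);
}

int main(void)
{
	struct tlb_batch tb = { 0 };

	for (unsigned long a = 0x1000; a < 0xb000; a += 0x1000)
		tlb_batch_add(&tb, a);
	flush_tlb_pending(&tb);
	return 0;
}
```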
|
/linux-6.12.1/drivers/target/ |
D | target_core_hba.c |
      40  struct target_backend *tb, *old;    in transport_backend_register() local
      42  tb = kzalloc(sizeof(*tb), GFP_KERNEL);    in transport_backend_register()
      43  if (!tb)    in transport_backend_register()
      45  tb->ops = ops;    in transport_backend_register()
      52  kfree(tb);    in transport_backend_register()
      56  target_setup_backend_cits(tb);    in transport_backend_register()
      57  list_add_tail(&tb->list, &backend_list);    in transport_backend_register()
      68  struct target_backend *tb;    in target_backend_unregister() local
      71  list_for_each_entry(tb, &backend_list, list) {    in target_backend_unregister()
      72  if (tb->ops == ops) {    in target_backend_unregister()
      [all …]
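Here `tb` is a `struct target_backend` wrapper that registration allocates, checks against already-registered entries, and links onto a global list under a lock. A userspace sketch of the same register pattern using a plain singly linked list and a pthread mutex; the duplicate check by ops name and the struct contents are illustrative, not the kernel's.

```c
/* Sketch of "allocate wrapper, reject duplicates, add to locked list". */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct backend_ops { const char *name; };

struct target_backend {
	const struct backend_ops *ops;
	struct target_backend *next;
};

static struct target_backend *backend_list;
static pthread_mutex_t backend_lock = PTHREAD_MUTEX_INITIALIZER;

static int transport_backend_register(const struct backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = calloc(1, sizeof(*tb));	/* stands in for kzalloc() */
	if (!tb)
		return -1;
	tb->ops = ops;

	pthread_mutex_lock(&backend_lock);
	for (old = backend_list; old; old = old->next) {
		if (!strcmp(old->ops->name, ops->name)) {   /* already registered */
			pthread_mutex_unlock(&backend_lock);
			free(tb);
			return -1;
		}
	}
	tb->next = backend_list;	/* prepend; the kernel version appends */
	backend_list = tb;
	pthread_mutex_unlock(&backend_lock);
	return 0;
}

int main(void)
{
	static const struct backend_ops iblock = { .name = "iblock" };

	printf("register: %d\n", transport_backend_register(&iblock));
	printf("duplicate: %d\n", transport_backend_register(&iblock));
	return 0;
}
```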
|
/linux-6.12.1/net/netfilter/ipset/ |
D | ip_set_hash_ipportnet.c |
     160  hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],    in hash_ipportnet4_uadt() argument
     173  if (tb[IPSET_ATTR_LINENO])    in hash_ipportnet4_uadt()
     174  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);    in hash_ipportnet4_uadt()
     176  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||    in hash_ipportnet4_uadt()
     177  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||    in hash_ipportnet4_uadt()
     178  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||    in hash_ipportnet4_uadt()
     179  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))    in hash_ipportnet4_uadt()
     182  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);    in hash_ipportnet4_uadt()
     186  ret = ip_set_get_extensions(set, tb, &ext);    in hash_ipportnet4_uadt()
     190  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);    in hash_ipportnet4_uadt()
      [all …]
|
D | ip_set_hash_ipportip.c |
     108  hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],    in hash_ipportip4_uadt() argument
     119  if (tb[IPSET_ATTR_LINENO])    in hash_ipportip4_uadt()
     120  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);    in hash_ipportip4_uadt()
     122  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||    in hash_ipportip4_uadt()
     123  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||    in hash_ipportip4_uadt()
     124  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))    in hash_ipportip4_uadt()
     127  ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);    in hash_ipportip4_uadt()
     131  ret = ip_set_get_extensions(set, tb, &ext);    in hash_ipportip4_uadt()
     135  ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2);    in hash_ipportip4_uadt()
     139  e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);    in hash_ipportip4_uadt()
      [all …]
|
D | ip_set_hash_ipmac.c |
     110  hash_ipmac4_uadt(struct ip_set *set, struct nlattr *tb[],    in hash_ipmac4_uadt() argument
     118  if (unlikely(!tb[IPSET_ATTR_IP] ||    in hash_ipmac4_uadt()
     119  !tb[IPSET_ATTR_ETHER] ||    in hash_ipmac4_uadt()
     120  nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN ||    in hash_ipmac4_uadt()
     121  !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||    in hash_ipmac4_uadt()
     122  !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||    in hash_ipmac4_uadt()
     123  !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||    in hash_ipmac4_uadt()
     124  !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||    in hash_ipmac4_uadt()
     125  !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||    in hash_ipmac4_uadt()
     126  !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))    in hash_ipmac4_uadt()
      [all …]
|
D | ip_set_hash_netportnet.c |
     188  hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],    in hash_netportnet4_uadt() argument
     200  if (tb[IPSET_ATTR_LINENO])    in hash_netportnet4_uadt()
     201  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);    in hash_netportnet4_uadt()
     204  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||    in hash_netportnet4_uadt()
     205  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||    in hash_netportnet4_uadt()
     206  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||    in hash_netportnet4_uadt()
     207  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))    in hash_netportnet4_uadt()
     210  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);    in hash_netportnet4_uadt()
     214  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);    in hash_netportnet4_uadt()
     218  ret = ip_set_get_extensions(set, tb, &ext);    in hash_netportnet4_uadt()
      [all …]
|
/linux-6.12.1/drivers/net/wireless/ti/wlcore/ |
D | testmode.c |
      58  static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])    in wl1271_tm_cmd_test() argument
      67  if (!tb[WL1271_TM_ATTR_DATA])    in wl1271_tm_cmd_test()
      70  buf = nla_data(tb[WL1271_TM_ATTR_DATA]);    in wl1271_tm_cmd_test()
      71  buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);    in wl1271_tm_cmd_test()
      73  if (tb[WL1271_TM_ATTR_ANSWER])    in wl1271_tm_cmd_test()
      74  answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);    in wl1271_tm_cmd_test()
     138  static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])    in wl1271_tm_cmd_interrogate() argument
     147  if (!tb[WL1271_TM_ATTR_IE_ID])    in wl1271_tm_cmd_interrogate()
     150  ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);    in wl1271_tm_cmd_interrogate()
     203  static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])    in wl1271_tm_cmd_configure() argument
      [all …]
|