/linux-6.12.1/drivers/base/ |
D | class.c |
    129  int class_create_file_ns(const struct class *cls, const struct class_attribute *attr,    in class_create_file_ns() argument
    132  struct subsys_private *sp = class_to_subsys(cls);    in class_create_file_ns()
    145  void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr,    in class_remove_file_ns() argument
    148  struct subsys_private *sp = class_to_subsys(cls);    in class_remove_file_ns()
    178  int class_register(const struct class *cls)    in class_register() argument
    184  pr_debug("device class '%s': registering\n", cls->name);    in class_register()
    186  if (cls->ns_type && !cls->namespace) {    in class_register()
    188  __func__, cls->name);    in class_register()
    191  if (!cls->ns_type && cls->namespace) {    in class_register()
    193  __func__, cls->name);    in class_register()
    [all …]
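The class_create_file_ns()/class_register() pair above is the driver-core API for exposing class-wide sysfs attributes. A minimal caller sketch, assuming a made-up "foo" class and a read-only "version" attribute (illustrative only, not taken from class.c):

    #include <linux/device/class.h>
    #include <linux/module.h>
    #include <linux/sysfs.h>

    /* Hypothetical class and attribute, used only for illustration. */
    static ssize_t version_show(const struct class *cls,
                                const struct class_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "1.0\n");
    }
    static CLASS_ATTR_RO(version);

    static struct class foo_class = {
            .name = "foo",
    };

    static int __init foo_init(void)
    {
            int ret = class_register(&foo_class);

            if (ret)
                    return ret;

            /* class_create_file() is the NULL-namespace wrapper around
             * class_create_file_ns(). */
            ret = class_create_file(&foo_class, &class_attr_version);
            if (ret)
                    class_unregister(&foo_class);
            return ret;
    }

    static void __exit foo_exit(void)
    {
            class_remove_file(&foo_class, &class_attr_version);
            class_unregister(&foo_class);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");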
|
/linux-6.12.1/scripts/ |
D | rust_is_available_test.py |
    27  def generate_executable(cls, content):    argument
    28  path = pathlib.Path(cls.tempdir.name)
    37  def generate_clang(cls, stdout):    argument
    38  return cls.generate_executable(f"""#!/usr/bin/env python3
    47  def generate_rustc(cls, stdout):    argument
    48  return cls.generate_executable(f"""#!/usr/bin/env python3
    57  def generate_bindgen(cls, version_stdout, libclang_stderr, version_0_66_patched=False):    argument
    68  return cls.generate_executable(f"""#!/usr/bin/env python3
    79  def generate_bindgen_version(cls, stdout, version_0_66_patched=False):    argument
    80  …return cls.generate_bindgen(stdout, cls.bindgen_default_bindgen_libclang_stderr, version_0_66_patc…
    [all …]
|
/linux-6.12.1/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_tc_u32.c |
    46  struct tc_cls_u32_offload *cls,    in fill_match_fields() argument
    55  for (i = 0; i < cls->knode.sel->nkeys; i++) {    in fill_match_fields()
    56  off = cls->knode.sel->keys[i].off;    in fill_match_fields()
    57  val = cls->knode.sel->keys[i].val;    in fill_match_fields()
    58  mask = cls->knode.sel->keys[i].mask;    in fill_match_fields()
    62  if (!cls->knode.sel->keys[i].offmask)    in fill_match_fields()
    66  if (cls->knode.sel->keys[i].offmask)    in fill_match_fields()
    92  struct tc_cls_u32_offload *cls)    in fill_action_fields() argument
    99  exts = cls->knode.exts;    in fill_action_fields()
    149  int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)    in cxgb4_config_knode() argument
    [all …]
|
D | cxgb4_tc_matchall.c |
    52  struct tc_cls_matchall_offload *cls)    in cxgb4_matchall_egress_validate() argument
    54  struct netlink_ext_ack *extack = cls->common.extack;    in cxgb4_matchall_egress_validate()
    55  struct flow_action *actions = &cls->rule->action;    in cxgb4_matchall_egress_validate()
    165  struct tc_cls_matchall_offload *cls)    in cxgb4_matchall_alloc_tc() argument
    178  struct netlink_ext_ack *extack = cls->common.extack;    in cxgb4_matchall_alloc_tc()
    189  flow_action_for_each(i, entry, &cls->rule->action)    in cxgb4_matchall_alloc_tc()
    193  ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);    in cxgb4_matchall_alloc_tc()
    215  tc_port_matchall->egress.cookie = cls->cookie;    in cxgb4_matchall_alloc_tc()
    240  struct tc_cls_matchall_offload *cls)    in cxgb4_matchall_mirror_alloc() argument
    242  struct netlink_ext_ack *extack = cls->common.extack;    in cxgb4_matchall_mirror_alloc()
    [all …]
|
D | cxgb4_tc_flower.h |
    135  struct flow_cls_offload *cls);
    137  struct flow_cls_offload *cls);
    139  struct flow_cls_offload *cls);
|
D | cxgb4_tc_u32.h |
    47  int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls);
    48  int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls);
|
D | cxgb4_tc_flower.c |
    926  struct flow_cls_offload *cls)    in cxgb4_tc_flower_replace() argument
    928  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in cxgb4_tc_flower_replace()
    929  struct netlink_ext_ack *extack = cls->common.extack;    in cxgb4_tc_flower_replace()
    943  fs->tc_cookie = cls->cookie;    in cxgb4_tc_flower_replace()
    945  ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,    in cxgb4_tc_flower_replace()
    950  ch_flower->tc_flower_cookie = cls->cookie;    in cxgb4_tc_flower_replace()
    960  cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);    in cxgb4_tc_flower_replace()
    989  struct flow_cls_offload *cls)    in cxgb4_tc_flower_destroy() argument
    995  ch_flower = ch_flower_lookup(adap, cls->cookie);    in cxgb4_tc_flower_destroy()
    1060  struct flow_cls_offload *cls)    in cxgb4_tc_flower_stats() argument
    [all …]
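The replace/destroy/stats handlers listed here and the knode handlers in cxgb4_tc_u32.h are called from a tc block callback. A hedged sketch of that dispatch pattern, assuming the prototypes shown above; the callback name and its cb_priv handling are simplified, not cxgb4's exact code:

    static int example_block_cb(enum tc_setup_type type, void *type_data,
                                void *cb_priv)
    {
            struct net_device *dev = cb_priv;

            switch (type) {
            case TC_SETUP_CLSFLOWER: {
                    struct flow_cls_offload *cls = type_data;

                    switch (cls->command) {
                    case FLOW_CLS_REPLACE:
                            return cxgb4_tc_flower_replace(dev, cls);
                    case FLOW_CLS_DESTROY:
                            return cxgb4_tc_flower_destroy(dev, cls);
                    case FLOW_CLS_STATS:
                            return cxgb4_tc_flower_stats(dev, cls);
                    default:
                            return -EOPNOTSUPP;
                    }
            }
            case TC_SETUP_CLSU32:
                    /* tc_cls_u32_offload requests route to cxgb4_config_knode()
                     * or cxgb4_delete_knode() depending on the u32 command. */
                    return -EOPNOTSUPP;
            default:
                    return -EOPNOTSUPP;
            }
    }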
|
/linux-6.12.1/drivers/net/ethernet/stmicro/stmmac/ |
D | stmmac_tc.c |
    29  struct tc_cls_u32_offload *cls,    in tc_find_entry() argument
    33  u32 loc = cls->knode.handle;    in tc_find_entry()
    59  struct tc_cls_u32_offload *cls)    in tc_fill_actions() argument
    66  exts = cls->knode.exts;    in tc_fill_actions()
    92  struct tc_cls_u32_offload *cls)    in tc_fill_entry() argument
    95  struct tc_u32_sel *sel = cls->knode.sel;    in tc_fill_entry()
    97  u32 prio = cls->common.prio << 16;    in tc_fill_entry()
    108  switch (ntohs(cls->common.protocol)) {    in tc_fill_entry()
    124  entry = tc_find_entry(priv, cls, true);    in tc_fill_entry()
    129  frag = tc_find_entry(priv, cls, true);    in tc_fill_entry()
    [all …]
|
D | stmmac_selftests.c |
    1337  struct flow_cls_offload *cls;    in __stmmac_test_l3filt() local
    1361  cls = kzalloc(sizeof(*cls), GFP_KERNEL);    in __stmmac_test_l3filt()
    1362  if (!cls) {    in __stmmac_test_l3filt()
    1367  cls->common.chain_index = 0;    in __stmmac_test_l3filt()
    1368  cls->command = FLOW_CLS_REPLACE;    in __stmmac_test_l3filt()
    1369  cls->cookie = dummy_cookie;    in __stmmac_test_l3filt()
    1386  cls->rule = rule;    in __stmmac_test_l3filt()
    1401  ret = stmmac_tc_setup_cls(priv, priv, cls);    in __stmmac_test_l3filt()
    1409  cls->command = FLOW_CLS_DESTROY;    in __stmmac_test_l3filt()
    1410  stmmac_tc_setup_cls(priv, priv, cls);    in __stmmac_test_l3filt()
    [all …]
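Unlike the driver hooks above, this selftest fabricates the flow_cls_offload in software instead of receiving it from the tc stack. A rough sketch of that round trip, assuming stmmac_tc_setup_cls() as shown in the hits; error paths, the actual match/action population, and the conditional destroy of the real selftest are trimmed:

    static int example_l3_filter_roundtrip(struct stmmac_priv *priv,
                                           unsigned long dummy_cookie)
    {
            struct flow_cls_offload *cls;
            struct flow_rule *rule;
            int ret;

            cls = kzalloc(sizeof(*cls), GFP_KERNEL);
            if (!cls)
                    return -ENOMEM;

            rule = flow_rule_alloc(1);      /* room for one action entry */
            if (!rule) {
                    kfree(cls);
                    return -ENOMEM;
            }

            cls->common.chain_index = 0;
            cls->command = FLOW_CLS_REPLACE;
            cls->cookie = dummy_cookie;
            cls->rule = rule;
            /* ... fill rule->match and rule->action here ... */

            ret = stmmac_tc_setup_cls(priv, priv, cls);

            cls->command = FLOW_CLS_DESTROY;
            stmmac_tc_setup_cls(priv, priv, cls);

            kfree(rule);
            kfree(cls);
            return ret;
    }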
|
/linux-6.12.1/drivers/net/ethernet/freescale/dpaa2/ |
D | dpaa2-switch-flower.c |
    11  static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,    in dpaa2_switch_flower_parse_key() argument
    14  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in dpaa2_switch_flower_parse_key()
    16  struct netlink_ext_ack *extack = cls->common.extack;    in dpaa2_switch_flower_parse_key()
    494  struct flow_cls_offload *cls)    in dpaa2_switch_cls_flower_replace_acl() argument
    496  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in dpaa2_switch_cls_flower_replace_acl()
    497  struct netlink_ext_ack *extack = cls->common.extack;    in dpaa2_switch_cls_flower_replace_acl()
    512  err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);    in dpaa2_switch_cls_flower_replace_acl()
    522  acl_entry->prio = cls->common.prio;    in dpaa2_switch_cls_flower_replace_acl()
    523  acl_entry->cookie = cls->cookie;    in dpaa2_switch_cls_flower_replace_acl()
    537  static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,    in dpaa2_switch_flower_parse_mirror_key() argument
    [all …]
|
D | dpaa2-switch.h |
    258  struct flow_cls_offload *cls);
    261  struct flow_cls_offload *cls);
    264  struct tc_cls_matchall_offload *cls);
    267  struct tc_cls_matchall_offload *cls);
|
/linux-6.12.1/include/linux/ |
D | dynamic_debug.h |
    163  #define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \    argument
    172  .class_id = cls, \
    175  BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
    221  #define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \    argument
    222  DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
    230  #define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \    argument
    231  DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
    247  #define _dynamic_func_call_cls(cls, fmt, func, ...) \    argument
    248  __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
    257  #define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \    argument
    [all …]
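These are the class-aware internals of dynamic debug: DEFINE_DYNAMIC_DEBUG_METADATA_CLS() stores cls in the _ddebug descriptor's class_id and rejects out-of-range ids at build time via BUILD_BUG_ON_MSG(), while _dynamic_func_call_cls() wraps the call site. A hedged, purely illustrative wrapper (not a kernel API) showing how a subsystem-private debug macro could sit on top of it:

    /* Hypothetical front-end macro for illustration only; real users of the
     * class mechanism (e.g. drm debug categories) define their own wrappers. */
    #define my_dbg_cls(cls, fmt, ...)                                       \
            _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug,            \
                                   pr_fmt(fmt), ##__VA_ARGS__)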
|
D | transport_class.h |
    27  #define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \    argument
    28  struct transport_class cls = { \
    43  #define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \    argument
    44  struct anon_transport_class cls = { \
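DECLARE_TRANSPORT_CLASS() bundles a device class with setup/remove/configure hooks. A minimal usage sketch, assuming a fictional "foo_transport" class; the callback signature follows the setup/remove/configure members of struct transport_class:

    static int foo_setup(struct transport_container *tc, struct device *parent,
                         struct device *cdev)
    {
            /* initialize per-device transport attributes here */
            return 0;
    }

    static DECLARE_TRANSPORT_CLASS(foo_transport_class, "foo_transport",
                                   foo_setup, NULL, NULL);

    /* Registered and torn down with transport_class_register() and
     * transport_class_unregister() at module init/exit. */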
|
/linux-6.12.1/scripts/gdb/linux/ |
D | device.py |
    78  def class_for_each_device(cls):    argument
    79  for kn in klist_for_each(cls['klist_devices']):
    125  for cls in for_each_class():
    126  gdb.write("class {}:\t{}\n".format(cls['class']['name'].string(), cls))
    127  for dev in class_for_each_device(cls):
    130  cls = get_class_by_name(arg)
    131  for dev in class_for_each_device(cls):
    170  def invoke(self, cls, name):    argument
    172  cls = get_class_by_name(cls.string())
    173  for dev in class_for_each_device(cls):
|
/linux-6.12.1/drivers/net/dsa/microchip/ |
D | ksz9477_tc_flower.c |
    159  struct flow_cls_offload *cls,    in ksz9477_flower_parse_action() argument
    162  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in ksz9477_flower_parse_action()
    170  if (TC_H_MIN(cls->classid)) {    in ksz9477_flower_parse_action()
    216  struct flow_cls_offload *cls, bool ingress)    in ksz9477_cls_flower_add() argument
    218  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in ksz9477_cls_flower_add()
    219  struct netlink_ext_ack *extack = cls->common.extack;    in ksz9477_cls_flower_add()
    237  ret = ksz9477_flower_parse_key(dev, port, extack, rule, cls->cookie,    in ksz9477_cls_flower_add()
    238  cls->common.prio);    in ksz9477_cls_flower_add()
    242  ret = ksz9477_flower_parse_action(dev, port, extack, cls,    in ksz9477_cls_flower_add()
    270  struct flow_cls_offload *cls, bool ingress)    in ksz9477_cls_flower_del() argument
    [all …]
|
/linux-6.12.1/tools/perf/scripts/python/ |
D | compaction-times.py |
    114  def add_filter(cls, filter):    argument
    115  cls.fobj = filter
    118  def create_pending(cls, pid, comm, start_secs, start_nsecs):    argument
    121  head = cls.heads[pid]
    124  if cls.fobj != None:
    125  filtered = cls.fobj.filter(pid, comm)
    126  head = cls.heads[pid] = chead(comm, pid, filtered)
    132  def increment_pending(cls, pid, migrated, fscan, mscan):    argument
    133  head = cls.heads[pid]
    141  def complete_pending(cls, pid, secs, nsecs):    argument
    [all …]
|
/linux-6.12.1/tools/testing/selftests/hid/tests/ |
D | base_device.py |
    136  def _init_pyudev(cls: Type["UdevHIDIsReady"]) -> None:
    137  if cls._pyudev_context is None:
    138  cls._pyudev_context = pyudev.Context()
    139  cls._pyudev_monitor = pyudev.Monitor.from_netlink(cls._pyudev_context)
    140  cls._pyudev_monitor.filter_by("hid")
    141  cls._pyudev_monitor.start()
    144  cls._pyudev_monitor.fileno(), cls._cls_udev_event_callback
    148  def _cls_udev_event_callback(cls: Type["UdevHIDIsReady"]) -> None:
    149  if cls._pyudev_monitor is None:
    152  for event in iter(functools.partial(cls._pyudev_monitor.poll, 0.02), None):
    [all …]
|
/linux-6.12.1/drivers/acpi/acpica/ |
D | utids.c |
    341  struct acpi_pnp_device_id *cls;    in acpi_ut_execute_CLS() local
    377  cls =    in acpi_ut_execute_CLS()
    380  if (!cls) {    in acpi_ut_execute_CLS()
    387  cls->string =    in acpi_ut_execute_CLS()
    388  ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id));    in acpi_ut_execute_CLS()
    392  acpi_ex_pci_cls_to_string(cls->string, class_code);    in acpi_ut_execute_CLS()
    393  cls->length = length;    in acpi_ut_execute_CLS()
    394  *return_id = cls;    in acpi_ut_execute_CLS()
|
D | nsxfname.c |
    234  struct acpi_pnp_device_id *cls = NULL;    in acpi_get_object_info() local
    317  status = acpi_ut_execute_CLS(node, &cls);    in acpi_get_object_info()
    319  info_size += cls->length;    in acpi_get_object_info()
    427  if (cls) {    in acpi_get_object_info()
    429  cls, next_id_string);    in acpi_get_object_info()
    453  if (cls) {    in acpi_get_object_info()
    454  ACPI_FREE(cls);    in acpi_get_object_info()
|
/linux-6.12.1/drivers/net/dsa/sja1105/ |
D | sja1105_flower.c |
    197  struct flow_cls_offload *cls,    in sja1105_flower_parse_key() argument
    200  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in sja1105_flower_parse_key()
    347  struct flow_cls_offload *cls, bool ingress)    in sja1105_cls_flower_add() argument
    349  struct flow_rule *rule = flow_cls_offload_flow_rule(cls);    in sja1105_cls_flower_add()
    350  struct netlink_ext_ack *extack = cls->common.extack;    in sja1105_cls_flower_add()
    353  unsigned long cookie = cls->cookie;    in sja1105_cls_flower_add()
    360  rc = sja1105_flower_parse_key(priv, extack, cls, &key);    in sja1105_cls_flower_add()
    463  struct flow_cls_offload *cls, bool ingress)    in sja1105_cls_flower_del() argument
    466  struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);    in sja1105_cls_flower_del()
    474  return sja1105_vl_delete(priv, port, rule, cls->common.extack);    in sja1105_cls_flower_del()
    [all …]
|
/linux-6.12.1/drivers/net/ethernet/meta/fbnic/ |
D | fbnic_mac.c |
    12  unsigned int cls, unsigned int readrq)    in fbnic_init_readrq() argument
    22  FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);    in fbnic_init_readrq()
    28  unsigned int cls, unsigned int mps)    in fbnic_init_mps() argument
    36  FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);    in fbnic_init_mps()
    44  int readrq, mps, cls;    in fbnic_mac_init_axi() local
    60  cls = ilog2(L1_CACHE_BYTES) - 6;    in fbnic_mac_init_axi()
    61  cls = clamp(cls, 0, 3);    in fbnic_mac_init_axi()
    64  fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);    in fbnic_mac_init_axi()
    65  fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);    in fbnic_mac_init_axi()
    77  FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));    in fbnic_mac_init_axi()
    [all …]
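A worked reading of lines 60-61, assuming the register field uses the common 64 << n cache-line-size encoding: L1_CACHE_BYTES of 64 gives ilog2(64) - 6 = 0, 128 gives 1, 256 gives 2, and clamp(cls, 0, 3) caps anything larger at the 2-bit field maximum of 3.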
|
/linux-6.12.1/tools/testing/selftests/bpf/progs/ |
D | test_spin_lock.c |
    56  struct cls_elem *cls;    in bpf_spin_lock_test() local
    96  cls = bpf_get_local_storage(&cls_map, 0);    in bpf_spin_lock_test()
    97  bpf_spin_lock(&cls->lock);    in bpf_spin_lock_test()
    98  cls->cnt++;    in bpf_spin_lock_test()
    99  bpf_spin_unlock(&cls->lock);    in bpf_spin_lock_test()
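bpf_get_local_storage() at line 96 returns the cgroup-local value the spin lock lives in. A hedged sketch of the value type and map definition such a lookup assumes; the exact layout in test_spin_lock.c may differ slightly:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct cls_elem {
            struct bpf_spin_lock lock;
            int cnt;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
            __type(key, struct bpf_cgroup_storage_key);
            __type(value, struct cls_elem);
    } cls_map SEC(".maps");

    /* In the program body, mirroring the hits above:
     *   cls = bpf_get_local_storage(&cls_map, 0);
     *   bpf_spin_lock(&cls->lock);
     *   cls->cnt++;
     *   bpf_spin_unlock(&cls->lock);
     */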
|
/linux-6.12.1/arch/arm64/mm/ |
D | dma-mapping.c |
    43  int cls = cache_line_size_of_cpu();    in arch_setup_dma_ops() local
    45  WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,    in arch_setup_dma_ops()
    49  ARCH_DMA_MINALIGN, cls);    in arch_setup_dma_ops()
|
/linux-6.12.1/include/linux/device/ |
D | class.h |
    84  void class_compat_unregister(struct class_compat *cls);
    85  int class_compat_create_link(struct class_compat *cls, struct device *dev,
    87  void class_compat_remove_link(struct class_compat *cls, struct device *dev,
    229  void class_destroy(const struct class *cls);
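class_destroy() at line 229 is the teardown half of the dynamic class API. A minimal sketch of the pairing, assuming the 6.x class_create() signature that takes only a name string; "bar" is a made-up example class:

    static struct class *bar_class;

    static int __init bar_init(void)
    {
            bar_class = class_create("bar");
            return PTR_ERR_OR_ZERO(bar_class);
    }

    static void __exit bar_exit(void)
    {
            class_destroy(bar_class);
    }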
|
/linux-6.12.1/net/dsa/ |
D | user.c |
    1367  struct tc_cls_matchall_offload *cls,    in dsa_user_add_cls_matchall_mirred() argument
    1370  struct netlink_ext_ack *extack = cls->common.extack;    in dsa_user_add_cls_matchall_mirred()
    1383  if (!flow_action_basic_hw_stats_check(&cls->rule->action,    in dsa_user_add_cls_matchall_mirred()
    1384  cls->common.extack))    in dsa_user_add_cls_matchall_mirred()
    1387  act = &cls->rule->action.entries[0];    in dsa_user_add_cls_matchall_mirred()
    1407  mall_tc_entry->cookie = cls->cookie;    in dsa_user_add_cls_matchall_mirred()
    1426  struct tc_cls_matchall_offload *cls,    in dsa_user_add_cls_matchall_police() argument
    1429  struct netlink_ext_ack *extack = cls->common.extack;    in dsa_user_add_cls_matchall_police()
    1450  if (!flow_action_basic_hw_stats_check(&cls->rule->action,    in dsa_user_add_cls_matchall_police()
    1451  cls->common.extack))    in dsa_user_add_cls_matchall_police()
    [all …]
|