/linux-6.12.1/tools/lib/subcmd/

help.c
    16  void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)    in add_cmdname() argument
    26  ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc);    in add_cmdname()
    27  cmds->names[cmds->cnt++] = ent;    in add_cmdname()
    30  void clean_cmdnames(struct cmdnames *cmds)    in clean_cmdnames() argument
    34  for (i = 0; i < cmds->cnt; ++i)    in clean_cmdnames()
    35  zfree(&cmds->names[i]);    in clean_cmdnames()
    36  zfree(&cmds->names);    in clean_cmdnames()
    37  cmds->cnt = 0;    in clean_cmdnames()
    38  cmds->alloc = 0;    in clean_cmdnames()
    48  void uniq(struct cmdnames *cmds)    in uniq() argument
    [all …]

help.h
    26  void add_cmdname(struct cmdnames *cmds, const char *name, size_t len);
    27  void clean_cmdnames(struct cmdnames *cmds);
    29  void uniq(struct cmdnames *cmds);
    31  void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);

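The help.c and help.h hits above are the subcmd library's command-name list: a counted, grown-on-demand array of heap-allocated entries. Below is a minimal userspace sketch of that pattern; the struct layout mirrors the hits, but the grow step is a simplified stand-in for the library's ALLOC_GROW/alloc_nr helpers and error handling is omitted, so treat it as an illustration rather than the exact tools/lib/subcmd code.

    #include <stdlib.h>
    #include <string.h>

    struct cmdname {
        size_t len;
        char name[];                     /* flexible array member, name stored inline */
    };

    struct cmdnames {
        size_t cnt;                      /* entries in use */
        size_t alloc;                    /* entries allocated */
        struct cmdname **names;
    };

    /* Append one name, growing the pointer array geometrically when needed
     * (simplified stand-in for ALLOC_GROW). */
    static void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
    {
        struct cmdname *ent = malloc(sizeof(*ent) + len + 1);

        ent->len = len;
        memcpy(ent->name, name, len);
        ent->name[len] = '\0';

        if (cmds->cnt + 1 > cmds->alloc) {
            cmds->alloc = (cmds->alloc + 16) * 3 / 2;
            cmds->names = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names));
        }
        cmds->names[cmds->cnt++] = ent;
    }

    /* Free every entry, then the array itself, and reset the bookkeeping. */
    static void clean_cmdnames(struct cmdnames *cmds)
    {
        for (size_t i = 0; i < cmds->cnt; i++)
            free(cmds->names[i]);
        free(cmds->names);
        cmds->names = NULL;
        cmds->cnt = 0;
        cmds->alloc = 0;
    }
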
/linux-6.12.1/drivers/media/pci/saa7164/

saa7164-cmd.c
    18  if (dev->cmds[i].inuse == 0) {    in saa7164_cmd_alloc_seqno()
    19  dev->cmds[i].inuse = 1;    in saa7164_cmd_alloc_seqno()
    20  dev->cmds[i].signalled = 0;    in saa7164_cmd_alloc_seqno()
    21  dev->cmds[i].timeout = 0;    in saa7164_cmd_alloc_seqno()
    22  ret = dev->cmds[i].seqno;    in saa7164_cmd_alloc_seqno()
    34  if ((dev->cmds[seqno].inuse == 1) &&    in saa7164_cmd_free_seqno()
    35  (dev->cmds[seqno].seqno == seqno)) {    in saa7164_cmd_free_seqno()
    36  dev->cmds[seqno].inuse = 0;    in saa7164_cmd_free_seqno()
    37  dev->cmds[seqno].signalled = 0;    in saa7164_cmd_free_seqno()
    38  dev->cmds[seqno].timeout = 0;    in saa7164_cmd_free_seqno()
    [all …]

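These hits are a fixed table of command slots indexed by sequence number: alloc claims the first free slot, free clears it only if it is in use and self-consistent. A self-contained sketch of that slot table is below; the field names follow the hits, but the table size, the init step, and the absence of locking are simplifications of the real driver.

    #define NUM_SLOTS 8

    struct cmd_slot {
        int seqno;          /* slot index doubles as the sequence number */
        int inuse;
        int signalled;
        int timeout;
    };

    static struct cmd_slot cmds[NUM_SLOTS];

    static void cmd_slots_init(void)
    {
        for (int i = 0; i < NUM_SLOTS; i++)
            cmds[i].seqno = i;
    }

    /* Claim the first free slot and return its sequence number, or -1 if full. */
    static int cmd_alloc_seqno(void)
    {
        for (int i = 0; i < NUM_SLOTS; i++) {
            if (cmds[i].inuse == 0) {
                cmds[i].inuse = 1;
                cmds[i].signalled = 0;
                cmds[i].timeout = 0;
                return cmds[i].seqno;
            }
        }
        return -1;
    }

    /* Release a slot only if it is actually in use and matches its index. */
    static void cmd_free_seqno(int seqno)
    {
        if (cmds[seqno].inuse == 1 && cmds[seqno].seqno == seqno) {
            cmds[seqno].inuse = 0;
            cmds[seqno].signalled = 0;
            cmds[seqno].timeout = 0;
        }
    }
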
/linux-6.12.1/drivers/infiniband/core/

roce_gid_mgmt.c
    69  struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];    member
   625  for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {    in netdevice_event_work_handler()
   626  ib_enum_all_roce_netdevs(work->cmds[i].filter,    in netdevice_event_work_handler()
   627  work->cmds[i].filter_ndev,    in netdevice_event_work_handler()
   628  work->cmds[i].cb,    in netdevice_event_work_handler()
   629  work->cmds[i].ndev);    in netdevice_event_work_handler()
   630  dev_put(work->cmds[i].ndev);    in netdevice_event_work_handler()
   631  dev_put(work->cmds[i].filter_ndev);    in netdevice_event_work_handler()
   637  static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,    in netdevice_queue_work() argument
   647  memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));    in netdevice_queue_work()
    [all …]

/linux-6.12.1/tools/perf/util/

help-unknown-cmd.c
    32  static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)    in add_cmd_list() argument
    34  unsigned int i, nr = cmds->cnt + old->cnt;    in add_cmd_list()
    37  if (nr > cmds->alloc) {    in add_cmd_list()
    39  if (alloc_nr(cmds->alloc) < nr)    in add_cmd_list()
    40  cmds->alloc = nr;    in add_cmd_list()
    42  cmds->alloc = alloc_nr(cmds->alloc);    in add_cmd_list()
    43  tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names));    in add_cmd_list()
    46  cmds->names = tmp;    in add_cmd_list()
    49  cmds->names[cmds->cnt++] = old->names[i];    in add_cmd_list()

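The add_cmd_list() hits show the usual safe-realloc merge: compute the combined count, grow the destination array through a temporary pointer so the original is not lost if realloc fails, then append the source entries. A sketch of that flow is below; the local struct re-declaration keeps it self-contained, and grow() is a hypothetical stand-in for perf's alloc_nr() growth policy.

    #include <stdlib.h>

    struct cmdname;                      /* entries as in the cmdnames sketch further up */

    struct cmdnames {
        size_t cnt, alloc;
        struct cmdname **names;
    };

    /* Hypothetical growth policy standing in for alloc_nr(): 1.5x plus headroom. */
    static size_t grow(size_t alloc)
    {
        return (alloc + 16) * 3 / 2;
    }

    /* Append all of old's entries to cmds. Returns 0 on success, -1 on ENOMEM. */
    static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
    {
        size_t nr = cmds->cnt + old->cnt;

        if (nr > cmds->alloc) {
            struct cmdname **tmp;

            if (grow(cmds->alloc) < nr)
                cmds->alloc = nr;
            else
                cmds->alloc = grow(cmds->alloc);

            /* realloc into tmp first: on failure the old array is still valid. */
            tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names));
            if (!tmp)
                return -1;
            cmds->names = tmp;
        }

        for (size_t i = 0; i < old->cnt; i++)
            cmds->names[cmds->cnt++] = old->names[i];

        old->cnt = 0;    /* ownership of the entries moves to cmds */
        return 0;
    }
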
/linux-6.12.1/drivers/gpu/drm/i915/gt/

gen7_renderclear.c
   383  struct batch_chunk cmds, state;    in emit_batch() local
   387  batch_init(&cmds, vma, start, 0, bv->state_start);    in emit_batch()
   397  gen7_emit_pipeline_flush(&cmds);    in emit_batch()
   398  gen7_emit_pipeline_invalidate(&cmds);    in emit_batch()
   399  batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));    in emit_batch()
   400  batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));    in emit_batch()
   401  batch_add(&cmds, 0xffff0000 |    in emit_batch()
   405  batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));    in emit_batch()
   406  batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);    in emit_batch()
   407  gen7_emit_pipeline_invalidate(&cmds);    in emit_batch()
    [all …]

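emit_batch() builds a GPU batch by appending 32-bit words (opcodes, register offsets, values) to a batch_chunk cursor. A minimal sketch of such an appender is below; the real batch_init() takes a vma and byte offsets into it, so this simplified signature and the fixed-size bounds check are assumptions made for the example.

    #include <assert.h>
    #include <stdint.h>

    /* Cursor over a CPU-mapped region of the batch buffer. */
    struct batch_chunk {
        uint32_t *base;        /* start of the chunk */
        unsigned int offset;   /* next free dword */
        unsigned int max;      /* chunk size in dwords */
    };

    static void batch_init(struct batch_chunk *c, uint32_t *base, unsigned int max_dwords)
    {
        c->base = base;
        c->offset = 0;
        c->max = max_dwords;
    }

    /* Append one command-stream dword; callers emit an opcode followed by its
     * operands, e.g. a load-register-immediate opcode, then register/value pairs. */
    static void batch_add(struct batch_chunk *c, uint32_t dword)
    {
        assert(c->offset < c->max);
        c->base[c->offset++] = dword;
    }
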
/linux-6.12.1/tools/usb/usbip/src/

usbip.c
    41  static const struct command cmds[] = {    variable
   100  for (i = 0; cmds[i].name != NULL; i++)    in usbip_help()
   101  if (!strcmp(cmds[i].name, argv[0]) && cmds[i].usage) {    in usbip_help()
   102  cmds[i].usage();    in usbip_help()
   110  for (cmd = cmds; cmd->name != NULL; cmd++)    in usbip_help()
   177  for (i = 0; cmds[i].name != NULL; i++)    in main()
   178  if (!strcmp(cmds[i].name, cmd)) {    in main()
   182  rc = run_command(&cmds[i], argc, argv);    in main()

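usbip dispatches its subcommands through a NULL-terminated table of name/handler/usage entries; help scans the same table that main() uses for dispatch. A compilable sketch of that layout is below; the two toy handlers and the exact field names are illustrative, not the usbip definitions.

    #include <stdio.h>
    #include <string.h>

    struct command {
        const char *name;
        int (*fn)(int argc, char *argv[]);
        void (*usage)(void);
    };

    static int cmd_list(int argc, char *argv[])   { (void)argc; (void)argv; puts("list");   return 0; }
    static int cmd_attach(int argc, char *argv[]) { (void)argc; (void)argv; puts("attach"); return 0; }

    /* The NULL name acts as the sentinel that stops every scan of the table. */
    static const struct command cmds[] = {
        { "list",   cmd_list,   NULL },
        { "attach", cmd_attach, NULL },
        { NULL,     NULL,       NULL },
    };

    int main(int argc, char *argv[])
    {
        if (argc < 2) {
            fprintf(stderr, "usage: prog <command> [args]\n");
            return 1;
        }

        for (int i = 0; cmds[i].name != NULL; i++)
            if (!strcmp(cmds[i].name, argv[1]))
                return cmds[i].fn(argc - 1, argv + 1);

        fprintf(stderr, "unknown command: %s\n", argv[1]);
        return 1;
    }
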
/linux-6.12.1/drivers/gpu/drm/vmwgfx/

vmwgfx_overlay.c
   105  } *cmds;    in vmw_overlay_send_put() local
   117  fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;    in vmw_overlay_send_put()
   119  cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);    in vmw_overlay_send_put()
   121  if (!cmds)    in vmw_overlay_send_put()
   124  items = (typeof(items))&cmds[1];    in vmw_overlay_send_put()
   128  fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));    in vmw_overlay_send_put()
   130  cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;    in vmw_overlay_send_put()
   131  cmds->header.streamId = arg->stream_id;    in vmw_overlay_send_put()
   185  } *cmds;    in vmw_overlay_send_stop() local
   189  cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));    in vmw_overlay_send_stop()
    [all …]

/linux-6.12.1/tools/testing/selftests/tc-testing/plugin-lib/

nsPlugin.py
   163  cmds = []
   167  cmds.append(self._replace_keywords('netns add {}'.format(ns)))
   168  cmds.append(self._replace_keywords('link add $DEV1 type veth peer name $DEV0'))
   169  cmds.append(self._replace_keywords('link set $DEV1 netns {}'.format(ns)))
   170  cmds.append(self._replace_keywords('link add $DUMMY type dummy'.format(ns)))
   171  cmds.append(self._replace_keywords('link set $DUMMY netns {}'.format(ns)))
   172  cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV1 up'.format(ns)))
   173  cmds.append(self._replace_keywords('netns exec {} $IP link set $DUMMY up'.format(ns)))
   174  cmds.append(self._replace_keywords('link set $DEV0 up'.format(ns)))
   177  cmds.append(self._replace_keywords('link set $DEV2 netns {}'.format(ns)))
    [all …]

/linux-6.12.1/drivers/net/wireless/intel/iwlwifi/fw/

notif-wait.c
    47  if (w->cmds[i] == rec_id ||    in iwl_notification_wait()
    48  (!iwl_cmd_groupid(w->cmds[i]) &&    in iwl_notification_wait()
    49  DEF_ID(w->cmds[i]) == rec_id)) {    in iwl_notification_wait()
    85  const u16 *cmds, int n_cmds,    in iwl_init_notification_wait() argument
    96  memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));    in iwl_init_notification_wait()

notif-wait.h
    53  u16 cmds[MAX_NOTIF_CMDS];    member
    83  const u16 *cmds, int n_cmds,

/linux-6.12.1/drivers/gpu/drm/i915/gt/uc/

intel_guc_ct.c
   175  u32 *cmds, u32 size_in_bytes, u32 resv_space)    in guc_ct_buffer_init() argument
   180  ctb->cmds = cmds;    in guc_ct_buffer_init()
   265  u32 *cmds;    in intel_guc_ct_init() local
   286  cmds = blob + 2 * CTB_DESC_SIZE;    in intel_guc_ct_init()
   290  ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,    in intel_guc_ct_init()
   293  guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);    in intel_guc_ct_init()
   297  cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;    in intel_guc_ct_init()
   301  ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,    in intel_guc_ct_init()
   304  guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);    in intel_guc_ct_init()
   333  u32 base, desc, cmds, size;    in intel_guc_ct_enable() local
    [all …]

/linux-6.12.1/drivers/gpu/host1x/

job.c
    66  job->cmds = num_cmdbufs ? mem : NULL;    in host1x_job_alloc()
   116  struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;    in host1x_job_add_gather()
   129  struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];    in host1x_job_add_wait()
   220  if (job->cmds[i].is_wait)    in pin_job()
   223  g = &job->cmds[i].gather;    in pin_job()
   541  if (job->cmds[i].is_wait)    in copy_gathers()
   544  g = &job->cmds[i].gather;    in copy_gathers()
   570  if (job->cmds[i].is_wait)    in copy_gathers()
   572  g = &job->cmds[i].gather;    in copy_gathers()
   619  if (job->cmds[i].is_wait)    in host1x_job_pin()
    [all …]

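host1x keeps a job's command list as an array whose entries are either a gather (a reference to command words in a buffer) or a wait, with an is_wait flag selecting the union member; every walker skips the waits when it only cares about gathers. A small sketch of that tagged-union layout is below; only the is_wait/gather shape comes from the hits, the field contents are placeholders.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct job_gather {
        uint32_t words;        /* number of command words in the gather */
        uint64_t offset;       /* offset of those words within their buffer */
    };

    struct job_wait {
        uint32_t id;           /* syncpoint id to wait on */
        uint32_t threshold;
    };

    /* One slot of the job's command list; is_wait selects the union member. */
    struct job_cmd {
        bool is_wait;
        union {
            struct job_gather gather;
            struct job_wait wait;
        };
    };

    /* Visit only the gathers, skipping wait entries, as the pin/copy passes do. */
    static void for_each_gather(struct job_cmd *cmds, size_t num_cmds,
                                void (*fn)(struct job_gather *g))
    {
        for (size_t i = 0; i < num_cmds; i++) {
            if (cmds[i].is_wait)
                continue;
            fn(&cmds[i].gather);
        }
    }
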
/linux-6.12.1/drivers/dma/

bcm-sba-raid.c
   115  struct brcm_sba_command cmds[];    member
   556  struct brcm_sba_command *cmds,    in sba_fillup_interrupt_msg() argument
   562  struct brcm_sba_command *cmdsp = cmds;    in sba_fillup_interrupt_msg()
   608  msg->sba.cmds = cmds;    in sba_fillup_interrupt_msg()
   609  msg->sba.cmds_count = cmdsp - cmds;    in sba_fillup_interrupt_msg()
   632  sba_fillup_interrupt_msg(req, req->cmds, &req->msg);    in sba_prep_dma_interrupt()
   642  struct brcm_sba_command *cmds,    in sba_fillup_memcpy_msg() argument
   650  struct brcm_sba_command *cmdsp = cmds;    in sba_fillup_memcpy_msg()
   696  msg->sba.cmds = cmds;    in sba_fillup_memcpy_msg()
   697  msg->sba.cmds_count = cmdsp - cmds;    in sba_fillup_memcpy_msg()
    [all …]

/linux-6.12.1/drivers/vdpa/mlx5/core/

mr.c
   210  struct mlx5_vdpa_async_cmd *cmds;    in create_direct_keys() local
   215  cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);    in create_direct_keys()
   216  if (!cmds)    in create_direct_keys()
   231  cmds[i].out = cmd_mem->out;    in create_direct_keys()
   232  cmds[i].outlen = sizeof(cmd_mem->out);    in create_direct_keys()
   233  cmds[i].in = cmd_mem->in;    in create_direct_keys()
   234  cmds[i].inlen = struct_size(cmd_mem, mtt, mttcount);    in create_direct_keys()
   241  err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);    in create_direct_keys()
   250  struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];    in create_direct_keys()
   268  cmd_mem = container_of(cmds[i].out, struct mlx5_create_mkey_mem, out);    in create_direct_keys()
    [all …]

resources.c
   332  struct mlx5_vdpa_async_cmd *cmds,    in issue_async_cmd() argument
   337  struct mlx5_vdpa_async_cmd *cmd = &cmds[issued];    in issue_async_cmd()
   349  wait_for_completion(&cmds[*completed].cmd_done);    in issue_async_cmd()
   367  struct mlx5_vdpa_async_cmd *cmds,    in mlx5_vdpa_exec_async_cmds() argument
   375  init_completion(&cmds[i].cmd_done);    in mlx5_vdpa_exec_async_cmds()
   379  err = issue_async_cmd(mvdev, cmds, issued, &completed);    in mlx5_vdpa_exec_async_cmds()
   390  wait_for_completion(&cmds[completed++].cmd_done);    in mlx5_vdpa_exec_async_cmds()

/linux-6.12.1/drivers/crypto/ccp/

ccp-crypto-main.c
    54  struct list_head cmds;    member
   103  list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {    in ccp_crypto_cmd_complete()
   114  if (req_queue.backlog != &req_queue.cmds) {    in ccp_crypto_cmd_complete()
   225  list_for_each_entry(tmp, &req_queue.cmds, entry) {    in ccp_crypto_enqueue_cmd()
   241  if (req_queue.backlog == &req_queue.cmds)    in ccp_crypto_enqueue_cmd()
   247  list_add_tail(&crypto_cmd->entry, &req_queue.cmds);    in ccp_crypto_enqueue_cmd()
   406  INIT_LIST_HEAD(&req_queue.cmds);    in ccp_crypto_init()
   407  req_queue.backlog = &req_queue.cmds;    in ccp_crypto_init()

/linux-6.12.1/tools/perf/

perf-completion.sh
   131  cmds_=$($cmd $1 --list-cmds)
   159  cmds=$($cmd --list-opts)
   161  cmds=$($cmd --list-cmds)
   163  __perfcomp "$cmds" "$cur"
   220  subcmds=$($cmd $prev_skip_opts --list-cmds)

/linux-6.12.1/tools/bpf/bpftool/

main.c
   187  int cmd_select(const struct cmd *cmds, int argc, char **argv,    in cmd_select() argument
   196  if (argc < 1 && cmds[0].func)    in cmd_select()
   197  return cmds[0].func(argc, argv);    in cmd_select()
   199  for (i = 0; cmds[i].cmd; i++) {    in cmd_select()
   200  if (is_prefix(*argv, cmds[i].cmd)) {    in cmd_select()
   201  if (!cmds[i].func) {    in cmd_select()
   203  cmds[i].cmd);    in cmd_select()
   206  return cmds[i].func(argc - 1, argv + 1);    in cmd_select()

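Unlike usbip's exact-match loop above, bpftool's cmd_select() accepts a prefix of a command name (the first matching table entry wins) and falls back to the first entry when no argument is left. A sketch of that dispatch is below; is_prefix() here is a simplified reimplementation written for the example, and the error messages are illustrative.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct cmd {
        const char *cmd;
        int (*func)(int argc, char **argv);
    };

    /* True if arg is a non-empty prefix of name, so "pr" selects "prog". */
    static bool is_prefix(const char *arg, const char *name)
    {
        size_t n = strlen(arg);

        return n > 0 && n <= strlen(name) && !strncmp(arg, name, n);
    }

    /* Dispatch argv[0] against a table terminated by a { NULL, NULL } entry.
     * With no arguments left, fall back to the first entry (typically help). */
    static int cmd_select(const struct cmd *cmds, int argc, char **argv)
    {
        if (argc < 1 && cmds[0].func)
            return cmds[0].func(argc, argv);

        for (int i = 0; cmds[i].cmd; i++) {
            if (!is_prefix(*argv, cmds[i].cmd))
                continue;
            if (!cmds[i].func) {
                fprintf(stderr, "command '%s' is not supported here\n", cmds[i].cmd);
                return -1;
            }
            return cmds[i].func(argc - 1, argv + 1);
        }

        fprintf(stderr, "unknown command '%s'\n", *argv);
        return -1;
    }
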
/linux-6.12.1/drivers/iio/adc/

ti-adc108s102.c
   116  unsigned int bit, cmds;    in adc108s102_update_scan_mode() local
   122  cmds = 0;    in adc108s102_update_scan_mode()
   124  st->tx_buf[cmds++] = cpu_to_be16(ADC108S102_CMD(bit));    in adc108s102_update_scan_mode()
   127  st->tx_buf[cmds++] = 0x00;    in adc108s102_update_scan_mode()
   132  st->ring_xfer.len = cmds * sizeof(st->tx_buf[0]);    in adc108s102_update_scan_mode()

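adc108s102_update_scan_mode() turns the active-channel bitmask into a sequence of SPI command words, one per selected channel plus a trailing dummy word, and sizes the transfer from the resulting count. A standalone sketch of that loop is below; the ADC_CMD() bit layout, the channel count, and the plain host-endian words are assumptions for illustration (the driver byte-swaps with cpu_to_be16() and uses its own command macro).

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CHANNELS 8
    #define ADC_CMD(ch)  ((uint16_t)((ch) << 11))   /* hypothetical channel-select encoding */

    /* Emit one command word per channel set in scan_mask, plus a trailing
     * dummy word to clock out the final conversion; return the word count. */
    static unsigned int build_scan_commands(unsigned long scan_mask, uint16_t *tx_buf)
    {
        unsigned int cmds = 0;

        for (unsigned int bit = 0; bit < MAX_CHANNELS; bit++)
            if (scan_mask & (1UL << bit))
                tx_buf[cmds++] = ADC_CMD(bit);

        tx_buf[cmds++] = 0x00;
        return cmds;
    }

    int main(void)
    {
        uint16_t tx_buf[MAX_CHANNELS + 1];
        unsigned int n = build_scan_commands(0x05, tx_buf);   /* channels 0 and 2 */

        printf("transfer length: %zu bytes\n", n * sizeof(tx_buf[0]));
        return 0;
    }
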
/linux-6.12.1/drivers/interconnect/qcom/

bcm-voter.c
   281  struct tcs_cmd cmds[MAX_BCMS];    in qcom_icc_bcm_voter_commit() local
   309  tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);    in qcom_icc_bcm_voter_commit()
   316  cmds, commit_idx);    in qcom_icc_bcm_voter_commit()
   345  tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);    in qcom_icc_bcm_voter_commit()
   347  ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);    in qcom_icc_bcm_voter_commit()
   353  tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);    in qcom_icc_bcm_voter_commit()
   355  ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);    in qcom_icc_bcm_voter_commit()

/linux-6.12.1/drivers/media/platform/renesas/vsp1/

vsp1_dl.c
   160  struct vsp1_dl_ext_cmd *cmds;    member
   439  pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);    in vsp1_dl_cmd_pool_create()
   440  if (!pool->cmds) {    in vsp1_dl_cmd_pool_create()
   453  kfree(pool->cmds);    in vsp1_dl_cmd_pool_create()
   459  struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];    in vsp1_dl_cmd_pool_create()
   473  cmd->cmds = pool->mem + cmd_offset;    in vsp1_dl_cmd_pool_create()
   528  kfree(pool->cmds);    in vsp1_dl_ext_cmd_pool_destroy()
   745  cmd->cmds[0].opcode = cmd->opcode;    in vsp1_dl_ext_cmd_fill_header()
   746  cmd->cmds[0].flags = cmd->flags;    in vsp1_dl_ext_cmd_fill_header()
   747  cmd->cmds[0].address_set = cmd->data_dma;    in vsp1_dl_ext_cmd_fill_header()
    [all …]

/linux-6.12.1/sound/ppc/

pmac.c
    60  rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space);    in snd_pmac_dbdma_alloc()
    61  rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space);    in snd_pmac_dbdma_alloc()
   211  chip->extra_dma.cmds->command = cpu_to_le16(DBDMA_STOP);    in snd_pmac_pcm_prepare()
   221  for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) {    in snd_pmac_pcm_prepare()
   259  for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)    in snd_pmac_pcm_trigger()
   273  for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)    in snd_pmac_pcm_trigger()
   297  volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period];    in snd_pmac_pcm_pointer()
   391  memcpy((void *)emergency_dbdma.cmds, (void *)cp,    in snd_pmac_pcm_dead_xfer()
   396  cp = emergency_dbdma.cmds;    in snd_pmac_pcm_dead_xfer()
   438  cp = emergency_dbdma.cmds;    in snd_pmac_pcm_update()
    [all …]

/linux-6.12.1/drivers/nvme/target/

rdma.c
   104  struct nvmet_rdma_cmd *cmds;    member
   129  struct nvmet_rdma_cmd *cmds;    member
   367  struct nvmet_rdma_cmd *cmds;    in nvmet_rdma_alloc_cmds() local
   370  cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);    in nvmet_rdma_alloc_cmds()
   371  if (!cmds)    in nvmet_rdma_alloc_cmds()
   375  ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);    in nvmet_rdma_alloc_cmds()
   380  return cmds;    in nvmet_rdma_alloc_cmds()
   384  nvmet_rdma_free_cmd(ndev, cmds + i, admin);    in nvmet_rdma_alloc_cmds()
   385  kfree(cmds);    in nvmet_rdma_alloc_cmds()
   391  struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)    in nvmet_rdma_free_cmds() argument
    [all …]

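nvmet_rdma_alloc_cmds() is the classic allocate-array-then-initialize-each pattern with partial unwind: if initializing element i fails, only elements 0..i-1 are torn down before the array itself is freed. A userspace sketch of that shape is below; the per-command init here just allocates a buffer, standing in for the driver's real per-command setup.

    #include <stdlib.h>

    struct cmd {
        void *buf;                      /* stand-in for per-command resources */
    };

    static int cmd_init(struct cmd *c)
    {
        c->buf = malloc(4096);
        return c->buf ? 0 : -1;
    }

    static void cmd_destroy(struct cmd *c)
    {
        free(c->buf);
    }

    /* Allocate nr commands and initialize each; on failure, unwind only the
     * elements already initialized, free the array, and return NULL. */
    static struct cmd *alloc_cmds(int nr)
    {
        struct cmd *cmds = calloc(nr, sizeof(*cmds));
        int i;

        if (!cmds)
            return NULL;

        for (i = 0; i < nr; i++) {
            if (cmd_init(&cmds[i]) < 0)
                goto out_free;
        }
        return cmds;

    out_free:
        while (--i >= 0)
            cmd_destroy(&cmds[i]);
        free(cmds);
        return NULL;
    }
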
/linux-6.12.1/drivers/gpu/drm/xe/

xe_guc_ct_types.h
    44  struct iosys_map cmds;    member
    56  u32 *cmds;    member