/linux-6.12.1/tools/tracing/rtla/src/

osnoise.c:

   24  char *osnoise_get_cpus(struct osnoise_context *context)
   26          if (context->curr_cpus)
   27                  return context->curr_cpus;
   29          if (context->orig_cpus)
   30                  return context->orig_cpus;
   32          context->orig_cpus = tracefs_instance_file_read(NULL, "osnoise/cpus", NULL);
   38          return context->orig_cpus;
   48  int osnoise_set_cpus(struct osnoise_context *context, char *cpus)
   50          char *orig_cpus = osnoise_get_cpus(context);
   57          context->curr_cpus = strdup(cpus);
  [all …]
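
The pair above follows a cache-and-save pattern: the first read of osnoise/cpus is kept in orig_cpus so rtla can restore it on exit, while curr_cpus holds the value the tool wrote. A minimal user-space sketch of that pattern (the tracefs file is simulated by a static buffer; setting_get/setting_set/setting_restore are illustrative names, not rtla helpers):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simulated tracefs file "osnoise/cpus". */
    static char tracefs_cpus[32] = "0-7";

    struct setting {
            char *orig;     /* value found on first read; used for restore */
            char *curr;     /* value we wrote, if any */
    };

    static char *setting_get(struct setting *s)
    {
            if (s->curr)
                    return s->curr;
            if (!s->orig)
                    s->orig = strdup(tracefs_cpus); /* cache original once */
            return s->orig;
    }

    static int setting_set(struct setting *s, const char *val)
    {
            if (!setting_get(s))            /* ensure orig is cached first */
                    return -1;
            free(s->curr);
            s->curr = strdup(val);
            if (!s->curr)
                    return -1;
            snprintf(tracefs_cpus, sizeof(tracefs_cpus), "%s", val);
            return 0;
    }

    static void setting_restore(struct setting *s)
    {
            if (s->orig)
                    snprintf(tracefs_cpus, sizeof(tracefs_cpus), "%s", s->orig);
    }

    int main(void)
    {
            struct setting cpus = { NULL, NULL };

            setting_set(&cpus, "2-3");
            printf("current: %s (original: %s)\n", setting_get(&cpus), cpus.orig);
            setting_restore(&cpus);
            printf("file after restore: %s\n", tracefs_cpus);
            free(cpus.orig);
            free(cpus.curr);
            return 0;
    }

The restore half mirrors the osnoise_restore_*() functions declared in osnoise.h below.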

osnoise.h:

   59  int osnoise_get_context(struct osnoise_context *context);
   60  void osnoise_put_context(struct osnoise_context *context);
   62  int osnoise_set_cpus(struct osnoise_context *context, char *cpus);
   63  void osnoise_restore_cpus(struct osnoise_context *context);
   65  int osnoise_set_runtime_period(struct osnoise_context *context,
   68  void osnoise_restore_runtime_period(struct osnoise_context *context);
   70  int osnoise_set_stop_us(struct osnoise_context *context,
   72  void osnoise_restore_stop_us(struct osnoise_context *context);
   74  int osnoise_set_stop_total_us(struct osnoise_context *context,
   76  void osnoise_restore_stop_total_us(struct osnoise_context *context);
  [all …]

/linux-6.12.1/drivers/misc/vmw_vmci/

vmci_context.c:

   30   * These, along with context lookup, are protected by the
   35          spinlock_t lock; /* Spinlock for context list operations */
   44  static void ctx_signal_notify(struct vmci_ctx *context)
   46          *context->notify = true;
   49  static void ctx_clear_notify(struct vmci_ctx *context)
   51          *context->notify = false;
   58  static void ctx_clear_notify_call(struct vmci_ctx *context)
   60          if (context->pending_datagrams == 0 &&
   61              vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
   62                  ctx_clear_notify(context);
  [all …]
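
Note how ctx_clear_notify_call() drops the shared notify flag only when both the datagram and doorbell queues are empty, so the guest keeps seeing the flag while any work is pending. A hedged, self-contained sketch of that guard (plain counters stand in for vmci_handle_arr_get_size() and the pending-datagram count):

    #include <stdbool.h>
    #include <stdio.h>

    struct ctx {
            bool notify;                    /* shared flag the guest polls */
            unsigned int pending_datagrams;
            unsigned int pending_doorbells;
    };

    static void ctx_signal_notify(struct ctx *c) { c->notify = true; }
    static void ctx_clear_notify(struct ctx *c)  { c->notify = false; }

    /* Clear the flag only when no work of either kind is pending. */
    static void ctx_clear_notify_call(struct ctx *c)
    {
            if (c->pending_datagrams == 0 && c->pending_doorbells == 0)
                    ctx_clear_notify(c);
    }

    int main(void)
    {
            struct ctx c = { .pending_datagrams = 1 };

            ctx_signal_notify(&c);
            ctx_clear_notify_call(&c);      /* stays set: a datagram is pending */
            printf("notify=%d\n", c.notify);

            c.pending_datagrams = 0;
            ctx_clear_notify_call(&c);      /* now it clears */
            printf("notify=%d\n", c.notify);
            return 0;
    }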

vmci_route.c:

   18   * devices. Will set the source context if it is invalid.
   41          /* Must have a valid destination context. */
   42          if (VMCI_INVALID_ID == dst->context)
   46          if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
   63                  /* And we cannot send if the source is the host context. */
   64                  if (VMCI_HOST_CONTEXT_ID == src->context)
   69                   * respect it (both context and resource are invalid).
   70                   * However, if they passed only an invalid context,
   72                   * should set the real context here before passing it
   75                  if (VMCI_INVALID_ID == src->context &&
  [all …]
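
The comments spell out vmci_route()'s contract: reject an invalid destination outright, special-case a hypervisor destination, refuse the host context as a source on that path, and fill in a wholly invalid source on the caller's behalf. A condensed illustration of that decision order (the ID values and OUR_CONTEXT_ID are illustrative, and the real function also checks the resource ID before rewriting the source):

    #include <stdio.h>

    #define VMCI_INVALID_ID     0xffffffffu
    #define VMCI_HYPERVISOR_ID  0u
    #define VMCI_HOST_ID        2u
    #define VMCI_ERR_INVALID    (-1)
    #define OUR_CONTEXT_ID      42u     /* illustrative local context ID */

    static int route(unsigned int *src, unsigned int dst)
    {
            /* Must have a valid destination context. */
            if (dst == VMCI_INVALID_ID)
                    return VMCI_ERR_INVALID;

            if (dst == VMCI_HYPERVISOR_ID) {
                    /* Cannot send if the source is the host context. */
                    if (*src == VMCI_HOST_ID)
                            return VMCI_ERR_INVALID;
                    /* A wholly invalid source is filled in for the caller. */
                    if (*src == VMCI_INVALID_ID)
                            *src = OUR_CONTEXT_ID;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int src = VMCI_INVALID_ID;

            printf("route: %d, src now %u\n",
                   route(&src, VMCI_HYPERVISOR_ID), src);
            return 0;
    }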

/linux-6.12.1/drivers/accel/qaic/

sahara.c:

  108  static int sahara_find_image(struct sahara_context *context, u32 image_id)
  112          if (image_id == context->active_image_id)
  115          if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
  116                  dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
  117                          image_id, context->active_image_id);
  121          if (image_id >= context->table_size || !context->image_table[image_id]) {
  122                  dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
  131          ret = firmware_request_nowarn(&context->firmware,
  132                                        context->image_table[image_id],
  133                                        &context->mhi_dev->dev);
  [all …]
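
sahara_find_image() screens the requested image_id in three steps before touching firmware: already active (nothing to do), a different image active (busy), and out of range or absent from the table (unknown). A hedged reduction of those checks (where the real driver calls firmware_request_nowarn(), this sketch only prints):

    #include <stdio.h>

    #define IMAGE_ID_NONE 0xffffffffu

    struct ctx {
            unsigned int active_image_id;
            unsigned int table_size;
            const char **image_table;   /* firmware name per image id */
    };

    static int find_image(struct ctx *c, unsigned int image_id)
    {
            if (image_id == c->active_image_id)
                    return 0;           /* already the active image */

            if (c->active_image_id != IMAGE_ID_NONE) {
                    fprintf(stderr, "image id %u is not valid as %u is active\n",
                            image_id, c->active_image_id);
                    return -1;
            }

            if (image_id >= c->table_size || !c->image_table[image_id]) {
                    fprintf(stderr, "request for unknown image: %u\n", image_id);
                    return -1;
            }

            /* here the real driver issues firmware_request_nowarn() */
            printf("would load %s\n", c->image_table[image_id]);
            return 0;
    }

    int main(void)
    {
            const char *table[] = { "a.bin", NULL, "c.bin" };
            struct ctx c = { IMAGE_ID_NONE, 3, table };

            find_image(&c, 1);  /* unknown: NULL table slot */
            find_image(&c, 2);  /* ok */
            return 0;
    }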

/linux-6.12.1/security/selinux/ss/

mls.h:

   23  #include "context.h"
   27  int mls_compute_context_len(struct policydb *p, struct context *context);
   28  void mls_sid_to_context(struct policydb *p, struct context *context,
   30  int mls_context_isvalid(struct policydb *p, struct context *c);
   35                      struct context *context, struct sidtab *s, u32 def_sid);
   37  int mls_from_string(struct policydb *p, char *str, struct context *context,
   40  int mls_range_set(struct context *context, struct mls_range *range);
   43                    struct context *oldc, struct context *newc);
   45  int mls_compute_sid(struct policydb *p, struct context *scontext,
   46                      struct context *tcontext, u16 tclass, u32 specified,
  [all …]

mls.c:

   30   * security context string representation of `context'.
   32  int mls_compute_context_len(struct policydb *p, struct context *context)
   44                  u32 index_sens = context->range.level[l].sens;
   50                  e = &context->range.level[l].cat;
   70          if (mls_level_eq(&context->range.level[0],
   71                           &context->range.level[1]))
   82   * Write the security context string representation of
   83   * the MLS fields of `context' into the string `*scontext'.
   86  void mls_sid_to_context(struct policydb *p, struct context *context,
  104                                 context->range.level[l].sens - 1));
  [all …]
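
mls_compute_context_len() and mls_sid_to_context() are a classic two-pass pair: the first pass sizes the string form of the MLS fields so the caller can allocate exactly once, and the second writes into the caller's buffer, advancing the *scontext cursor. A self-contained sketch of the protocol, using a toy sensitivity range in place of real MLS levels:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_ctx {
            unsigned int low, high; /* stand-ins for range.level[0..1].sens */
    };

    /* Pass 1: how many bytes will the string form take (incl. NUL)? */
    static size_t ctx_len(const struct toy_ctx *c)
    {
            char tmp[64];

            if (c->low == c->high)
                    return (size_t)snprintf(tmp, sizeof(tmp), "s%u", c->low) + 1;
            return (size_t)snprintf(tmp, sizeof(tmp), "s%u-s%u",
                                    c->low, c->high) + 1;
    }

    /* Pass 2: write the string and advance the caller's cursor. */
    static void ctx_to_str(const struct toy_ctx *c, char **scontext)
    {
            int n;

            if (c->low == c->high)
                    n = sprintf(*scontext, "s%u", c->low);
            else
                    n = sprintf(*scontext, "s%u-s%u", c->low, c->high);
            *scontext += n;
    }

    int main(void)
    {
            struct toy_ctx c = { 1, 3 };
            char *buf = malloc(ctx_len(&c)), *p = buf;

            ctx_to_str(&c, &p);
            printf("%s (%zu bytes)\n", buf, ctx_len(&c));
            free(buf);
            return 0;
    }

The equal-level check mirrors line 70 above: a degenerate range prints as a single level rather than "low-high".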

/linux-6.12.1/drivers/gpu/drm/etnaviv/

etnaviv_mmu.c:

   16  static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
   29                  unmapped_page = context->global->ops->unmap(context, iova,
   39  static int etnaviv_context_map(struct etnaviv_iommu_context *context,
   55                  ret = context->global->ops->map(context, iova, paddr, pgsize,
   67          etnaviv_context_unmap(context, orig_iova, orig_size - size);
   72  static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
   79          if (!context || !sgt)
   88                  ret = etnaviv_context_map(context, da, pa, bytes, prot);
   95          context->flush_seq++;
  100          etnaviv_context_unmap(context, iova, da - iova);
  [all …]
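
etnaviv_context_map() maps a region page by page and, if any step fails, unwinds exactly what it already mapped: it saved orig_iova/orig_size up front and unmaps orig_size - size, the portion consumed before the failure (line 67 above). The rollback idiom in isolation, over a fake page-table op:

    #include <stdbool.h>
    #include <stdio.h>

    #define PGSIZE 4096u

    /* Fake single-page ops; fail mapping one chosen address. */
    static unsigned int fail_at = 2 * PGSIZE;

    static bool map_page(unsigned int iova)
    {
            if (iova == fail_at)
                    return false;
            printf("map   %#x\n", iova);
            return true;
    }

    static void unmap_range(unsigned int iova, unsigned int size)
    {
            for (; size; iova += PGSIZE, size -= PGSIZE)
                    printf("unmap %#x\n", iova);
    }

    static int context_map(unsigned int iova, unsigned int size)
    {
            unsigned int orig_iova = iova, orig_size = size;

            while (size) {
                    if (!map_page(iova))
                            goto unwind;
                    iova += PGSIZE;
                    size -= PGSIZE;
            }
            return 0;

    unwind:
            /* undo only what was actually mapped so far */
            unmap_range(orig_iova, orig_size - size);
            return -1;
    }

    int main(void)
    {
            printf("context_map: %d\n", context_map(0, 4 * PGSIZE));
            return 0;
    }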

etnaviv_iommu.c:

   28  to_v1_context(struct etnaviv_iommu_context *context)
   30          return container_of(context, struct etnaviv_iommuv1_context, base);
   33  static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
   35          struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
   37          drm_mm_takedown(&context->mm);
   39          dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
   42          context->global->v1.shared_context = NULL;
   47  static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
   51          struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
   62  static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
  [all …]
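
to_v1_context() is the usual container_of downcast: the generic etnaviv_iommu_context is embedded as the base member of the version-specific context, and the outer struct is recovered from a pointer to that member. A minimal illustration with a local container_of equivalent to the kernel macro:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct iommu_context {          /* generic part */
            int flush_seq;
    };

    struct iommuv1_context {        /* version-specific container */
            unsigned long pgtable_dma;
            struct iommu_context base;
    };

    static struct iommuv1_context *to_v1(struct iommu_context *ctx)
    {
            return container_of(ctx, struct iommuv1_context, base);
    }

    int main(void)
    {
            struct iommuv1_context v1 = { .pgtable_dma = 0x1000 };
            struct iommu_context *generic = &v1.base;

            printf("pgtable_dma=%#lx\n", to_v1(generic)->pgtable_dma);
            return 0;
    }

The v2 variant below uses the same shape with a different container type.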

etnaviv_iommu_v2.c:

   42  to_v2_context(struct etnaviv_iommu_context *context)
   44          return container_of(context, struct etnaviv_iommuv2_context, base);
   47  static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
   49          struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
   52          drm_mm_takedown(&context->mm);
   56          dma_free_wc(context->global->dev, SZ_4K,
   61          dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
   64          clear_bit(v2_context->id, context->global->v2.pta_alloc);
   92  static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
   96          struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
  [all …]

/linux-6.12.1/kernel/

auditsc.c:

   28   * Subject and object context labeling support added by <danjones@us.ibm.com>
  220   * it's going to remain 1-element for almost any setup) until we free context itself.
  456  /* Determine if any context name data matches a rule's watch data */
  814   * @ctx: audit context
  848   * @ctx: audit context
  914  static inline void audit_proctitle_free(struct audit_context *context)
  916          kfree(context->proctitle.value);
  917          context->proctitle.value = NULL;
  918          context->proctitle.len = 0;
  921  static inline void audit_free_module(struct audit_context *context)
  [all …]

/linux-6.12.1/drivers/gpu/drm/tegra/

uapi.c:

   31  static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
   36          if (context->memory_context)
   37                  host1x_memory_context_put(context->memory_context);
   39          xa_for_each(&context->mappings, id, mapping)
   42          xa_destroy(&context->mappings);
   44          host1x_channel_put(context->channel);
   46          kfree(context);
   51          struct tegra_drm_context *context;
   55          xa_for_each(&file->contexts, id, context)
   56                  tegra_drm_channel_context_close(context);
  [all …]

submit.c:

   26  #define SUBMIT_ERR(context, fmt, ...) \
   27          dev_err_ratelimited(context->client->base.dev, \
  146  tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
  150          xa_lock(&context->mappings);
  152          mapping = xa_load(&context->mappings, id);
  156          xa_unlock(&context->mappings);
  180                             struct tegra_drm_context *context,
  187                  SUBMIT_ERR(context, "gather_data_words cannot be zero");
  192                  SUBMIT_ERR(context, "gather_data_words is too large");
  198                  SUBMIT_ERR(context, "failed to allocate memory for bo info");
  [all …]
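
tegra_drm_mapping_get() is the lookup-under-lock idiom: lock the xarray, load the entry, take a reference while the lock still pins it, then unlock; the caller then holds the mapping safely even if it is concurrently removed from the table. A user-space sketch with a mutex and a manual refcount standing in for xa_lock()/kref (link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mapping {
            int refcount;
            int id;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct mapping *table[16];   /* toy id -> mapping table */

    static struct mapping *mapping_get(unsigned int id)
    {
            struct mapping *m = NULL;

            pthread_mutex_lock(&table_lock);
            if (id < 16 && table[id]) {
                    m = table[id];
                    m->refcount++;  /* taken while the lock pins the entry */
            }
            pthread_mutex_unlock(&table_lock);
            return m;
    }

    static void mapping_put(struct mapping *m)
    {
            pthread_mutex_lock(&table_lock);
            if (--m->refcount == 0)
                    free(m);
            pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
            struct mapping *m = calloc(1, sizeof(*m));

            m->refcount = 1;    /* the table's reference */
            m->id = 3;
            table[3] = m;

            struct mapping *held = mapping_get(3);
            printf("got id %d, refcount %d\n", held->id, held->refcount);
            table[3] = NULL;    /* dropped from the table... */
            mapping_put(m);     /* ...and the table's ref released */
            printf("still safe: id %d\n", held->id);
            mapping_put(held);
            return 0;
    }

The important ordering is that the reference is taken before the lock is dropped; taking it afterwards would race with removal.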

/linux-6.12.1/fs/xfs/

xfs_attr_list.c:

   55          struct xfs_attr_list_context *context)
   57          struct xfs_attrlist_cursor_kern *cursor = &context->cursor;
   58          struct xfs_inode *dp = context->dp;
   69          trace_xfs_attr_list_sf(context);
   80          if (context->bufsize == 0 ||
   82              (dp->i_af.if_bytes + sf->count * 16) < context->bufsize)) {
   84                  if (XFS_IS_CORRUPT(context->dp->i_mount,
   88                          xfs_dirattr_mark_sick(context->dp, XFS_ATTR_FORK);
   91                  context->put_listent(context,
  101                  if (context->seen_enough)
  [all …]
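
The attr-list walk drives a caller-supplied context->put_listent callback per attribute and checks context->seen_enough after each call, so a full buffer stops the iteration cleanly rather than through an error path. A sketch of that callback-with-stop-flag protocol:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct list_ctx {
            char buf[16];
            size_t used;
            bool seen_enough;   /* set by the callback to stop iteration */
    };

    static void put_listent(struct list_ctx *c, const char *name)
    {
            size_t len = strlen(name) + 1;

            if (c->used + len > sizeof(c->buf)) {
                    c->seen_enough = true;  /* buffer full: stop the walk */
                    return;
            }
            memcpy(c->buf + c->used, name, len);
            c->used += len;
    }

    static void list_attrs(struct list_ctx *c, const char **names, int n)
    {
            for (int i = 0; i < n; i++) {
                    put_listent(c, names[i]);
                    if (c->seen_enough) /* checked after every callback */
                            return;
            }
    }

    int main(void)
    {
            const char *names[] = { "user.a", "user.b", "user.c" };
            struct list_ctx c = { .used = 0 };

            list_attrs(&c, names, 3);
            printf("stored %zu bytes, stopped early: %d\n",
                   c.used, c.seen_enough);
            return 0;
    }

xfs_ioc_attr_put_listent() in xfs_handle.c below is one concrete implementation of such a callback.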

xfs_handle.c:

  150          void *context,
  353          struct xfs_attr_list_context *context,
  360          struct xfs_attrlist *alist = context->buffer;
  364          ASSERT(!context->seen_enough);
  365          ASSERT(context->count >= 0);
  366          ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
  367          ASSERT(context->firstu >= sizeof(*alist));
  368          ASSERT(context->firstu <= context->bufsize);
  373          if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
  377                  context->count * sizeof(alist->al_offset[0]);
  [all …]

/linux-6.12.1/drivers/net/ethernet/qlogic/qed/

qed_nvmetcp_fw_funcs.c:

  129  init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
  133          context->ystorm_st_context.state.cccid = task_params->host_cccid;
  134          SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
  135          context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
  136          context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
  145          struct e5_nvmetcp_task_context *context = task_params->context;
  146          const u8 val_byte = context->mstorm_ag_context.cdu_validation;
  149          memset(context, 0, sizeof(*context));
  150          init_nvmetcp_task_params(context, task_params,
  159          context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
  [all …]
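
init_default_nvmetcp_task() zeroes the whole task context but first saves mstorm_ag_context.cdu_validation; the full function (not shown in the excerpt) presumably writes that byte back after the memset, since the CDU validation byte is owned by hardware and must survive the reset. The save-around-memset idiom in isolation, with an illustrative struct layout:

    #include <stdio.h>
    #include <string.h>

    struct task_context {
            unsigned char cdu_validation;   /* owned by HW: must survive reset */
            unsigned int state[8];
    };

    static void init_default_task(struct task_context *ctx)
    {
            const unsigned char val_byte = ctx->cdu_validation;

            memset(ctx, 0, sizeof(*ctx));   /* wipe everything... */
            ctx->cdu_validation = val_byte; /* ...but put this byte back */
    }

    int main(void)
    {
            struct task_context ctx = { .cdu_validation = 0xAB, .state = { 1 } };

            init_default_task(&ctx);
            printf("cdu_validation=%#x state[0]=%u\n",
                   ctx.cdu_validation, ctx.state[0]);
            return 0;
    }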

/linux-6.12.1/drivers/platform/x86/intel/int1092/

intel_sar.c:

   33   * @context: pointer to driver context structure
   36   * context->reg_value will never exceed MAX_REGULATORY
   38  static void update_sar_data(struct wwan_sar_context *context)
   41                  &context->config_data[context->reg_value];
   44              context->sar_data.device_mode < config->total_dev_mode) {
   48                  if (context->sar_data.device_mode ==
   53                          context->sar_data.antennatable_index = dev_mode->antennatable_index;
   54                          context->sar_data.bandtable_index = dev_mode->bandtable_index;
   55                          context->sar_data.sartable_index = dev_mode->sartable_index;
   64   * @context: pointer to driver context structure
  [all …]

/linux-6.12.1/drivers/gpu/drm/imagination/

pvr_context.h:

   30   * struct pvr_context - Context data
   33          /** @ref_count: Refcount for context. */
   39          /** @vm_ctx: Pointer to associated VM context. */
   42          /** @type: Type of context. */
   45          /** @flags: Context flags. */
   48          /** @priority: Context priority*/
   51          /** @fw_obj: FW object representing FW-side context data. */
   54          /** @data: Pointer to local copy of FW context data. */
   57          /** @data_size: Size of FW context data, in bytes. */
   60          /** @ctx_id: FW context ID. */
  [all …]

/linux-6.12.1/drivers/gpu/drm/amd/display/dc/dml/dcn30/

dcn30_fpu.c:

  365  void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
  371          if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
  372              context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
  373                  …context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries…
  374          …context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[W…
  375          …context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_in…
  380          struct dc *dc, struct dc_state *context,
  385          int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
  387          double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
  388          …bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clo…
  [all …]

/linux-6.12.1/drivers/gpu/drm/amd/display/dc/dml/dcn32/

dcn32_fpu.c:

  183  static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context,
  276          struct dc_state *context,
  282          struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
  284          …enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context-…
  290          …vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_suppor…
  291          context->bw_ctx.dml.soc.dram_clock_change_latency_us =
  293          dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
  297              dcn32_subvp_in_use(dc, context))
  298          …vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_suppor…
  300          if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
  [all …]

/linux-6.12.1/arch/s390/include/asm/

mmu_context.h:

   24          spin_lock_init(&mm->context.lock);
   25          INIT_LIST_HEAD(&mm->context.gmap_list);
   26          cpumask_clear(&mm->context.cpu_attach_mask);
   27          atomic_set(&mm->context.flush_count, 0);
   28          atomic_set(&mm->context.protected_count, 0);
   29          mm->context.gmap_asce = 0;
   30          mm->context.flush_mm = 0;
   32          mm->context.alloc_pgste = page_table_allocate_pgste ||
   34                  (current->mm && current->mm->context.alloc_pgste);
   35          mm->context.has_pgste = 0;
  [all …]

/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/

en_resources.c:

   42                              int user_prio, struct mlx4_qp_context *context)
   47          memset(context, 0, sizeof(*context));
   48          context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
   49          context->pd = cpu_to_be32(mdev->priv_pdn);
   50          context->mtu_msgmax = 0xff;
   52                  context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
   54                  context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
   56                  context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
   59                  context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
   61          context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
  [all …]
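
The rq/sq size encoding packs two power-of-two values into one byte: log2(size) in bits 7:3 and log2(stride) - 4 in bits 2:0 (the subtraction suggests a 16-byte minimum stride). A worked check of the arithmetic with an ilog2() stand-in (the kernel's ilog2() is a macro with the same result for power-of-two inputs):

    #include <stdio.h>

    /* integer log2 for power-of-two inputs */
    static unsigned int ilog2(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int size = 1024;   /* ring entries */
            unsigned int stride = 64;   /* bytes per entry */
            unsigned char rq_size_stride =
                    ilog2(size) << 3 | (ilog2(stride) - 4);

            /* ilog2(1024)=10, ilog2(64)=6 -> 10<<3 | 2 = 0x52 */
            printf("rq_size_stride=%#x\n", rq_size_stride);
            return 0;
    }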

/linux-6.12.1/include/misc/

cxl.h:

   31   * Context lifetime overview:
   33   * An AFU context may be inited and then started and stopped multiple times
   42   * Once released, a context can't be started again.
   44   * One context is inited by the cxl driver for every pci_dev. This is to be
   45   * used as a default kernel context. cxl_get_context() will get this
   46   * context. This context will be released by PCI hot unplug, so doesn't need to
   52   * Once a context has been inited, IRQs may be configured. Firstly these IRQs
   65   * On pci_enabled_device(), the cxl driver will init a single cxl context for
   66   * use by the driver. It doesn't start this context (as that will likely
   69   * This gets the default context associated with this pci_dev. This context
  [all …]
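
The lifetime rules in this comment are strict: a context may be started and stopped repeatedly, released exactly once, and never started after release. A hedged state-machine sketch of just those rules (state and function names are mine, derived from the comment text, not from the cxl API):

    #include <stdio.h>

    enum ctx_state { CTX_INITED, CTX_STARTED, CTX_RELEASED };

    struct afu_ctx { enum ctx_state state; };

    static int ctx_start(struct afu_ctx *c)
    {
            if (c->state != CTX_INITED)
                    return -1;  /* can't start a running or released ctx */
            c->state = CTX_STARTED;
            return 0;
    }

    static int ctx_stop(struct afu_ctx *c)
    {
            if (c->state != CTX_STARTED)
                    return -1;
            c->state = CTX_INITED;  /* stopped: may be started again */
            return 0;
    }

    static int ctx_release(struct afu_ctx *c)
    {
            if (c->state == CTX_STARTED)
                    ctx_stop(c);    /* assumption: release stops a running ctx */
            if (c->state == CTX_RELEASED)
                    return -1;
            c->state = CTX_RELEASED;    /* once released, never restarted */
            return 0;
    }

    int main(void)
    {
            struct afu_ctx c = { CTX_INITED };

            ctx_start(&c); ctx_stop(&c);    /* start/stop cycles are fine */
            ctx_start(&c); ctx_stop(&c);
            ctx_release(&c);
            printf("start after release: %d\n", ctx_start(&c)); /* -1 */
            return 0;
    }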

/linux-6.12.1/drivers/gpu/drm/amd/display/dc/dml2/dml21/

dml21_wrapper.c:

  119  static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct…
  129          context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
  132          …memcpy(&context->bw_ctx.bw.dcn.arb_regs, &in_ctx->v21.mode_programming.programming->global_regs.ar…
  135          …context->bw_ctx.bw.dcn.compbuf_size_kb = (int)in_ctx->v21.mode_programming.programming->global_reg…
  137          context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
  138          context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
  139          context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
  153          …num_pipes = dml21_find_dc_pipes_for_plane(dc, context, in_ctx, dc_main_pipes, dc_phantom_pipes, dm…
  160          dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog);
  163          dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
  [all …]

/linux-6.12.1/drivers/gpu/drm/amd/display/dc/resource/dcn32/

dcn32_resource_helpers.c:

   86   * @context: new dc state
   92          struct dc_state *context)
   94          if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
   98                  …return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_s…
  108          struct dc_state *context)
  114          struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
  132          dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
  154          struct dc_state *context)
  159          struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
  171          struct dc_state *context)
  [all …]