/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/
amdgpu_xcp.c:
    66   struct amdgpu_xcp *xcp;    in amdgpu_xcp_run_transition() (local)
    69   if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)    in amdgpu_xcp_run_transition()
    72   xcp = &xcp_mgr->xcp[xcp_id];    in amdgpu_xcp_run_transition()
    74   xcp_ip = &xcp->ip[i];    in amdgpu_xcp_run_transition()
    108  struct amdgpu_xcp *xcp;    in __amdgpu_xcp_add_block() (local)
    113  xcp = &xcp_mgr->xcp[xcp_id];    in __amdgpu_xcp_add_block()
    114  xcp->ip[ip->ip_id] = *ip;    in __amdgpu_xcp_add_block()
    115  xcp->ip[ip->ip_id].valid = true;    in __amdgpu_xcp_add_block()
    117  xcp->valid = true;    in __amdgpu_xcp_add_block()
    133  xcp_mgr->xcp[i].valid = false;    in amdgpu_xcp_init()
    [all …]

amdgpu_xcp.h:
    94   struct amdgpu_xcp xcp[MAX_XCP];    (member)
    110  struct amdgpu_xcp *xcp, uint8_t *mem_id);
    136  int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
    173  if (xcp_mgr->xcp[*from].valid)    in amdgpu_get_next_xcp()
    174  return &xcp_mgr->xcp[*from];    in amdgpu_get_next_xcp()
    181  #define for_each_xcp(xcp_mgr, xcp, i) \    (argument)
    182  for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
    183  ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))

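The for_each_xcp() helper at lines 181-183 above wraps amdgpu_get_next_xcp() so that callers only see partitions whose valid flag is set. A minimal usage sketch, assuming an amdgpu_device *adev with an initialized xcp_mgr (the loop body is illustrative only):

    struct amdgpu_xcp *xcp;
    int i;

    /* amdgpu_get_next_xcp() returns only slots with ->valid set and NULL
     * once no further valid partition exists (see lines 173-174 above). */
    for_each_xcp(adev->xcp_mgr, xcp, i)
            dev_info(adev->dev, "partition %d is valid\n", i);
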
aqua_vanjaram.c:
    104  if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {    in aqua_vanjaram_set_xcp_id()
    122  num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]    in aqua_vanjaram_xcp_gpu_sched_update()
    124  adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]    in aqua_vanjaram_xcp_gpu_sched_update()
    138  atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);    in aqua_vanjaram_xcp_sched_list_update()
    139  memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));    in aqua_vanjaram_xcp_sched_list_update()
    199  total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);    in aqua_vanjaram_select_scheds()
    208  if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {    in aqua_vanjaram_select_scheds()
    209  *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;    in aqua_vanjaram_select_scheds()
    210  *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;    in aqua_vanjaram_select_scheds()
    211  atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);    in aqua_vanjaram_select_scheds()
    [all …]

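The aqua_vanjaram_select_scheds() hits (lines 199-211 above) outline the partition load-balancing: ref_cnt is read per partition, a target sel_xcp_id is chosen, its scheduler list is handed back, and its counter is bumped. A hedged sketch of the selection idea only; num_xcps and the plain "fewest users wins" policy are assumptions here, not the driver's exact logic:

    #include <linux/atomic.h>
    #include <linux/limits.h>

    /* Illustrative: choose the partition with the fewest active users. */
    static int pick_least_used_xcp(struct amdgpu_xcp_mgr *mgr, int num_xcps)
    {
            int i, sel = 0, min_ref = INT_MAX;

            for (i = 0; i < num_xcps; i++) {
                    int cnt = atomic_read(&mgr->xcp[i].ref_cnt);

                    if (cnt < min_ref) {
                            min_ref = cnt;
                            sel = i;
                    }
            }
            atomic_inc(&mgr->xcp[sel].ref_cnt);     /* mirrors line 211 */
            return sel;
    }
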
amdgpu_amdkfd.c:
    451  struct amdgpu_xcp *xcp)    in amdgpu_amdkfd_get_local_mem_info() (argument)
    455  if (xcp) {    in amdgpu_amdkfd_get_local_mem_info()
    458  KFD_XCP_MEMORY_SIZE(adev, xcp->id);    in amdgpu_amdkfd_get_local_mem_info()
    461  KFD_XCP_MEMORY_SIZE(adev, xcp->id);    in amdgpu_amdkfd_get_local_mem_info()

amdgpu_amdkfd.h:
    248  struct amdgpu_xcp *xcp);
    362  (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

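Line 362 above is the tail of a guarded ternary: an xcp_id resolves to that partition's memory-node id, with -1 as the "no partition" fallback. A sketch of the pattern under a hypothetical macro name, since the real name and guard condition are truncated out of the hit:

    /* Hypothetical name; only the "...mem_id : -1" tail is visible above. */
    #define XCP_MEM_ID_OR_INVALID(adev, xcp_id)                     \
            ((adev)->xcp_mgr && (xcp_id) >= 0 ?                     \
             (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)
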
amdgpu_kms.c:
    571  struct amdgpu_xcp *xcp;    in amdgpu_info_ioctl() (local)
    623  xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id];    in amdgpu_info_ioctl()
    626  ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);    in amdgpu_info_ioctl()
    632  ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_SDMA, &inst_mask);    in amdgpu_info_ioctl()
    638  ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);    in amdgpu_info_ioctl()
    644  ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);    in amdgpu_info_ioctl()

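The amdgpu_info_ioctl() hits show how the partition bound to a file descriptor (fpriv->xcp_id) is translated into per-IP instance masks. A hedged sketch of that query path, assuming the ioctl's surrounding locals and trimming error handling:

    #include <linux/bitops.h>   /* hweight32() */

    struct amdgpu_xcp *xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id];
    uint32_t inst_mask;
    unsigned int num_gfx;       /* local assumed for this sketch */

    /* Which GFX instances belong to this client's partition? */
    if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask))
            num_gfx = hweight32(inst_mask);
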
/linux-6.12.1/arch/mips/math-emu/ |
cp1emu.c:
    782  static inline int cop1_64bit(struct pt_regs *xcp)    in cop1_64bit() (argument)
    800  if (cop1_64bit(xcp) && !hybrid_fprs()) \
    808  if (cop1_64bit(xcp) && !hybrid_fprs()) { \
    829  ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0))
    834  fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \
    848  static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,    in cop1_cfc() (argument)
    858  (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);    in cop1_cfc()
    868  (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);    in cop1_cfc()
    876  (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);    in cop1_cfc()
    887  (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);    in cop1_cfc()
    [all …]

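The masking expression at lines 829 and 834, (x) & ~(cop1_64bit(xcp) ^ 1), encodes the MIPS FPR pairing rule: with 32-bit FPRs (cop1_64bit() returning 0) an odd register holds the upper half of the preceding even one, so the index is forced even; with 64-bit FPRs it passes through unchanged. A small sketch of the same trick, assuming cop1_64bit() yields exactly 0 or 1:

    /* fpr_index(xcp, 5) -> 4 on a 32-bit-FPR config, 5 on a 64-bit one:
     * cop1_64bit(xcp) ^ 1 is 1 only in 32-bit mode, so ~(...) clears bit 0. */
    static inline unsigned int fpr_index(struct pt_regs *xcp, unsigned int x)
    {
            return x & ~(cop1_64bit(xcp) ^ 1);
    }
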
dsemul.c:
    292  bool do_dsemulret(struct pt_regs *xcp)    in do_dsemulret() (argument)
    301  xcp->cp0_epc = current->thread.bd_emu_cont_pc;    in do_dsemulret()
    302  pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);    in do_dsemulret()

/linux-6.12.1/arch/mips/include/asm/ |
dsemul.h:
    52   extern bool do_dsemulret(struct pt_regs *xcp);
    54   static inline bool do_dsemulret(struct pt_regs *xcp)    in do_dsemulret() (argument)

fpu_emulator.h:
    168  extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,

/linux-6.12.1/drivers/gpu/drm/amd/amdkfd/ |
kfd_device.c:
    832  node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);    in kgd2kfd_device_init()
    834  if (node->xcp) {    in kgd2kfd_device_init()
    835  amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,    in kgd2kfd_device_init()
    843  if (node->xcp) {    in kgd2kfd_device_init()
    845  node->node_id, node->xcp->mem_id,    in kgd2kfd_device_init()
    876  &node->local_mem_info, node->xcp);    in kgd2kfd_device_init()

kfd_migrate.c:
    515  node->xcp ? node->xcp->id : 0);    in svm_migrate_ram_to_vram()
    561  node->xcp ? node->xcp->id : 0);    in svm_migrate_ram_to_vram()

kfd_priv.h:
    272   struct amdgpu_xcp *xcp;    (member)
    1530  if (node->xcp)    in kfd_devcgroup_check_permission()
    1531  ddev = node->xcp->ddev;    in kfd_devcgroup_check_permission()

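The kfd_devcgroup_check_permission() hits (lines 1530-1531 above) show a partitioned node checking device-cgroup access against its partition's own drm_device rather than a device-wide one. A hedged sketch of that selection; the unpartitioned fallback branch is an assumption here:

    struct drm_device *ddev;

    if (node->xcp)
            ddev = node->xcp->ddev;             /* per-partition DRM device */
    else
            ddev = adev_to_drm(node->adev);     /* whole-GPU fallback (assumed) */
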
kfd_topology.c:
    1214  dev->gpu->xcp);    in kfd_fill_mem_clk_max_info()
    2027  if (gpu->xcp && !gpu->xcp->ddev) {    in kfd_topology_add_device()
    2090  if (gpu->xcp)    in kfd_topology_add_device()
    2091  dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index;    in kfd_topology_add_device()

kfd_svm.c:
    600   if (node->xcp)    in svm_range_vram_node_new()
    601   bp.xcp_id_plus1 = node->xcp->id + 1;    in svm_range_vram_node_new()
    1253  (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))    in svm_range_get_pte_flags()
    2036  if (adev->kfd.dev->nodes[i]->xcp)    in svm_range_set_max_pages()
    2037  id = adev->kfd.dev->nodes[i]->xcp->id;    in svm_range_set_max_pages()

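Line 601's bp.xcp_id_plus1 = node->xcp->id + 1 points at a plus-one encoding: 0 stands for "no specific partition", so real partition ids are stored shifted up by one. A tiny sketch of the convention with hypothetical helper names:

    /* Hypothetical helpers; 0 = unpartitioned, n + 1 = partition n. */
    static inline uint32_t xcp_id_encode(int xcp_id)
    {
            return xcp_id < 0 ? 0 : xcp_id + 1;
    }

    static inline int xcp_id_decode(uint32_t plus1)
    {
            return (int)plus1 - 1;      /* -1 when no partition was set */
    }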