/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/

umc_v6_7.c
     61  uint64_t mc_umc_status, uint32_t umc_reg_offset)    in umc_v6_7_query_error_status_helper() argument
     66  if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)    in umc_v6_7_query_error_status_helper()
     69  if (mc_umc_status)    in umc_v6_7_query_error_status_helper()
     70  dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);    in umc_v6_7_query_error_status_helper()
     98  uint64_t mc_umc_status;    in umc_v6_7_ecc_info_query_correctable_error_count() local
    109  mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;    in umc_v6_7_ecc_info_query_correctable_error_count()
    110  if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&    in umc_v6_7_ecc_info_query_correctable_error_count()
    111  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {    in umc_v6_7_ecc_info_query_correctable_error_count()
    114  umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);    in umc_v6_7_ecc_info_query_correctable_error_count()
    140  uint64_t mc_umc_status;    in umc_v6_7_ecc_info_querry_uncorrectable_error_count() local
    [all …]
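
The ECC-info query path above reduces to one test: a channel's 64-bit MCA status word counts as a correctable error when its Val and CECC fields are both 1. Below is a minimal, self-contained sketch of that check in plain C; get_field() and the bit positions are stand-ins for REG_GET_FIELD() and the real MCA_UMC_UMC0_MCUMC_STATUST0 field layout defined in the amdgpu register headers, not the actual driver code.

/* Illustration only: extract single-bit fields from a 64-bit MCA status word
 * and apply the Val && CECC correctable-error test used in the UMC code.
 * The shift values below are placeholders, not the real register layout. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define STATUS_VAL_SHIFT   63	/* hypothetical position of the Val bit */
#define STATUS_CECC_SHIFT  46	/* hypothetical position of the CECC bit */

static inline uint64_t get_field(uint64_t status, unsigned int shift)
{
	return (status >> shift) & 0x1;	/* single-bit fields in this sketch */
}

static bool is_correctable(uint64_t mc_umc_status)
{
	/* mirrors: Val == 1 && CECC == 1 */
	return get_field(mc_umc_status, STATUS_VAL_SHIFT) == 1 &&
	       get_field(mc_umc_status, STATUS_CECC_SHIFT) == 1;
}

int main(void)
{
	uint64_t status = (1ULL << STATUS_VAL_SHIFT) | (1ULL << STATUS_CECC_SHIFT);

	printf("correctable: %d\n", is_correctable(status));
	return 0;
}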

umc_v8_10.c
    107  uint64_t mc_umc_status;    in umc_v8_10_query_correctable_error_count() local
    117  mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);    in umc_v8_10_query_correctable_error_count()
    118  if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&    in umc_v8_10_query_correctable_error_count()
    119  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)    in umc_v8_10_query_correctable_error_count()
    127  uint64_t mc_umc_status;    in umc_v8_10_query_uncorrectable_error_count() local
    133  mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);    in umc_v8_10_query_uncorrectable_error_count()
    134  if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&    in umc_v8_10_query_uncorrectable_error_count()
    135  (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||    in umc_v8_10_query_uncorrectable_error_count()
    136  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||    in umc_v8_10_query_uncorrectable_error_count()
    137  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||    in umc_v8_10_query_uncorrectable_error_count()
    [all …]
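
umc_v8_10.c takes the MMIO path rather than the ECC-info table: the status word is read with RREG64_PCIE() at (mc_umc_status_addr + umc_reg_offset) * 4, and an uncorrectable error is counted when Val is set together with Deferred, UECC or PCC. A rough sketch of that per-channel scan, with a stubbed register read and made-up bit positions, could look like this; none of it is the real driver code.

/* Sketch of the per-channel status scan; read_reg64() and bit() are
 * stand-ins for RREG64_PCIE() and REG_GET_FIELD(). */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_CHANNELS 8	/* placeholder channel count */

/* stub: in the driver this would be RREG64_PCIE((base + offset) * 4) */
static uint64_t read_reg64(uint32_t byte_offset)
{
	(void)byte_offset;
	return 0;	/* pretend the hardware reports no errors */
}

static bool bit(uint64_t v, unsigned int shift) { return (v >> shift) & 0x1; }

/* hypothetical single-bit positions for Val/Deferred/UECC/PCC */
enum { VAL = 63, DEFERRED = 44, UECC = 45, PCC = 57 };

int main(void)
{
	uint32_t base = 0x100;		/* placeholder for mc_umc_status_addr */
	unsigned long ue_count = 0;

	for (uint32_t ch = 0; ch < NUM_CHANNELS; ch++) {
		/* the real driver computes a per-channel umc_reg_offset;
		 * a plain channel index is used here for brevity */
		uint64_t status = read_reg64((base + ch) * 4);

		/* mirrors: Val && (Deferred || UECC || PCC) */
		if (bit(status, VAL) &&
		    (bit(status, DEFERRED) || bit(status, UECC) || bit(status, PCC)))
			ue_count++;
	}
	printf("uncorrectable errors: %lu\n", ue_count);
	return 0;
}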

umc_v8_7.c
     54  uint64_t mc_umc_status;    in umc_v8_7_ecc_info_query_correctable_error_count() local
     63  mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;    in umc_v8_7_ecc_info_query_correctable_error_count()
     64  if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&    in umc_v8_7_ecc_info_query_correctable_error_count()
     65  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)    in umc_v8_7_ecc_info_query_correctable_error_count()
     73  uint64_t mc_umc_status;    in umc_v8_7_ecc_info_querry_uncorrectable_error_count() local
     80  mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;    in umc_v8_7_ecc_info_querry_uncorrectable_error_count()
     81  if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&    in umc_v8_7_ecc_info_querry_uncorrectable_error_count()
     82  (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||    in umc_v8_7_ecc_info_querry_uncorrectable_error_count()
     83  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||    in umc_v8_7_ecc_info_querry_uncorrectable_error_count()
     84  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||    in umc_v8_7_ecc_info_querry_uncorrectable_error_count()
     [all …]

umc_v12_0.c
     73  bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)    in umc_v12_0_is_deferred_error() argument
     77  mc_umc_status,    in umc_v12_0_is_deferred_error()
     78  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),    in umc_v12_0_is_deferred_error()
     79  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),    in umc_v12_0_is_deferred_error()
     80  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),    in umc_v12_0_is_deferred_error()
     81  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),    in umc_v12_0_is_deferred_error()
     82  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),    in umc_v12_0_is_deferred_error()
     83  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)    in umc_v12_0_is_deferred_error()
     87  (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&    in umc_v12_0_is_deferred_error()
     88  (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));    in umc_v12_0_is_deferred_error()
     [all …]
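
umc_v12_0.c wraps the classification into predicates; umc_v12_0_is_deferred_error() first dumps the interesting status fields (Val, Poison, Deferred, PCC, UC, TCC) and then reports whether Val == 1 && Deferred == 1. A toy version of such a predicate, with illustrative bit positions rather than the real register layout, is sketched below.

/* Sketch of a deferred-error predicate in the style shown above: dump the
 * status bits of interest, then report Val && Deferred.  All field
 * positions are placeholders for the real MCA status layout. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int bit(uint64_t v, unsigned int shift) { return (v >> shift) & 0x1; }

/* hypothetical bit positions */
enum { VAL = 63, POISON = 43, DEFERRED = 44, UC = 45, TCC = 55, PCC = 57 };

static bool is_deferred_error(uint64_t mc_umc_status)
{
	printf("status 0x%llx: Val %u Poison %u Deferred %u PCC %u UC %u TCC %u\n",
	       (unsigned long long)mc_umc_status,
	       bit(mc_umc_status, VAL), bit(mc_umc_status, POISON),
	       bit(mc_umc_status, DEFERRED), bit(mc_umc_status, PCC),
	       bit(mc_umc_status, UC), bit(mc_umc_status, TCC));

	/* mirrors: Val == 1 && Deferred == 1 */
	return bit(mc_umc_status, VAL) == 1 && bit(mc_umc_status, DEFERRED) == 1;
}

int main(void)
{
	printf("deferred: %d\n",
	       is_deferred_error((1ULL << VAL) | (1ULL << DEFERRED)));
	return 0;
}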

umc_v6_1.c
    175  uint64_t mc_umc_status;    in umc_v6_1_query_correctable_error_count() local
    219  mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);    in umc_v6_1_query_correctable_error_count()
    220  if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&    in umc_v6_1_query_correctable_error_count()
    221  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&    in umc_v6_1_query_correctable_error_count()
    222  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)    in umc_v6_1_query_correctable_error_count()
    230  uint64_t mc_umc_status;    in umc_v6_1_querry_uncorrectable_error_count() local
    244  mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);    in umc_v6_1_querry_uncorrectable_error_count()
    245  if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&    in umc_v6_1_querry_uncorrectable_error_count()
    246  (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||    in umc_v6_1_querry_uncorrectable_error_count()
    247  REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||    in umc_v6_1_querry_uncorrectable_error_count()
    [all …]
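
The older umc_v6_1.c variant adds one more gate on the correctable path: the multi-bit ErrorCodeExt field must equal 6 before a CECC event is counted. A small sketch of that extra check follows; the shift and mask values are placeholders, not the real field layout.

/* Sketch: multi-bit ErrorCodeExt field plus the Val/CECC bits.
 * Shift and mask values are illustrative only. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define ERRCODEEXT_SHIFT 16		/* hypothetical position */
#define ERRCODEEXT_MASK  0x3fULL	/* hypothetical width */
enum { VAL = 63, CECC = 46 };		/* hypothetical bit positions */

static bool counts_as_cecc(uint64_t status)
{
	uint64_t err_code_ext = (status >> ERRCODEEXT_SHIFT) & ERRCODEEXT_MASK;

	/* mirrors: ErrorCodeExt == 6 && Val == 1 && CECC == 1 */
	return err_code_ext == 6 &&
	       ((status >> VAL) & 1) == 1 &&
	       ((status >> CECC) & 1) == 1;
}

int main(void)
{
	uint64_t status = (6ULL << ERRCODEEXT_SHIFT) | (1ULL << VAL) | (1ULL << CECC);

	printf("counts as CECC: %d\n", counts_as_cecc(status));
	return 0;
}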

umc_v12_0.h
     89  bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
     90  bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
     91  bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
     93  typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
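
The check_error_type_func typedef lets callers pass any of the three classifiers above as a callback. The following self-contained illustration only demonstrates that function-pointer pattern: the adev parameter is dropped, and the two toy predicates use made-up bit positions rather than the real umc_v12_0 helpers.

/* Sketch of a classifier table keyed by function pointers, loosely modelled
 * on the check_error_type_func typedef.  Predicates and bit positions are
 * illustrative, not the amdgpu implementation. */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef bool (*check_status_func)(uint64_t mc_umc_status);

static bool is_correctable(uint64_t s)   { return ((s >> 63) & 1) && ((s >> 46) & 1); }
static bool is_uncorrectable(uint64_t s) { return ((s >> 63) & 1) && ((s >> 45) & 1); }

int main(void)
{
	const struct {
		const char *name;
		check_status_func check;
	} classes[] = {
		{ "correctable",   is_correctable },
		{ "uncorrectable", is_uncorrectable },
	};
	uint64_t status = (1ULL << 63) | (1ULL << 46);	/* Val + CECC in the toy layout */

	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		printf("%s: %d\n", classes[i].name, classes[i].check(status));

	return 0;
}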