Lines matching full:con (references to the per-device RAS context pointer, struct amdgpu_ras *con, in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c)

137 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
669 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
672 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
682 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
684 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
695 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
705 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
709 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
720 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
722 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
728 obj = &con->objs[i]; in amdgpu_ras_find_obj()
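The two indexing paths repeated in amdgpu_ras_create_obj() and amdgpu_ras_find_obj() above encode one layout: per-block managers sit at objs[head->block], and sub-block managers are appended past AMDGPU_RAS_BLOCK__LAST at objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]. A minimal userspace sketch of that layout; the enum values and stub struct here are illustrative stand-ins, not the kernel definitions.

/* sketch: objs[] indexing with sub-blocks appended past __LAST */
#include <stdio.h>

enum { RAS_BLOCK__UMC, RAS_BLOCK__GFX, RAS_BLOCK__LAST, SUB_BLOCKS = 4 };

struct ras_manager { int in_use; };

struct ras_context {
	/* one slot per block, then one slot per sub-block */
	struct ras_manager objs[RAS_BLOCK__LAST + SUB_BLOCKS];
};

static struct ras_manager *find_obj(struct ras_context *con,
				    int block, int sub_block_index)
{
	if (sub_block_index > 0)	/* sub-blocks live past __LAST */
		return &con->objs[RAS_BLOCK__LAST + sub_block_index];
	return &con->objs[block];	/* plain per-block slot */
}

int main(void)
{
	struct ras_context con = {0};

	find_obj(&con, RAS_BLOCK__GFX, 0)->in_use = 1;
	find_obj(&con, 0, 2)->in_use = 1;	/* lands at objs[__LAST + 2] */
	printf("gfx slot used: %d\n", con.objs[RAS_BLOCK__GFX].in_use);
	return 0;
}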
748 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
750 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
760 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
765 	 * Ras framework checks con->hw_supported to see if it needs to do in __amdgpu_ras_feature_enable()
767 	 * IP checks con->support to see if it needs to disable ras. in __amdgpu_ras_feature_enable()
781 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
784 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
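con->features is a per-block bitmask: __amdgpu_ras_feature_enable() sets or clears BIT(head->block), and amdgpu_ras_is_feature_enabled() at line 750 tests it. The same pattern self-contained, with BIT() spelled out since it normally comes from the kernel headers:

/* sketch: per-block feature bitmask */
#include <stdio.h>

#define BIT(n) (1UL << (n))

enum { BLOCK_UMC, BLOCK_GFX, BLOCK_SDMA };

int main(void)
{
	unsigned long features = 0;

	features |= BIT(BLOCK_GFX);	/* enable  */
	features &= ~BIT(BLOCK_UMC);	/* disable */
	printf("gfx enabled: %d\n", !!(features & BIT(BLOCK_GFX)));
	printf("feature mask: 0x%lx\n", features);
	return 0;
}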
796 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
800 if (!con) in amdgpu_ras_feature_enable()
853 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
856 if (!con) in amdgpu_ras_feature_enable_on_boot()
859 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
887 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
893 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
904 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
907 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
920 return con->features; in amdgpu_ras_disable_all_features()
926 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
973 return con->features; in amdgpu_ras_enable_all_features()
1588 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1593 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1605 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1683 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1685 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1714 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1717 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
1723 struct amdgpu_ras *con = in amdgpu_ras_sysfs_version_show() local
1725 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); in amdgpu_ras_sysfs_version_show()
1731 struct amdgpu_ras *con = in amdgpu_ras_sysfs_schema_show() local
1733 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); in amdgpu_ras_sysfs_schema_show()
1748 struct amdgpu_ras *con = in amdgpu_ras_sysfs_event_state_show() local
1750 struct ras_event_manager *event_mgr = con->event_mgr; in amdgpu_ras_sysfs_event_state_show()
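Every sysfs show handler above recovers con from its embedded attribute via container_of() and formats one field with sysfs_emit(). The container_of() step is plain pointer arithmetic; a runnable userspace demonstration with hypothetical names:

/* sketch: recovering the containing struct from an embedded member */
#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel macro, minus the type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct ras_context {
	unsigned int features;
	struct attribute features_attr;	/* embedded, like con->features_attr */
};

static void show(struct attribute *attr)
{
	/* walk back from the embedded member to its container */
	struct ras_context *con =
		container_of(attr, struct ras_context, features_attr);

	printf("feature mask: 0x%x\n", con->features);
}

int main(void)
{
	struct ras_context con = { .features = 0x5,
				   .features_attr = { "features" } };

	show(&con.features_attr);	/* handler only sees the attribute */
	return 0;
}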
1771 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1775 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1781 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_dev_attr_node() local
1783 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1784 &con->version_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1785 &con->schema_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1786 &con->event_state_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1860 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1863 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1897 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1898 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
1908 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1914 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1917 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1927 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
1934 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
1976 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
1985 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
1990 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
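The ctrl-node block wires variables straight into debugfs: debugfs_create_u32() exposes con->bad_page_cnt_threshold, debugfs_create_bool() exposes con->reboot and con->disable_ras_err_cnt_harvest, and debugfs_create_file() backs ras_eeprom_table with custom fops. A minimal kernel-module sketch of the variable-backed nodes; the module scaffolding and names are illustrative, not the amdgpu code:

/* sketch: exposing plain variables through debugfs */
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *dir;
static u32 bad_page_cnt_threshold = 100;
static bool auto_reboot;

static int __init demo_init(void)
{
	dir = debugfs_create_dir("ras_demo", NULL);

	/* reads and writes go straight to the variables, no fops needed */
	debugfs_create_u32("bad_page_cnt_threshold", 0600, dir,
			   &bad_page_cnt_threshold);
	debugfs_create_bool("auto_reboot", 0600, dir, &auto_reboot);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(dir);	/* tears down every node at once */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");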
2023 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
2028 &con->features_attr.attr, in amdgpu_ras_fs_init()
2029 &con->version_attr.attr, in amdgpu_ras_fs_init()
2030 &con->schema_attr.attr, in amdgpu_ras_fs_init()
2031 &con->event_state_attr.attr, in amdgpu_ras_fs_init()
2043 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
2047 con->version_attr = dev_attr_version; in amdgpu_ras_fs_init()
2051 con->schema_attr = dev_attr_schema; in amdgpu_ras_fs_init()
2055 con->event_state_attr = dev_attr_event_state; in amdgpu_ras_fs_init()
2061 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
2062 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
2076 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
2080 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
2121 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_poison_consumption_handler() local
2126 if (!block_obj || !con) in amdgpu_ras_interrupt_poison_consumption_handler()
2184 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); in amdgpu_ras_interrupt_poison_creation_handler() local
2186 atomic_inc(&con->page_retirement_req_cnt); in amdgpu_ras_interrupt_poison_creation_handler()
2187 atomic_inc(&con->poison_creation_count); in amdgpu_ras_interrupt_poison_creation_handler()
2189 wake_up(&con->page_retirement_wq); in amdgpu_ras_interrupt_poison_creation_handler()
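The poison-creation interrupt path is the producer half of a handoff: it bumps two atomic counters and wakes con->page_retirement_wq; the page-retirement thread further down drains them by snapshotting a count and subtracting exactly that much, so requests that race in after the snapshot survive. The counter discipline alone, in portable C11 atomics with stand-in names:

/* sketch: snapshot-then-subtract atomic counters */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int page_retirement_req_cnt;
static atomic_int poison_creation_count;

static void on_poison_creation_irq(void)
{
	/* producer: count the request, then (in the kernel) wake_up() */
	atomic_fetch_add(&page_retirement_req_cnt, 1);
	atomic_fetch_add(&poison_creation_count, 1);
}

int main(void)
{
	on_poison_creation_irq();
	on_poison_creation_irq();

	/* consumer: drain a snapshot, subtract exactly that much, so
	 * requests arriving after the snapshot are not lost */
	int n = atomic_load(&poison_creation_count);
	atomic_fetch_sub(&poison_creation_count, n);
	atomic_fetch_sub(&page_retirement_req_cnt, n);

	printf("drained %d, left %d\n", n,
	       atomic_load(&page_retirement_req_cnt));
	return 0;
}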
2364 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
2367 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
2378 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
2381 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
2384 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
2453 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
2456 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
2459 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
2476 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
2481 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
2484 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2485 data = con->eh_data; in amdgpu_ras_badpages_read()
2514 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2668 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
2673 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
2676 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2677 data = con->eh_data; in amdgpu_ras_add_bad_pages()
2682 if (amdgpu_ras_check_bad_page_unlock(con, in amdgpu_ras_add_bad_pages()
2699 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2712 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
2717 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
2724 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2725 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
2726 data = con->eh_data; in amdgpu_ras_save_bad_pages()
2728 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2777 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
2780 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
2799 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
2802 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
2805 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
2806 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
2807 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
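amdgpu_ras_check_bad_page() is a thin locked wrapper: it takes con->recovery_lock and calls the _unlock variant, which assumes the caller already holds the lock; amdgpu_ras_add_bad_pages() relies on that at line 2682, calling it inside its own critical section. The convention in a runnable pthread analogue (compile with -pthread), names hypothetical:

/* sketch: locked wrapper around an _unlock helper */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long bad_pages[16];
static int bad_page_count;

/* _unlock suffix: caller must already hold recovery_lock */
static bool check_bad_page_unlock(unsigned long addr)
{
	for (int i = 0; i < bad_page_count; i++)
		if (bad_pages[i] == addr)
			return true;
	return false;
}

static bool check_bad_page(unsigned long addr)
{
	pthread_mutex_lock(&recovery_lock);
	bool ret = check_bad_page_unlock(addr);
	pthread_mutex_unlock(&recovery_lock);
	return ret;
}

static void add_bad_page(unsigned long addr)
{
	pthread_mutex_lock(&recovery_lock);
	/* reuse the unlocked variant inside our own critical section */
	if (!check_bad_page_unlock(addr))
		bad_pages[bad_page_count++] = addr;
	pthread_mutex_unlock(&recovery_lock);
}

int main(void)
{
	add_bad_page(0x1000);
	add_bad_page(0x1000);	/* duplicate, filtered under the lock */
	printf("count=%d known=%d\n", bad_page_count, check_bad_page(0x1000));
	return 0;
}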
2814 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
2840 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
2843 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
2854 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_put_poison_req() local
2863 ret = kfifo_put(&con->poison_fifo, poison_msg); in amdgpu_ras_put_poison_req()
2875 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_poison_req() local
2877 return kfifo_get(&con->poison_fifo, poison_msg); in amdgpu_ras_get_poison_req()
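Poison messages travel through con->poison_fifo: kfifo_put() on the producer side, kfifo_get() in the retirement thread, kfifo_len() to size a drain batch. A single-producer/single-consumer ring with the same put/get/len surface, sketched in userspace and without the memory barriers the real lockless kfifo needs:

/* sketch: power-of-two ring buffer with kfifo-like put/get/len */
#include <stdbool.h>
#include <stdio.h>

#define FIFO_SIZE 8	/* power of two, like DECLARE_KFIFO sizes */

struct poison_msg { unsigned long addr; };

static struct poison_msg fifo[FIFO_SIZE];
static unsigned int in, out;	/* free-running indices, masked on use */

static bool fifo_put(struct poison_msg msg)
{
	if (in - out == FIFO_SIZE)
		return false;	/* full: caller sees the drop, as with kfifo_put */
	fifo[in++ & (FIFO_SIZE - 1)] = msg;
	return true;
}

static bool fifo_get(struct poison_msg *msg)
{
	if (in == out)
		return false;	/* empty */
	*msg = fifo[out++ & (FIFO_SIZE - 1)];
	return true;
}

static unsigned int fifo_len(void) { return in - out; }

int main(void)
{
	fifo_put((struct poison_msg){ .addr = 0x2000 });
	printf("queued: %u\n", fifo_len());

	struct poison_msg m;
	while (fifo_get(&m))	/* drain, like the retirement thread */
		printf("retire 0x%lx\n", m.addr);
	return 0;
}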
2909 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, in amdgpu_ras_schedule_retirement_dwork() argument
2914 mutex_lock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
2915 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, in amdgpu_ras_schedule_retirement_dwork()
2917 mutex_unlock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
2920 schedule_delayed_work(&con->page_retirement_dwork, in amdgpu_ras_schedule_retirement_dwork()
2928 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_do_page_retirement() local
2930 struct amdgpu_device *adev = con->adev; in amdgpu_ras_do_page_retirement()
2936 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
2951 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
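amdgpu_ras_schedule_retirement_dwork() peeks at the radix tree under umc_ecc_log.lock and re-queues con->page_retirement_dwork if any entry is still tagged; amdgpu_ras_do_page_retirement() calls it on entry and exit, so the work re-arms itself until the backlog is empty. A kernel-module sketch of that self-rearming shape, with an illustrative backlog condition standing in for the radix-tree check:

/* sketch: delayed work that re-arms while a backlog remains */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

static atomic_t backlog = ATOMIC_INIT(3);
static struct delayed_work retire_dwork;

static void do_retirement(struct work_struct *work)
{
	pr_info("retiring, backlog=%d\n", atomic_dec_return(&backlog));

	/* re-arm only while there is still work queued up */
	if (atomic_read(&backlog) > 0)
		schedule_delayed_work(&retire_dwork, msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&retire_dwork, do_retirement);
	schedule_delayed_work(&retire_dwork, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&retire_dwork);	/* wait out any run */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");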
3018 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_clear_poison_fifo() local
3023 ret = kfifo_get(&con->poison_fifo, &msg); in amdgpu_ras_clear_poison_fifo()
3030 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_poison_consumption_handler() local
3057 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_poison_consumption_handler()
3059 con->gpu_reset_flags |= reset; in amdgpu_ras_poison_consumption_handler()
3065 flush_work(&con->recovery_work); in amdgpu_ras_poison_consumption_handler()
3074 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_page_retirement_thread() local
3081 wait_event_interruptible(con->page_retirement_wq, in amdgpu_ras_page_retirement_thread()
3083 atomic_read(&con->page_retirement_req_cnt)); in amdgpu_ras_page_retirement_thread()
3091 poison_creation_count = atomic_read(&con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3097 atomic_sub(poison_creation_count, &con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3098 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3100 } while (atomic_read(&con->poison_creation_count)); in amdgpu_ras_page_retirement_thread()
3103 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3109 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3116 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_page_retirement_thread()
3122 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_page_retirement_thread()
3131 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
3135 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3138 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3142 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
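The retirement thread sleeps on con->page_retirement_wq until page_retirement_req_cnt goes nonzero, drains poison-creation work first and the consumption fifo second, and loops until kthread_should_stop(). The same consumer shape in a runnable pthread analogue (a condition variable stands in for the wait queue; compile with -pthread):

/* sketch: wait-then-drain consumer thread with a stop flag */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int req_cnt;
static bool should_stop;

static void *retirement_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!should_stop) {
		/* sleep until woken with pending requests, like
		 * wait_event_interruptible(wq, stop || req_cnt) */
		while (!should_stop && req_cnt == 0)
			pthread_cond_wait(&wq, &lock);

		int batch = req_cnt;	/* snapshot, then subtract */
		req_cnt -= batch;
		pthread_mutex_unlock(&lock);

		if (batch)
			printf("handling %d request(s)\n", batch);

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, retirement_thread, NULL);

	pthread_mutex_lock(&lock);	/* producer: post work, wake thread */
	req_cnt += 2;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);

	usleep(100000);

	pthread_mutex_lock(&lock);	/* shutdown, like kthread_stop() */
	should_stop = true;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);
	return pthread_join(t, NULL);
}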
3151 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
3156 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
3164 con->adev = adev; in amdgpu_ras_recovery_init()
3169 data = &con->eh_data; in amdgpu_ras_recovery_init()
3176 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
3177 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
3178 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
3179 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
3181 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
3190 ret = amdgpu_ras_eeprom_init(&con->eeprom_control); in amdgpu_ras_recovery_init()
3198 if (con->eeprom_control.ras_num_recs) { in amdgpu_ras_recovery_init()
3203 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); in amdgpu_ras_recovery_init()
3205 if (con->update_channel_flag == true) { in amdgpu_ras_recovery_init()
3206 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); in amdgpu_ras_recovery_init()
3207 con->update_channel_flag = false; in amdgpu_ras_recovery_init()
3211 mutex_init(&con->page_rsv_lock); in amdgpu_ras_recovery_init()
3212 INIT_KFIFO(con->poison_fifo); in amdgpu_ras_recovery_init()
3213 mutex_init(&con->page_retirement_lock); in amdgpu_ras_recovery_init()
3214 init_waitqueue_head(&con->page_retirement_wq); in amdgpu_ras_recovery_init()
3215 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_init()
3216 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_init()
3217 con->page_retirement_thread = in amdgpu_ras_recovery_init()
3219 if (IS_ERR(con->page_retirement_thread)) { in amdgpu_ras_recovery_init()
3220 con->page_retirement_thread = NULL; in amdgpu_ras_recovery_init()
3224 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); in amdgpu_ras_recovery_init()
3225 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); in amdgpu_ras_recovery_init()
3236 con->eh_data = NULL; in amdgpu_ras_recovery_init()
3254 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
3255 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
3265 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3266 ret = amdgpu_ras_schedule_retirement_dwork(con, 0); in amdgpu_ras_recovery_fini()
3269 if (con->page_retirement_thread) in amdgpu_ras_recovery_fini()
3270 kthread_stop(con->page_retirement_thread); in amdgpu_ras_recovery_fini()
3272 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_fini()
3273 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_fini()
3275 mutex_destroy(&con->page_rsv_lock); in amdgpu_ras_recovery_fini()
3277 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
3279 cancel_delayed_work_sync(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3281 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); in amdgpu_ras_recovery_fini()
3283 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
3284 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
3287 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
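The fini path is strictly ordered: flush and cancel the deferred work, stop the kthread, reset the counters, and only then take recovery_lock to free eh_data and NULL the pointer, so later callers that bail out on a NULL con->eh_data fail safely instead of touching freed memory. The tail of that ordering in a small pthread sketch (the recheck under the lock is an extra-cautious variant, not a claim about the kernel code):

/* sketch: free shared state under the lock and NULL the pointer */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
static int *eh_data;	/* stands in for con->eh_data */

static void recovery_init(void)
{
	eh_data = calloc(16, sizeof(*eh_data));
}

static void recovery_fini(void)
{
	/* producers (works, kthread) are quiesced before this point;
	 * then free under the lock and NULL so later checks fail safely */
	pthread_mutex_lock(&recovery_lock);
	free(eh_data);
	eh_data = NULL;
	pthread_mutex_unlock(&recovery_lock);
}

static int use_eh_data(void)
{
	if (!eh_data)	/* cheap bail-out, like "if (!con->eh_data)" */
		return -1;
	pthread_mutex_lock(&recovery_lock);
	int v = eh_data ? eh_data[0] : -1;	/* recheck under the lock */
	pthread_mutex_unlock(&recovery_lock);
	return v;
}

int main(void)
{
	recovery_init();
	printf("before fini: %d\n", use_eh_data());
	recovery_fini();
	printf("after fini: %d\n", use_eh_data());
	return 0;
}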
3394 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
3398 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
3405 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3417 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3480 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
3482 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
3494 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
3495 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
3549 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_reserved_vram_size() local
3551 if (!con || (adev->flags & AMD_IS_APU)) in amdgpu_ras_init_reserved_vram_size()
3558 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE; in amdgpu_ras_init_reserved_vram_size()
3567 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
3570 if (con) in amdgpu_ras_init()
3573 con = kzalloc(sizeof(*con) + in amdgpu_ras_init()
3577 if (!con) in amdgpu_ras_init()
3580 con->adev = adev; in amdgpu_ras_init()
3581 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
3582 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
3583 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
3585 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
3587 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
3596 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
3605 con->update_channel_flag = false; in amdgpu_ras_init()
3606 con->features = 0; in amdgpu_ras_init()
3607 con->schema = 0; in amdgpu_ras_init()
3608 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
3610 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
3664 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << in amdgpu_ras_init()
3668 con->schema = amdgpu_get_ras_schema(adev); in amdgpu_ras_init()
3693 kfree(con); in amdgpu_ras_init()
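amdgpu_ras_init() sizes a single kzalloc() for the context plus every ras_manager slot and points con->objs at the memory immediately past the struct, (struct ras_manager *)(con + 1) at line 3585, so one kfree() tears down both on the error path here and in amdgpu_ras_fini(). The same single-allocation trick in userspace C:

/* sketch: header plus trailing object array in one allocation */
#include <stdio.h>
#include <stdlib.h>

struct ras_manager { int block; };

struct ras_context {
	unsigned long features;
	struct ras_manager *objs;	/* points just past this struct */
};

int main(void)
{
	int nr_objs = 8;

	/* one allocation: the context, then the object array */
	struct ras_context *con =
		calloc(1, sizeof(*con) + nr_objs * sizeof(struct ras_manager));
	if (!con)
		return 1;

	con->objs = (struct ras_manager *)(con + 1);	/* first byte after con */
	con->objs[3].block = 3;

	printf("objs[3].block = %d\n", con->objs[3].block);
	free(con);	/* single kfree()-style teardown frees both */
	return 0;
}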
3727 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
3729 if (!con) in amdgpu_ras_is_poison_mode_supported()
3732 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
3740 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
3792 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
3793 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
3841 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
3844 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
3851 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
3863 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
3875 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
3877 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
3882 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_suspend()
3943 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
3945 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
3950 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_pre_fini()
3960 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
3962 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
3990 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); in amdgpu_ras_fini()
3992 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_fini()
3995 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
3998 kfree(con); in amdgpu_ras_fini()
4125 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
4127 if (!con) in amdgpu_release_ras_context()
4130 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
4131 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
4133 kfree(con); in amdgpu_release_ras_context()
4304 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_mca_debug_mode() local
4307 if (con) { in amdgpu_ras_set_mca_debug_mode()
4310 con->is_aca_debug_mode = enable; in amdgpu_ras_set_mca_debug_mode()
4318 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_aca_debug_mode() local
4321 if (con) { in amdgpu_ras_set_aca_debug_mode()
4327 con->is_aca_debug_mode = enable; in amdgpu_ras_set_aca_debug_mode()
4335 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_aca_debug_mode() local
4339 if (!con) in amdgpu_ras_get_aca_debug_mode()
4344 return con->is_aca_debug_mode; in amdgpu_ras_get_aca_debug_mode()
4352 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_error_query_mode() local
4356 if (!con) { in amdgpu_ras_get_error_query_mode()
4363 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; in amdgpu_ras_get_error_query_mode()
4801 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_reserve_page() local
4806 mutex_lock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
4810 mutex_unlock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
4835 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_rma() local
4837 if (!con) in amdgpu_ras_is_rma()
4840 return con->is_rma; in amdgpu_ras_is_rma()