Lines Matching +full:mc +full:-sid

All matches below are from the Linux kernel's radeon SI driver (drivers/gpu/drm/radeon/si.c); each entry shows the source line number, the matching line, and the enclosing function.

43 #include "sid.h"
1210 switch (rdev->family) { in si_init_golden_registers()
1278 * si_get_allowed_info_register - fetch the register for the info ioctl
1284 * Returns 0 for success or -EINVAL for an invalid register
1303 return -EINVAL; in si_get_allowed_info_register()
1311 * si_get_xclk - get the xclk
1320 u32 reference_clock = rdev->clock.spll.reference_freq; in si_get_xclk()
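	The xclk is derived from the SPLL reference frequency. A minimal sketch of the remainder of si_get_xclk(), assuming the CG_CLKPIN_CNTL/XTALIN_DIVIDE encoding used elsewhere in this driver (register and bit names from sid.h; the TCLK bypass case is omitted):

	u32 reference_clock = rdev->clock.spll.reference_freq;

	/* sketch: if the crystal input is divided, xclk is ref/4 */
	if (RREG32(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
		return reference_clock / 4;

	return reference_clock;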
1560 if (!rdev->mc_fw) in si_mc_load_microcode()
1561 return -EINVAL; in si_mc_load_microcode()
1563 if (rdev->new_fw) { in si_mc_load_microcode()
1565 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data; in si_mc_load_microcode()
1567 radeon_ucode_print_mc_hdr(&hdr->header); in si_mc_load_microcode()
1568 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); in si_mc_load_microcode()
1570 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); in si_mc_load_microcode()
1571 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; in si_mc_load_microcode()
1573 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in si_mc_load_microcode()
1575 ucode_size = rdev->mc_fw->size / 4; in si_mc_load_microcode()
1577 switch (rdev->family) { in si_mc_load_microcode()
1600 fw_data = (const __be32 *)rdev->mc_fw->data; in si_mc_load_microcode()
1610 /* load mc io regs */ in si_mc_load_microcode()
1612 if (rdev->new_fw) { in si_mc_load_microcode()
1620 /* load the MC ucode */ in si_mc_load_microcode()
1622 if (rdev->new_fw) in si_mc_load_microcode()
1634 for (i = 0; i < rdev->usec_timeout; i++) { in si_mc_load_microcode()
1639 for (i = 0; i < rdev->usec_timeout; i++) { in si_mc_load_microcode()
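	The two divisions above imply a layout: the io-debug blob is a flat array of (index, data) dword pairs (hence the 4 * 2 divisor), and the ucode blob is a flat dword stream. A sketch of the programming loops those sizes feed, assuming the MC_SEQ_IO_DEBUG_INDEX/DATA and MC_SEQ_SUP_PGM indexed-write registers used by this function; pointer names are illustrative:

	/* load mc io regs: one (index, data) dword pair per entry */
	for (i = 0; i < regs_size; i++) {
		WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
		WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
	}
	/* load the MC ucode itself as a flat dword stream */
	for (i = 0; i < ucode_size; i++)
		WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));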
1664 switch (rdev->family) { in si_init_microcode()
1678 if ((rdev->pdev->revision == 0x81) && in si_init_microcode()
1679 ((rdev->pdev->device == 0x6810) || in si_init_microcode()
1680 (rdev->pdev->device == 0x6811))) in si_init_microcode()
1693 if (((rdev->pdev->device == 0x6820) && in si_init_microcode()
1694 ((rdev->pdev->revision == 0x81) || in si_init_microcode()
1695 (rdev->pdev->revision == 0x83))) || in si_init_microcode()
1696 ((rdev->pdev->device == 0x6821) && in si_init_microcode()
1697 ((rdev->pdev->revision == 0x83) || in si_init_microcode()
1698 (rdev->pdev->revision == 0x87))) || in si_init_microcode()
1699 ((rdev->pdev->revision == 0x87) && in si_init_microcode()
1700 ((rdev->pdev->device == 0x6823) || in si_init_microcode()
1701 (rdev->pdev->device == 0x682b)))) in si_init_microcode()
1714 if (((rdev->pdev->revision == 0x81) && in si_init_microcode()
1715 ((rdev->pdev->device == 0x6600) || in si_init_microcode()
1716 (rdev->pdev->device == 0x6604) || in si_init_microcode()
1717 (rdev->pdev->device == 0x6605) || in si_init_microcode()
1718 (rdev->pdev->device == 0x6610))) || in si_init_microcode()
1719 ((rdev->pdev->revision == 0x83) && in si_init_microcode()
1720 (rdev->pdev->device == 0x6610))) in si_init_microcode()
1732 if (((rdev->pdev->revision == 0x81) && in si_init_microcode()
1733 (rdev->pdev->device == 0x6660)) || in si_init_microcode()
1734 ((rdev->pdev->revision == 0x83) && in si_init_microcode()
1735 ((rdev->pdev->device == 0x6660) || in si_init_microcode()
1736 (rdev->pdev->device == 0x6663) || in si_init_microcode()
1737 (rdev->pdev->device == 0x6665) || in si_init_microcode()
1738 (rdev->pdev->device == 0x6667)))) in si_init_microcode()
1740 else if ((rdev->pdev->revision == 0xc3) && in si_init_microcode()
1741 (rdev->pdev->device == 0x6665)) in si_init_microcode()
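	Every chain above follows one pattern: match a PCI device/revision pair and flag boards that must be steered to an alternate SMC firmware image when the file names are assembled later in si_init_microcode(). A condensed sketch of that pattern (the flag name is illustrative):

	bool needs_alt_smc = false;

	/* e.g. the Pitcairn check above */
	if ((rdev->pdev->revision == 0x81) &&
	    ((rdev->pdev->device == 0x6810) ||
	     (rdev->pdev->device == 0x6811)))
		needs_alt_smc = true;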
1762 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); in si_init_microcode()
1765 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); in si_init_microcode()
1768 if (rdev->pfp_fw->size != pfp_req_size) { in si_init_microcode()
1770 rdev->pfp_fw->size, fw_name); in si_init_microcode()
1771 err = -EINVAL; in si_init_microcode()
1775 err = radeon_ucode_validate(rdev->pfp_fw); in si_init_microcode()
1786 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); in si_init_microcode()
1789 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); in si_init_microcode()
1792 if (rdev->me_fw->size != me_req_size) { in si_init_microcode()
1794 rdev->me_fw->size, fw_name); in si_init_microcode()
1795 err = -EINVAL; in si_init_microcode()
1798 err = radeon_ucode_validate(rdev->me_fw); in si_init_microcode()
1809 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); in si_init_microcode()
1812 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); in si_init_microcode()
1815 if (rdev->ce_fw->size != ce_req_size) { in si_init_microcode()
1817 rdev->ce_fw->size, fw_name); in si_init_microcode()
1818 err = -EINVAL; in si_init_microcode()
1821 err = radeon_ucode_validate(rdev->ce_fw); in si_init_microcode()
1832 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); in si_init_microcode()
1835 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); in si_init_microcode()
1838 if (rdev->rlc_fw->size != rlc_req_size) { in si_init_microcode()
1840 rdev->rlc_fw->size, fw_name); in si_init_microcode()
1841 err = -EINVAL; in si_init_microcode()
1844 err = radeon_ucode_validate(rdev->rlc_fw); in si_init_microcode()
1858 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); in si_init_microcode()
1861 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); in si_init_microcode()
1864 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); in si_init_microcode()
1868 if ((rdev->mc_fw->size != mc_req_size) && in si_init_microcode()
1869 (rdev->mc_fw->size != mc2_req_size)) { in si_init_microcode()
1871 rdev->mc_fw->size, fw_name); in si_init_microcode()
1872 err = -EINVAL; in si_init_microcode()
1874 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); in si_init_microcode()
1876 err = radeon_ucode_validate(rdev->mc_fw); in si_init_microcode()
1892 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); in si_init_microcode()
1895 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); in si_init_microcode()
1898 release_firmware(rdev->smc_fw); in si_init_microcode()
1899 rdev->smc_fw = NULL; in si_init_microcode()
1901 } else if (rdev->smc_fw->size != smc_req_size) { in si_init_microcode()
1903 rdev->smc_fw->size, fw_name); in si_init_microcode()
1904 err = -EINVAL; in si_init_microcode()
1907 err = radeon_ucode_validate(rdev->smc_fw); in si_init_microcode()
1918 rdev->new_fw = false; in si_init_microcode()
1921 err = -EINVAL; in si_init_microcode()
1923 rdev->new_fw = true; in si_init_microcode()
1927 if (err != -EINVAL) in si_init_microcode()
1930 release_firmware(rdev->pfp_fw); in si_init_microcode()
1931 rdev->pfp_fw = NULL; in si_init_microcode()
1932 release_firmware(rdev->me_fw); in si_init_microcode()
1933 rdev->me_fw = NULL; in si_init_microcode()
1934 release_firmware(rdev->ce_fw); in si_init_microcode()
1935 rdev->ce_fw = NULL; in si_init_microcode()
1936 release_firmware(rdev->rlc_fw); in si_init_microcode()
1937 rdev->rlc_fw = NULL; in si_init_microcode()
1938 release_firmware(rdev->mc_fw); in si_init_microcode()
1939 rdev->mc_fw = NULL; in si_init_microcode()
1940 release_firmware(rdev->smc_fw); in si_init_microcode()
1941 rdev->smc_fw = NULL; in si_init_microcode()
1953 u32 pipe_offset = radeon_crtc->crtc_id * 0x20; in dce6_line_buffer_adjust()
1960 * 0 - half lb in dce6_line_buffer_adjust()
1961 * 2 - whole lb, other crtc must be disabled in dce6_line_buffer_adjust()
1965 * non-linked crtcs for maximum line buffer allocation. in dce6_line_buffer_adjust()
1967 if (radeon_crtc->base.enabled && mode) { in dce6_line_buffer_adjust()
1980 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, in dce6_line_buffer_adjust()
1985 for (i = 0; i < rdev->usec_timeout; i++) { in dce6_line_buffer_adjust()
1992 if (radeon_crtc->base.enabled && mode) { in dce6_line_buffer_adjust()
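	Given the encodings in the comment above (0 = half line buffer, 2 = whole line buffer with the paired crtc disabled), the allocation policy reduces to a two-way choice. A sketch, with an illustrative helper name:

	static u32 dce6_lb_split_for(bool crtc_enabled, bool other_mode_active)
	{
		if (crtc_enabled && !other_mode_active)
			return 2;	/* whole lb: the paired crtc is idle */
		return 0;		/* half lb: share with the paired crtc */
	}

	The chosen encoding is what gets written into DC_LB_MEMORY_SPLIT at the crtc's register offset (the WREG32 at line 1980 above).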
2057 yclk.full = dfixed_const(wm->yclk); in dce6_dram_bandwidth()
2059 dram_channels.full = dfixed_const(wm->dram_channels * 4); in dce6_dram_bandwidth()
2077 yclk.full = dfixed_const(wm->yclk); in dce6_dram_bandwidth_for_display()
2079 dram_channels.full = dfixed_const(wm->dram_channels * 4); in dce6_dram_bandwidth_for_display()
2097 sclk.full = dfixed_const(wm->sclk); in dce6_data_return_bandwidth()
2123 disp_clk.full = dfixed_const(wm->disp_clk); in dce6_dmif_request_bandwidth()
2129 sclk.full = dfixed_const(wm->sclk); in dce6_dmif_request_bandwidth()
2169 line_time.full = dfixed_const(wm->active_time + wm->blank_time); in dce6_average_bandwidth()
2171 bpp.full = dfixed_const(wm->bytes_per_pixel); in dce6_average_bandwidth()
2172 src_width.full = dfixed_const(wm->src_width); in dce6_average_bandwidth()
2174 bandwidth.full = dfixed_mul(bandwidth, wm->vsc); in dce6_average_bandwidth()
2187 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ in dce6_latency_watermark()
2188 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + in dce6_latency_watermark()
2189 (wm->num_heads * cursor_line_pair_return_time); in dce6_latency_watermark()
2195 if (wm->num_heads == 0) in dce6_latency_watermark()
2200 if ((wm->vsc.full > a.full) || in dce6_latency_watermark()
2201 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || in dce6_latency_watermark()
2202 (wm->vtaps >= 5) || in dce6_latency_watermark()
2203 ((wm->vsc.full >= a.full) && wm->interlaced)) in dce6_latency_watermark()
2209 b.full = dfixed_const(wm->num_heads); in dce6_latency_watermark()
2211 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); in dce6_latency_watermark()
2214 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); in dce6_latency_watermark()
2216 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); in dce6_latency_watermark()
2223 if (line_fill_time < wm->active_time) in dce6_latency_watermark()
2226 return latency + (line_fill_time - wm->active_time); in dce6_latency_watermark()
2233 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads)) in dce6_average_bandwidth_vs_dram_bandwidth_for_display()
2242 (dce6_available_bandwidth(wm) / wm->num_heads)) in dce6_average_bandwidth_vs_available_bandwidth()
2250 u32 lb_partitions = wm->lb_size / wm->src_width; in dce6_check_latency_hiding()
2251 u32 line_time = wm->active_time + wm->blank_time; in dce6_check_latency_hiding()
2257 if (wm->vsc.full > a.full) in dce6_check_latency_hiding()
2260 if (lb_partitions <= (wm->vtaps + 1)) in dce6_check_latency_hiding()
2266 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); in dce6_check_latency_hiding()
2278 struct drm_display_mode *mode = &radeon_crtc->base.mode; in dce6_program_watermarks()
2290 if (radeon_crtc->base.enabled && num_heads && mode) { in dce6_program_watermarks()
2291 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, in dce6_program_watermarks()
2292 (u32)mode->clock); in dce6_program_watermarks()
2293 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, in dce6_program_watermarks()
2294 (u32)mode->clock); in dce6_program_watermarks()
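	With mode->clock in kHz (the DRM convention), both divisions yield times in nanoseconds: for a 1920x1080 mode with a 148500 kHz pixel clock and a 2200-pixel htotal, active_time = 1920 * 1000000 / 148500 ≈ 12929 ns and line_time = 2200 * 1000000 / 148500 ≈ 14815 ns; the blank_time computed below is then line_time - active_time ≈ 1886 ns.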
2299 if (rdev->family == CHIP_ARUBA) in dce6_program_watermarks()
2305 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in dce6_program_watermarks()
2311 wm_high.yclk = rdev->pm.current_mclk * 10; in dce6_program_watermarks()
2312 wm_high.sclk = rdev->pm.current_sclk * 10; in dce6_program_watermarks()
2315 wm_high.disp_clk = mode->clock; in dce6_program_watermarks()
2316 wm_high.src_width = mode->crtc_hdisplay; in dce6_program_watermarks()
2318 wm_high.blank_time = line_time - wm_high.active_time; in dce6_program_watermarks()
2320 if (mode->flags & DRM_MODE_FLAG_INTERLACE) in dce6_program_watermarks()
2322 wm_high.vsc = radeon_crtc->vsc; in dce6_program_watermarks()
2324 if (radeon_crtc->rmx_type != RMX_OFF) in dce6_program_watermarks()
2332 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in dce6_program_watermarks()
2338 wm_low.yclk = rdev->pm.current_mclk * 10; in dce6_program_watermarks()
2339 wm_low.sclk = rdev->pm.current_sclk * 10; in dce6_program_watermarks()
2342 wm_low.disp_clk = mode->clock; in dce6_program_watermarks()
2343 wm_low.src_width = mode->crtc_hdisplay; in dce6_program_watermarks()
2345 wm_low.blank_time = line_time - wm_low.active_time; in dce6_program_watermarks()
2347 if (mode->flags & DRM_MODE_FLAG_INTERLACE) in dce6_program_watermarks()
2349 wm_low.vsc = radeon_crtc->vsc; in dce6_program_watermarks()
2351 if (radeon_crtc->rmx_type != RMX_OFF) in dce6_program_watermarks()
2368 (rdev->disp_priority == 2)) { in dce6_program_watermarks()
2376 (rdev->disp_priority == 2)) { in dce6_program_watermarks()
2383 b.full = dfixed_const(mode->clock); in dce6_program_watermarks()
2387 c.full = dfixed_mul(c, radeon_crtc->hsc); in dce6_program_watermarks()
2395 b.full = dfixed_const(mode->clock); in dce6_program_watermarks()
2399 c.full = dfixed_mul(c, radeon_crtc->hsc); in dce6_program_watermarks()
2407 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); in dce6_program_watermarks()
2411 arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset); in dce6_program_watermarks()
2415 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp); in dce6_program_watermarks()
2416 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset, in dce6_program_watermarks()
2420 tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset); in dce6_program_watermarks()
2423 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp); in dce6_program_watermarks()
2424 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset, in dce6_program_watermarks()
2428 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3); in dce6_program_watermarks()
2431 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); in dce6_program_watermarks()
2432 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); in dce6_program_watermarks()
2435 radeon_crtc->line_time = line_time; in dce6_program_watermarks()
2436 radeon_crtc->wm_high = latency_watermark_a; in dce6_program_watermarks()
2437 radeon_crtc->wm_low = latency_watermark_b; in dce6_program_watermarks()
2447 if (!rdev->mode_info.mode_config_initialized) in dce6_bandwidth_update()
2452 for (i = 0; i < rdev->num_crtc; i++) { in dce6_bandwidth_update()
2453 if (rdev->mode_info.crtcs[i]->base.enabled) in dce6_bandwidth_update()
2456 for (i = 0; i < rdev->num_crtc; i += 2) { in dce6_bandwidth_update()
2457 mode0 = &rdev->mode_info.crtcs[i]->base.mode; in dce6_bandwidth_update()
2458 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode; in dce6_bandwidth_update()
2459 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); in dce6_bandwidth_update()
2460 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); in dce6_bandwidth_update()
2461 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); in dce6_bandwidth_update()
2462 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); in dce6_bandwidth_update()
2471 u32 *tile = rdev->config.si.tile_mode_array; in si_tiling_mode_table_init()
2473 ARRAY_SIZE(rdev->config.si.tile_mode_array); in si_tiling_mode_table_init()
2476 switch (rdev->config.si.mem_row_size_in_kb) { in si_tiling_mode_table_init()
2492 switch(rdev->family) { in si_tiling_mode_table_init()
2495 /* non-AA compressed depth or any compressed stencil */ in si_tiling_mode_table_init()
2531 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ in si_tiling_mode_table_init()
2540 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2549 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2710 /* non-AA compressed depth or any compressed stencil */ in si_tiling_mode_table_init()
2746 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ in si_tiling_mode_table_init()
2755 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2764 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2923 DRM_ERROR("unknown asic: 0x%x\n", rdev->family); in si_tiling_mode_table_init()
3044 rdev->config.si.backend_enable_mask = enabled_rbs; in si_setup_rb()
3078 switch (rdev->family) { in si_gpu_init()
3080 rdev->config.si.max_shader_engines = 2; in si_gpu_init()
3081 rdev->config.si.max_tile_pipes = 12; in si_gpu_init()
3082 rdev->config.si.max_cu_per_sh = 8; in si_gpu_init()
3083 rdev->config.si.max_sh_per_se = 2; in si_gpu_init()
3084 rdev->config.si.max_backends_per_se = 4; in si_gpu_init()
3085 rdev->config.si.max_texture_channel_caches = 12; in si_gpu_init()
3086 rdev->config.si.max_gprs = 256; in si_gpu_init()
3087 rdev->config.si.max_gs_threads = 32; in si_gpu_init()
3088 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3090 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3091 rdev->config.si.sc_prim_fifo_size_backend = 0x100; in si_gpu_init()
3092 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3093 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3097 rdev->config.si.max_shader_engines = 2; in si_gpu_init()
3098 rdev->config.si.max_tile_pipes = 8; in si_gpu_init()
3099 rdev->config.si.max_cu_per_sh = 5; in si_gpu_init()
3100 rdev->config.si.max_sh_per_se = 2; in si_gpu_init()
3101 rdev->config.si.max_backends_per_se = 4; in si_gpu_init()
3102 rdev->config.si.max_texture_channel_caches = 8; in si_gpu_init()
3103 rdev->config.si.max_gprs = 256; in si_gpu_init()
3104 rdev->config.si.max_gs_threads = 32; in si_gpu_init()
3105 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3107 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3108 rdev->config.si.sc_prim_fifo_size_backend = 0x100; in si_gpu_init()
3109 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3110 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3115 rdev->config.si.max_shader_engines = 1; in si_gpu_init()
3116 rdev->config.si.max_tile_pipes = 4; in si_gpu_init()
3117 rdev->config.si.max_cu_per_sh = 5; in si_gpu_init()
3118 rdev->config.si.max_sh_per_se = 2; in si_gpu_init()
3119 rdev->config.si.max_backends_per_se = 4; in si_gpu_init()
3120 rdev->config.si.max_texture_channel_caches = 4; in si_gpu_init()
3121 rdev->config.si.max_gprs = 256; in si_gpu_init()
3122 rdev->config.si.max_gs_threads = 32; in si_gpu_init()
3123 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3125 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3126 rdev->config.si.sc_prim_fifo_size_backend = 0x40; in si_gpu_init()
3127 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3128 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3132 rdev->config.si.max_shader_engines = 1; in si_gpu_init()
3133 rdev->config.si.max_tile_pipes = 4; in si_gpu_init()
3134 rdev->config.si.max_cu_per_sh = 6; in si_gpu_init()
3135 rdev->config.si.max_sh_per_se = 1; in si_gpu_init()
3136 rdev->config.si.max_backends_per_se = 2; in si_gpu_init()
3137 rdev->config.si.max_texture_channel_caches = 4; in si_gpu_init()
3138 rdev->config.si.max_gprs = 256; in si_gpu_init()
3139 rdev->config.si.max_gs_threads = 16; in si_gpu_init()
3140 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3142 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3143 rdev->config.si.sc_prim_fifo_size_backend = 0x40; in si_gpu_init()
3144 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3145 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3149 rdev->config.si.max_shader_engines = 1; in si_gpu_init()
3150 rdev->config.si.max_tile_pipes = 4; in si_gpu_init()
3151 rdev->config.si.max_cu_per_sh = 5; in si_gpu_init()
3152 rdev->config.si.max_sh_per_se = 1; in si_gpu_init()
3153 rdev->config.si.max_backends_per_se = 1; in si_gpu_init()
3154 rdev->config.si.max_texture_channel_caches = 2; in si_gpu_init()
3155 rdev->config.si.max_gprs = 256; in si_gpu_init()
3156 rdev->config.si.max_gs_threads = 16; in si_gpu_init()
3157 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3159 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3160 rdev->config.si.sc_prim_fifo_size_backend = 0x40; in si_gpu_init()
3161 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3162 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3187 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; in si_gpu_init()
3188 rdev->config.si.mem_max_burst_length_bytes = 256; in si_gpu_init()
3190 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; in si_gpu_init()
3191 if (rdev->config.si.mem_row_size_in_kb > 4) in si_gpu_init()
3192 rdev->config.si.mem_row_size_in_kb = 4; in si_gpu_init()
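	The expression above reduces to mem_row_size_in_kb = 2^tmp: tmp = 0 gives (4 * 256) / 1024 = 1 KB, tmp = 1 gives 2 KB, and tmp = 2 gives 4 KB, which is also the cap applied by the clamp above.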
3193 /* XXX use MC settings? */ in si_gpu_init()
3194 rdev->config.si.shader_engine_tile_size = 32; in si_gpu_init()
3195 rdev->config.si.num_gpus = 1; in si_gpu_init()
3196 rdev->config.si.multi_gpu_tile_size = 64; in si_gpu_init()
3200 switch (rdev->config.si.mem_row_size_in_kb) { in si_gpu_init()
3220 rdev->config.si.tile_config = 0; in si_gpu_init()
3221 switch (rdev->config.si.num_tile_pipes) { in si_gpu_init()
3223 rdev->config.si.tile_config |= (0 << 0); in si_gpu_init()
3226 rdev->config.si.tile_config |= (1 << 0); in si_gpu_init()
3229 rdev->config.si.tile_config |= (2 << 0); in si_gpu_init()
3234 rdev->config.si.tile_config |= (3 << 0); in si_gpu_init()
3239 rdev->config.si.tile_config |= 0 << 4; in si_gpu_init()
3242 rdev->config.si.tile_config |= 1 << 4; in si_gpu_init()
3246 rdev->config.si.tile_config |= 2 << 4; in si_gpu_init()
3249 rdev->config.si.tile_config |= in si_gpu_init()
3251 rdev->config.si.tile_config |= in si_gpu_init()
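	A worked example of the packing, assuming the usual 1/2/4/8-pipe encoding behind these cases: a 4-pipe part with a 2 KB row size ends up with tile_config = (2 << 0) | (1 << 4) = 0x12 in the low byte, before the two trailing |= statements fold in the remaining fields taken from GB_ADDR_CONFIG.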
3260 if (rdev->has_uvd) { in si_gpu_init()
3268 si_setup_rb(rdev, rdev->config.si.max_shader_engines, in si_gpu_init()
3269 rdev->config.si.max_sh_per_se, in si_gpu_init()
3270 rdev->config.si.max_backends_per_se); in si_gpu_init()
3272 si_setup_spi(rdev, rdev->config.si.max_shader_engines, in si_gpu_init()
3273 rdev->config.si.max_sh_per_se, in si_gpu_init()
3274 rdev->config.si.max_cu_per_sh); in si_gpu_init()
3276 rdev->config.si.active_cus = 0; in si_gpu_init()
3277 for (i = 0; i < rdev->config.si.max_shader_engines; i++) { in si_gpu_init()
3278 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { in si_gpu_init()
3279 rdev->config.si.active_cus += in si_gpu_init()
3294 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) | in si_gpu_init()
3295 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) | in si_gpu_init()
3296 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) | in si_gpu_init()
3297 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size))); in si_gpu_init()
3342 rdev->scratch.num_reg = 7; in si_scratch_init()
3343 rdev->scratch.reg_base = SCRATCH_REG0; in si_scratch_init()
3344 for (i = 0; i < rdev->scratch.num_reg; i++) { in si_scratch_init()
3345 rdev->scratch.free[i] = true; in si_scratch_init()
3346 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); in si_scratch_init()
3353 struct radeon_ring *ring = &rdev->ring[fence->ring]; in si_fence_ring_emit()
3354 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in si_fence_ring_emit()
3358 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); in si_fence_ring_emit()
3368 /* EVENT_WRITE_EOP - flush caches, send int */ in si_fence_ring_emit()
3373 radeon_ring_write(ring, fence->seq); in si_fence_ring_emit()
3382 struct radeon_ring *ring = &rdev->ring[ib->ring]; in si_ring_ib_execute()
3383 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; in si_ring_ib_execute()
3386 if (ib->is_const_ib) { in si_ring_ib_execute()
3394 if (ring->rptr_save_reg) { in si_ring_ib_execute()
3395 next_rptr = ring->wptr + 3 + 4 + 8; in si_ring_ib_execute()
3397 radeon_ring_write(ring, ((ring->rptr_save_reg - in si_ring_ib_execute()
3400 } else if (rdev->wb.enabled) { in si_ring_ib_execute()
3401 next_rptr = ring->wptr + 5 + 4 + 8; in si_ring_ib_execute()
3404 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); in si_ring_ib_execute()
3405 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); in si_ring_ib_execute()
3417 (ib->gpu_addr & 0xFFFFFFFC)); in si_ring_ib_execute()
3418 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); in si_ring_ib_execute()
3419 radeon_ring_write(ring, ib->length_dw | (vm_id << 24)); in si_ring_ib_execute()
3421 if (!ib->is_const_ib) { in si_ring_ib_execute()
3424 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); in si_ring_ib_execute()
3445 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) in si_cp_enable()
3446 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); in si_cp_enable()
3449 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; in si_cp_enable()
3450 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; in si_cp_enable()
3451 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; in si_cp_enable()
3460 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) in si_cp_load_microcode()
3461 return -EINVAL; in si_cp_load_microcode()
3465 if (rdev->new_fw) { in si_cp_load_microcode()
3467 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; in si_cp_load_microcode()
3469 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; in si_cp_load_microcode()
3471 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; in si_cp_load_microcode()
3475 radeon_ucode_print_gfx_hdr(&pfp_hdr->header); in si_cp_load_microcode()
3476 radeon_ucode_print_gfx_hdr(&ce_hdr->header); in si_cp_load_microcode()
3477 radeon_ucode_print_gfx_hdr(&me_hdr->header); in si_cp_load_microcode()
3481 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); in si_cp_load_microcode()
3482 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; in si_cp_load_microcode()
3490 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); in si_cp_load_microcode()
3491 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; in si_cp_load_microcode()
3499 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); in si_cp_load_microcode()
3500 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; in si_cp_load_microcode()
3509 fw_data = (const __be32 *)rdev->pfp_fw->data; in si_cp_load_microcode()
3516 fw_data = (const __be32 *)rdev->ce_fw->data; in si_cp_load_microcode()
3523 fw_data = (const __be32 *)rdev->me_fw->data; in si_cp_load_microcode()
3539 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_cp_start()
3551 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1); in si_cp_start()
3593 ring = &rdev->ring[i]; in si_cp_start()
3615 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_cp_fini()
3617 radeon_scratch_free(rdev, ring->rptr_save_reg); in si_cp_fini()
3619 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_cp_fini()
3621 radeon_scratch_free(rdev, ring->rptr_save_reg); in si_cp_fini()
3623 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_cp_fini()
3625 radeon_scratch_free(rdev, ring->rptr_save_reg); in si_cp_fini()
3644 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); in si_cp_resume()
3646 /* ring 0 - compute and gfx */ in si_cp_resume()
3648 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_cp_resume()
3649 rb_bufsz = order_base_2(ring->ring_size / 8); in si_cp_resume()
3658 ring->wptr = 0; in si_cp_resume()
3659 WREG32(CP_RB0_WPTR, ring->wptr); in si_cp_resume()
3662 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); in si_cp_resume()
3663 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); in si_cp_resume()
3665 if (rdev->wb.enabled) in si_cp_resume()
3675 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); in si_cp_resume()
3677 /* ring1 - compute only */ in si_cp_resume()
3679 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_cp_resume()
3680 rb_bufsz = order_base_2(ring->ring_size / 8); in si_cp_resume()
3689 ring->wptr = 0; in si_cp_resume()
3690 WREG32(CP_RB1_WPTR, ring->wptr); in si_cp_resume()
3693 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); in si_cp_resume()
3694 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); in si_cp_resume()
3699 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); in si_cp_resume()
3701 /* ring2 - compute only */ in si_cp_resume()
3703 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_cp_resume()
3704 rb_bufsz = order_base_2(ring->ring_size / 8); in si_cp_resume()
3713 ring->wptr = 0; in si_cp_resume()
3714 WREG32(CP_RB2_WPTR, ring->wptr); in si_cp_resume()
3717 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); in si_cp_resume()
3718 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); in si_cp_resume()
3723 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); in si_cp_resume()
3727 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; in si_cp_resume()
3728 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true; in si_cp_resume()
3729 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true; in si_cp_resume()
3730 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); in si_cp_resume()
3732 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; in si_cp_resume()
3733 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; in si_cp_resume()
3734 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; in si_cp_resume()
3737 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); in si_cp_resume()
3739 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; in si_cp_resume()
3741 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); in si_cp_resume()
3743 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; in si_cp_resume()
3748 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) in si_cp_resume()
3749 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); in si_cp_resume()
3826 /* Skip MC reset as it's most likely not hung, just busy */ in si_gpu_check_soft_reset()
3828 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); in si_gpu_check_soft_reset()
3844 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); in si_gpu_soft_reset()
3847 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in si_gpu_soft_reset()
3849 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in si_gpu_soft_reset()
3879 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_gpu_soft_reset()
3933 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); in si_gpu_soft_reset()
3947 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in si_gpu_soft_reset()
3979 for (i = 0; i < rdev->usec_timeout; i++) { in si_set_clk_bypass_mode()
4020 dev_info(rdev->dev, "GPU pci config reset\n"); in si_gpu_pci_config_reset()
4048 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_gpu_pci_config_reset()
4056 pci_clear_master(rdev->pdev); in si_gpu_pci_config_reset()
4060 for (i = 0; i < rdev->usec_timeout; i++) { in si_gpu_pci_config_reset()
4099 * si_gfx_is_lockup - Check if the GFX engine is locked up
4120 /* MC */
4139 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_mc_program()
4146 rdev->mc.vram_start >> 12); in si_mc_program()
4148 rdev->mc.vram_end >> 12); in si_mc_program()
4150 rdev->vram_scratch.gpu_addr >> 12); in si_mc_program()
4151 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; in si_mc_program()
4152 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); in si_mc_program()
4155 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); in si_mc_program()
4162 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_mc_program()
4173 struct radeon_mc *mc) in si_vram_gtt_location() argument
4175 if (mc->mc_vram_size > 0xFFC0000000ULL) { in si_vram_gtt_location()
4177 dev_warn(rdev->dev, "limiting VRAM\n"); in si_vram_gtt_location()
4178 mc->real_vram_size = 0xFFC0000000ULL; in si_vram_gtt_location()
4179 mc->mc_vram_size = 0xFFC0000000ULL; in si_vram_gtt_location()
4181 radeon_vram_location(rdev, &rdev->mc, 0); in si_vram_gtt_location()
4182 rdev->mc.gtt_base_align = 0; in si_vram_gtt_location()
4183 radeon_gtt_location(rdev, mc); in si_vram_gtt_location()
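	0xFFC0000000 is 1 TiB minus 1 GiB; since the SI memory controller decodes a 40-bit (1 TiB) address space, capping mc_vram_size there keeps at least 1 GiB of MC address space free for the GTT that radeon_gtt_location() places afterwards.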
4192 rdev->mc.vram_is_ddr = true; in si_mc_init()
4232 rdev->mc.vram_width = numchan * chansize; in si_mc_init()
4234 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); in si_mc_init()
4235 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); in si_mc_init()
4244 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL; in si_mc_init()
4245 rdev->mc.real_vram_size = rdev->mc.mc_vram_size; in si_mc_init()
4246 rdev->mc.visible_vram_size = rdev->mc.aper_size; in si_mc_init()
4247 si_vram_gtt_location(rdev, &rdev->mc); in si_mc_init()
4261 /* bits 0-15 are the VM contexts0-15 */ in si_pcie_gart_tlb_flush()
4269 if (rdev->gart.robj == NULL) { in si_pcie_gart_enable()
4270 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); in si_pcie_gart_enable()
4271 return -EINVAL; in si_pcie_gart_enable()
4296 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); in si_pcie_gart_enable()
4297 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); in si_pcie_gart_enable()
4298 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); in si_pcie_gart_enable()
4300 (u32)(rdev->dummy_page.addr >> 12)); in si_pcie_gart_enable()
4309 /* empty context1-15 */ in si_pcie_gart_enable()
4312 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); in si_pcie_gart_enable()
4320 rdev->vm_manager.saved_table_addr[i]); in si_pcie_gart_enable()
4322 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), in si_pcie_gart_enable()
4323 rdev->vm_manager.saved_table_addr[i]); in si_pcie_gart_enable()
4326 /* enable context1-15 */ in si_pcie_gart_enable()
4328 (u32)(rdev->dummy_page.addr >> 12)); in si_pcie_gart_enable()
4331 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | in si_pcie_gart_enable()
4347 (unsigned)(rdev->mc.gtt_size >> 20), in si_pcie_gart_enable()
4348 (unsigned long long)rdev->gart.table_addr); in si_pcie_gart_enable()
4349 rdev->gart.ready = true; in si_pcie_gart_enable()
4362 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); in si_pcie_gart_disable()
4363 rdev->vm_manager.saved_table_addr[i] = RREG32(reg); in si_pcie_gart_disable()
4440 switch (pkt->opcode) { in si_vm_packet3_ce_check()
4453 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode); in si_vm_packet3_ce_check()
4454 return -EINVAL; in si_vm_packet3_ce_check()
4473 return -EINVAL; in si_vm_packet3_cp_dma_check()
4480 return -EINVAL; in si_vm_packet3_cp_dma_check()
4494 return -EINVAL; in si_vm_packet3_cp_dma_check()
4501 return -EINVAL; in si_vm_packet3_cp_dma_check()
4514 u32 idx = pkt->idx + 1; in si_vm_packet3_gfx_check()
4518 switch (pkt->opcode) { in si_vm_packet3_gfx_check()
4569 return -EINVAL; in si_vm_packet3_gfx_check()
4577 return -EINVAL; in si_vm_packet3_gfx_check()
4579 for (i = 0; i < (pkt->count - 2); i++) { in si_vm_packet3_gfx_check()
4582 return -EINVAL; in si_vm_packet3_gfx_check()
4591 return -EINVAL; in si_vm_packet3_gfx_check()
4598 return -EINVAL; in si_vm_packet3_gfx_check()
4603 end_reg = 4 * pkt->count + start_reg - 4; in si_vm_packet3_gfx_check()
4608 return -EINVAL; in si_vm_packet3_gfx_check()
4610 for (i = 0; i < pkt->count; i++) { in si_vm_packet3_gfx_check()
4613 return -EINVAL; in si_vm_packet3_gfx_check()
4622 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); in si_vm_packet3_gfx_check()
4623 return -EINVAL; in si_vm_packet3_gfx_check()
4632 u32 idx = pkt->idx + 1; in si_vm_packet3_compute_check()
4636 switch (pkt->opcode) { in si_vm_packet3_compute_check()
4672 return -EINVAL; in si_vm_packet3_compute_check()
4680 return -EINVAL; in si_vm_packet3_compute_check()
4682 for (i = 0; i < (pkt->count - 2); i++) { in si_vm_packet3_compute_check()
4685 return -EINVAL; in si_vm_packet3_compute_check()
4694 return -EINVAL; in si_vm_packet3_compute_check()
4701 return -EINVAL; in si_vm_packet3_compute_check()
4710 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); in si_vm_packet3_compute_check()
4711 return -EINVAL; in si_vm_packet3_compute_check()
4724 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]); in si_ib_parse()
4725 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]); in si_ib_parse()
4729 dev_err(rdev->dev, "Packet0 not allowed!\n"); in si_ib_parse()
4730 ret = -EINVAL; in si_ib_parse()
4736 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]); in si_ib_parse()
4737 if (ib->is_const_ib) in si_ib_parse()
4738 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); in si_ib_parse()
4740 switch (ib->ring) { in si_ib_parse()
4742 ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt); in si_ib_parse()
4746 ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt); in si_ib_parse()
4749 dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring); in si_ib_parse()
4750 ret = -EINVAL; in si_ib_parse()
4757 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type); in si_ib_parse()
4758 ret = -EINVAL; in si_ib_parse()
4762 for (i = 0; i < ib->length_dw; i++) { in si_ib_parse()
4764 printk("\t0x%08x <---\n", ib->ptr[i]); in si_ib_parse()
4766 printk("\t0x%08x\n", ib->ptr[i]); in si_ib_parse()
4770 } while (idx < ib->length_dw); in si_ib_parse()
4781 rdev->vm_manager.nvm = 16; in si_vm_init()
4783 rdev->vm_manager.vram_base_offset = 0; in si_vm_init()
4793 * si_vm_decode_fault - print human readable fault info
4809 if (rdev->family == CHIP_TAHITI) { in si_vm_decode_fault()
5069 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2); in si_vm_flush()
5082 /* bits 0-15 are the VM contexts0-15 */ in si_vm_flush()
5112 for (i = 0; i < rdev->usec_timeout; i++) { in si_wait_for_rlc_serdes()
5118 for (i = 0; i < rdev->usec_timeout; i++) { in si_wait_for_rlc_serdes()
5143 for (i = 0; i < rdev->usec_timeout; i++) { in si_enable_gui_idle_interrupt()
5215 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA)) in si_enable_dma_pg()
5239 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { in si_enable_gfx_cgpg()
5263 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); in si_init_gfx_cgpg()
5269 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); in si_init_gfx_cgpg()
5294 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) { in si_get_cu_active_bitmap()
5308 for (i = 0; i < rdev->config.si.max_shader_engines; i++) { in si_init_ao_cu_mask()
5309 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { in si_init_ao_cu_mask()
5313 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { in si_init_ao_cu_mask()
5342 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { in si_enable_cgcg()
5380 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) { in si_enable_mgcg()
5386 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) { in si_enable_mgcg()
5436 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) { in si_enable_uvd_mgcg()
5484 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) in si_enable_mc_ls()
5501 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG)) in si_enable_mc_mgcg()
5516 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) { in si_enable_dma_mgcg()
5554 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS)) in si_enable_bif_mgls()
5572 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG)) in si_enable_hdp_mgcg()
5588 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS)) in si_enable_hdp_ls()
5627 if (rdev->has_uvd) { in si_update_cg()
5645 if (rdev->has_uvd) { in si_init_cg()
5653 if (rdev->has_uvd) { in si_fini_cg()
5669 if (rdev->rlc.cs_data == NULL) in si_get_csb_size()
5677 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { in si_get_csb_size()
5678 for (ext = sect->section; ext->extent != NULL; ++ext) { in si_get_csb_size()
5679 if (sect->id == SECT_CONTEXT) in si_get_csb_size()
5680 count += 2 + ext->reg_count; in si_get_csb_size()
5701 if (rdev->rlc.cs_data == NULL) in si_get_csb_buffer()
5713 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { in si_get_csb_buffer()
5714 for (ext = sect->section; ext->extent != NULL; ++ext) { in si_get_csb_buffer()
5715 if (sect->id == SECT_CONTEXT) { in si_get_csb_buffer()
5717 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); in si_get_csb_buffer()
5718 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000); in si_get_csb_buffer()
5719 for (i = 0; i < ext->reg_count; i++) in si_get_csb_buffer()
5720 buffer[count++] = cpu_to_le32(ext->extent[i]); in si_get_csb_buffer()
5728 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); in si_get_csb_buffer()
5729 switch (rdev->family) { in si_get_csb_buffer()
5757 if (rdev->pg_flags) { in si_init_pg()
5758 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) { in si_init_pg()
5762 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { in si_init_pg()
5765 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); in si_init_pg()
5766 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); in si_init_pg()
5771 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); in si_init_pg()
5772 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); in si_init_pg()
5778 if (rdev->pg_flags) { in si_fini_pg()
5849 if (!rdev->rlc_fw) in si_rlc_resume()
5850 return -EINVAL; in si_rlc_resume()
5870 if (rdev->new_fw) { in si_rlc_resume()
5872 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data; in si_rlc_resume()
5873 u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; in si_rlc_resume()
5875 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in si_rlc_resume()
5877 radeon_ucode_print_rlc_hdr(&hdr->header); in si_rlc_resume()
5885 (const __be32 *)rdev->rlc_fw->data; in si_rlc_resume()
5909 rdev->ih.enabled = true; in si_enable_interrupts()
5924 rdev->ih.enabled = false; in si_disable_interrupts()
5925 rdev->ih.rptr = 0; in si_disable_interrupts()
5944 for (i = 0; i < rdev->num_crtc; i++) in si_disable_interrupt_state()
5946 for (i = 0; i < rdev->num_crtc; i++) in si_disable_interrupt_state()
5981 WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); in si_irq_init()
5983 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi in si_irq_init()
5984 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN in si_irq_init()
5987 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ in si_irq_init()
5991 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); in si_irq_init()
5992 rb_bufsz = order_base_2(rdev->ih.ring_size / 4); in si_irq_init()
5998 if (rdev->wb.enabled) in si_irq_init()
6002 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); in si_irq_init()
6003 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); in si_irq_init()
6014 if (rdev->msi_enabled) in si_irq_init()
6021 pci_set_master(rdev->pdev); in si_irq_init()
6039 if (!rdev->irq.installed) { in si_irq_set()
6041 return -EINVAL; in si_irq_set()
6044 if (!rdev->ih.enabled) { in si_irq_set()
6061 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { in si_irq_set()
6065 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { in si_irq_set()
6069 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { in si_irq_set()
6073 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { in si_irq_set()
6078 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { in si_irq_set()
6092 if (rdev->irq.dpm_thermal) { in si_irq_set()
6097 for (i = 0; i < rdev->num_crtc; i++) { in si_irq_set()
6100 rdev->irq.crtc_vblank_int[i] || in si_irq_set()
6101 atomic_read(&rdev->irq.pflip[i]), "vblank", i); in si_irq_set()
6104 for (i = 0; i < rdev->num_crtc; i++) in si_irq_set()
6112 rdev->irq.hpd[i], "HPD", i); in si_irq_set()
6128 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int; in si_irq_ack()
6129 u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int; in si_irq_ack()
6136 if (i < rdev->num_crtc) in si_irq_ack()
6141 for (i = 0; i < rdev->num_crtc; i += 2) { in si_irq_ack()
6194 if (rdev->wb.enabled) in si_get_ih_wptr()
6195 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); in si_get_ih_wptr()
6205 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", in si_get_ih_wptr()
6206 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); in si_get_ih_wptr()
6207 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; in si_get_ih_wptr()
6212 return (wptr & rdev->ih.ptr_mask); in si_get_ih_wptr()
6217 * [7:0] - interrupt source id
6218 * [31:8] - reserved
6219 * [59:32] - interrupt source data
6220 * [63:60] - reserved
6221 * [71:64] - RINGID
6222 * [79:72] - VMID
6223 * [127:80] - reserved
6227 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int; in si_irq_process()
6240 if (!rdev->ih.enabled || rdev->shutdown) in si_irq_process()
6247 if (atomic_xchg(&rdev->ih.lock, 1)) in si_irq_process()
6250 rptr = rdev->ih.rptr; in si_irq_process()
6262 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; in si_irq_process()
6263 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; in si_irq_process()
6264 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff; in si_irq_process()
6273 crtc_idx = src_id - 1; in si_irq_process()
6279 if (rdev->irq.crtc_vblank_int[crtc_idx]) { in si_irq_process()
6281 rdev->pm.vblank_sync = true; in si_irq_process()
6282 wake_up(&rdev->irq.vblank_queue); in si_irq_process()
6284 if (atomic_read(&rdev->irq.pflip[crtc_idx])) { in si_irq_process()
6299 DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n", in si_irq_process()
6313 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); in si_irq_process()
6315 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); in si_irq_process()
6325 hpd_idx = src_data - 6; in si_irq_process()
6358 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); in si_irq_process()
6359 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in si_irq_process()
6361 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in si_irq_process()
6394 rdev->pm.dpm.thermal.high_to_low = false; in si_irq_process()
6399 rdev->pm.dpm.thermal.high_to_low = true; in si_irq_process()
6416 rptr &= rdev->ih.ptr_mask; in si_irq_process()
6420 schedule_work(&rdev->dp_work); in si_irq_process()
6422 schedule_delayed_work(&rdev->hotplug_work, 0); in si_irq_process()
6423 if (queue_thermal && rdev->pm.dpm_enabled) in si_irq_process()
6424 schedule_work(&rdev->pm.dpm.thermal.work); in si_irq_process()
6425 rdev->ih.rptr = rptr; in si_irq_process()
6426 atomic_set(&rdev->ih.lock, 0); in si_irq_process()
6443 if (!rdev->has_uvd) in si_uvd_init()
6448 dev_err(rdev->dev, "failed UVD (%d) init.\n", r); in si_uvd_init()
6450 * At this point rdev->uvd.vcpu_bo is NULL which trickles down in si_uvd_init()
6455 rdev->has_uvd = false; in si_uvd_init()
6458 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; in si_uvd_init()
6459 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096); in si_uvd_init()
6466 if (!rdev->has_uvd) in si_uvd_start()
6471 dev_err(rdev->dev, "failed UVD resume (%d).\n", r); in si_uvd_start()
6476 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r); in si_uvd_start()
6482 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; in si_uvd_start()
6490 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size) in si_uvd_resume()
6493 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; in si_uvd_resume()
6494 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); in si_uvd_resume()
6496 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); in si_uvd_resume()
6501 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r); in si_uvd_resume()
6510 if (!rdev->has_vce) in si_vce_init()
6515 dev_err(rdev->dev, "failed VCE (%d) init.\n", r); in si_vce_init()
6517 * At this point rdev->vce.vcpu_bo is NULL which trickles down in si_vce_init()
6522 rdev->has_vce = false; in si_vce_init()
6525 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL; in si_vce_init()
6526 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096); in si_vce_init()
6527 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL; in si_vce_init()
6528 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096); in si_vce_init()
6535 if (!rdev->has_vce) in si_vce_start()
6540 dev_err(rdev->dev, "failed VCE resume (%d).\n", r); in si_vce_start()
6545 dev_err(rdev->dev, "failed VCE resume (%d).\n", r); in si_vce_start()
6550 dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r); in si_vce_start()
6555 dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r); in si_vce_start()
6561 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; in si_vce_start()
6562 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; in si_vce_start()
6570 if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size) in si_vce_resume()
6573 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; in si_vce_resume()
6574 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP); in si_vce_resume()
6576 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r); in si_vce_resume()
6579 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; in si_vce_resume()
6580 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP); in si_vce_resume()
6582 dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r); in si_vce_resume()
6587 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r); in si_vce_resume()
6602 /* scratch needs to be initialized before MC */ in si_startup()
6609 if (!rdev->pm.dpm_enabled) { in si_startup()
6612 DRM_ERROR("Failed to load MC firmware!\n"); in si_startup()
6623 if (rdev->family == CHIP_VERDE) { in si_startup()
6624 rdev->rlc.reg_list = verde_rlc_save_restore_register_list; in si_startup()
6625 rdev->rlc.reg_list_size = in si_startup()
6628 rdev->rlc.cs_data = si_cs_data; in si_startup()
6642 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); in si_startup()
6648 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); in si_startup()
6654 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); in si_startup()
6660 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); in si_startup()
6666 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); in si_startup()
6674 if (!rdev->irq.installed) { in si_startup()
6688 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_startup()
6689 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, in si_startup()
6694 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_startup()
6695 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, in si_startup()
6700 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_startup()
6701 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, in si_startup()
6706 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; in si_startup()
6707 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, in si_startup()
6712 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; in si_startup()
6713 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, in si_startup()
6734 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); in si_startup()
6740 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); in si_startup()
6760 atom_asic_init(rdev->mode_info.atom_context); in si_resume()
6765 if (rdev->pm.pm_method == PM_METHOD_DPM) in si_resume()
6768 rdev->accel_working = true; in si_resume()
6772 rdev->accel_working = false; in si_resume()
6787 if (rdev->has_uvd) { in si_suspend()
6791 if (rdev->has_vce) in si_suspend()
6809 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_init()
6815 return -EINVAL; in si_init()
6818 if (!rdev->is_atom_bios) { in si_init()
6819 dev_err(rdev->dev, "Expecting atombios for SI GPU\n"); in si_init()
6820 return -EINVAL; in si_init()
6828 if (!rdev->bios) { in si_init()
6829 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); in si_init()
6830 return -EINVAL; in si_init()
6833 atom_asic_init(rdev->mode_info.atom_context); in si_init()
6856 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || in si_init()
6857 !rdev->rlc_fw || !rdev->mc_fw) { in si_init()
6868 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_init()
6869 ring->ring_obj = NULL; in si_init()
6872 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_init()
6873 ring->ring_obj = NULL; in si_init()
6876 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_init()
6877 ring->ring_obj = NULL; in si_init()
6880 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; in si_init()
6881 ring->ring_obj = NULL; in si_init()
6884 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; in si_init()
6885 ring->ring_obj = NULL; in si_init()
6891 rdev->ih.ring_obj = NULL; in si_init()
6898 rdev->accel_working = true; in si_init()
6901 dev_err(rdev->dev, "disabling GPU acceleration\n"); in si_init()
6911 rdev->accel_working = false; in si_init()
6914 /* Don't start up if the MC ucode is missing. in si_init()
6915 * The default clocks and voltages before the MC ucode in si_init()
6918 if (!rdev->mc_fw) { in si_init()
6919 DRM_ERROR("radeon: MC ucode required for NI+.\n"); in si_init()
6920 return -EINVAL; in si_init()
6939 if (rdev->has_uvd) { in si_fini()
6943 if (rdev->has_vce) in si_fini()
6951 kfree(rdev->bios); in si_fini()
6952 rdev->bios = NULL; in si_fini()
6956 * si_get_gpu_clock_counter - return GPU clock counter snapshot
6967 mutex_lock(&rdev->gpu_clock_mutex); in si_get_gpu_clock_counter()
6971 mutex_unlock(&rdev->gpu_clock_mutex); in si_get_gpu_clock_counter()
7066 struct pci_dev *root = rdev->pdev->bus->self; in si_pcie_gen3_enable()
7072 if (pci_is_root_bus(rdev->pdev->bus)) in si_pcie_gen3_enable()
7078 if (rdev->flags & RADEON_IS_IGP) in si_pcie_gen3_enable()
7081 if (!(rdev->flags & RADEON_IS_PCIE)) in si_pcie_gen3_enable()
7109 if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev)) in si_pcie_gen3_enable()
7113 /* re-try equalization if gen3 is not already enabled */ in si_pcie_gen3_enable()
7120 pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); in si_pcie_gen3_enable()
7138 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7146 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7152 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7171 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL, in si_pcie_gen3_enable()
7183 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2, in si_pcie_gen3_enable()
7209 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2, in si_pcie_gen3_enable()
7216 for (i = 0; i < rdev->usec_timeout; i++) { in si_pcie_gen3_enable()
7233 if (!(rdev->flags & RADEON_IS_PCIE)) in si_program_aspm()
7291 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) { in si_program_aspm()
7340 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN)) in si_program_aspm()
7347 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN)) in si_program_aspm()
7353 !pci_is_root_bus(rdev->pdev->bus)) { in si_program_aspm()
7354 struct pci_dev *root = rdev->pdev->bus->self; in si_program_aspm()
7454 return -ETIMEDOUT; in si_vce_send_vcepll_ctlreq()