// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/vgaarb.h>

#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>

#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"

static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
{
	pci_dev_put(bridge);
}

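/*
 * Look up the host bridge (device 0, function 0) on the GPU's PCI domain and
 * register a DRM-managed action to drop the reference again on teardown.
 */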
int intel_gmch_bridge_setup(struct drm_i915_private *i915)
{
	int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);

	i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!i915->gmch.pdev) {
		drm_err(&i915->drm, "bridge device not found\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
					i915->gmch.pdev);
}

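/* Config space offset of the MCHBAR register (a 64-bit register on gen4+) */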
static int mchbar_reg(struct drm_i915_private *i915)
{
	return GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (GRAPHICS_VER(i915) >= 4)
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, &temp_hi);
	pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
	if (IS_ENABLED(CONFIG_PNP) && mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;

	/* Get some space for it */
	i915->gmch.mch_res.name = "i915 MCHBAR";
	i915->gmch.mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
				     &i915->gmch.mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     i915->gmch.pdev);
	if (ret) {
		drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
		i915->gmch.mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(i915) >= 4)
		pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4,
				       upper_32_bits(i915->gmch.mch_res.start));

	pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
			       lower_32_bits(i915->gmch.mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; note whether it must be disabled again on teardown */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		return;

	i915->gmch.mchbar_need_disable = false;

	if (IS_I915G(i915) || IS_I915GM(i915)) {
		pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(i915))
		return;

	i915->gmch.mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(i915) || IS_I915GM(i915)) {
		pci_write_config_dword(i915->gmch.pdev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
		pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), temp | 1);
	}
}

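/* Undo intel_gmch_bar_setup(): disable MCHBAR if we enabled it and release the resource */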
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
	if (i915->gmch.mchbar_need_disable) {
		if (IS_I915G(i915) || IS_I915GM(i915)) {
			u32 deven_val;

			pci_read_config_dword(i915->gmch.pdev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(i915->gmch.pdev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915),
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
					       mchbar_val);
		}
	}

	if (i915->gmch.mch_res.start)
		release_resource(&i915->gmch.mch_res);
}

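/* Enable or disable VGA decode by flipping the VGA disable bit in the GMCH control register */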
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
	unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
		drm_err(&i915->drm, "failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
		return 0;

	if (enable_decode)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
		drm_err(&i915->drm, "failed to write control word\n");
		return -EIO;
	}

	return 0;
}

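/*
 * VGA arbiter decode hook: update the decode state and report which VGA
 * resources (VGA_RSRC_* flags) the device still decodes.
 */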
unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
	struct drm_i915_private *i915 = pdev_to_i915(pdev);

	intel_gmch_vga_set_state(i915, enable_decode);

	if (enable_decode)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}