// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Advanced Micro Devices, Inc.
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/amd-iommu.h>
#include <linux/delay.h>
#include <linux/mmu_notifier.h>

#include <asm/iommu.h>

#include "amd_iommu.h"
#include "amd_iommu_types.h"

#include "../iommu-pages.h"

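/*
 * Allocate the PPR (Peripheral Page Request) log: a ring buffer shared
 * with the hardware, into which the IOMMU writes page requests coming
 * from PRI-capable devices.
 */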
int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);
	return iommu->ppr_log ? 0 : -ENOMEM;
}

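/*
 * Program the log base and size into the MMIO register, reset the head
 * and tail pointers, and enable PPR support plus the PPR log interrupt.
 */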
void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	iommu_feature_enable(iommu, CONTROL_PPR_EN);

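	/*
	 * The log size is encoded in the same 64-bit register value as
	 * the physical base address; PPR_LOG_SIZE_512 selects a
	 * 512-entry log.
	 */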
	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
}

void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
	iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

/*
 * This function restarts PPR logging in case the IOMMU experienced
 * a PPR log overflow.
 */
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
{
	amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
			      CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
			      MMIO_STATUS_PPR_OVERFLOW_MASK);
}

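/*
 * Translate the permission bits of a PPR request into the generic
 * IOMMU_FAULT_PERM_* flags consumed by the IOPF layer. US=0
 * (supervisor mode) maps to a privileged access.
 */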
static inline u32 ppr_flag_to_fault_perm(u16 flag)
{
	int perm = 0;

	if (flag & PPR_FLAG_READ)
		perm |= IOMMU_FAULT_PERM_READ;
	if (flag & PPR_FLAG_WRITE)
		perm |= IOMMU_FAULT_PERM_WRITE;
	if (flag & PPR_FLAG_EXEC)
		perm |= IOMMU_FAULT_PERM_EXEC;
	if (!(flag & PPR_FLAG_US))
		perm |= IOMMU_FAULT_PERM_PRIV;

	return perm;
}

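/*
 * A PPR log entry is two 64-bit words: raw[0] carries the device ID,
 * PASID, flags and tag (extracted with the PPR_* accessors), and
 * raw[1] carries the faulting address. A request is only actionable
 * if it has a valid PASID (GN=1) and no reserved bits set (RVSD=0).
 */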
static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw)
{
	struct device *dev = iommu->iommu.dev;
	u16 devid = PPR_DEVID(raw[0]);

	if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) {
		dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x "
			"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx)]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
		return false;
	}

	if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) {
		dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x "
			"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx)]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
		return false;
	}

	return true;
}

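/*
 * Convert a raw PPR log entry into a struct iopf_fault and hand it to
 * the IOMMU core. If the entry is malformed or the PASID is out of
 * range, complete the request with IOMMU_PAGE_RESP_FAILURE so the
 * device is not left waiting for a response.
 */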
static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw)
{
	struct iommu_dev_data *dev_data;
	struct iopf_fault event;
	struct pci_dev *pdev;
	u16 devid = PPR_DEVID(raw[0]);

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_info_ratelimited("Unknown PPR request received\n");
		return;
	}

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
					   PCI_BUS_NUM(devid), devid & 0xff);
	if (!pdev)
		return;

	if (!ppr_is_valid(iommu, raw))
		goto out;

	memset(&event, 0, sizeof(struct iopf_fault));

	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0]));
	event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK);
	event.fault.prm.pasid = PPR_PASID(raw[0]);
	event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF;

	/*
	 * PASID zero is used for requests from an I/O device without
	 * a PASID.
	 */
	dev_data = dev_iommu_priv_get(&pdev->dev);
	if (event.fault.prm.pasid == 0 ||
	    event.fault.prm.pasid >= dev_data->max_pasids) {
		pr_info_ratelimited("Invalid PASID : 0x%x, device : 0x%x\n",
				    event.fault.prm.pasid, pdev->dev.id);
		goto out;
	}

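	/*
	 * The PPR tag encodes the PCIe PRI page-request-group fields:
	 * bits 8:0 are the PRG index (used as grpid above) and bit 9 is
	 * the "last request in group" flag checked below.
	 */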
	event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	if (PPR_TAG(raw[0]) & 0x200)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	/* Submit event */
	iommu_report_device_fault(&pdev->dev, &event);

	return;

out:
	/* Nobody cared, abort */
	amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]),
			       IOMMU_PAGE_RESP_FAILURE,
			       PPR_TAG(raw[0]) & 0x1FF);
}

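/*
 * Drain the PPR log ring buffer. This is expected to be called from
 * the IOMMU interrupt path once the hardware signals that new PPR log
 * entries are available.
 */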
void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens, we need to wait for
		 * the entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

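		/*
		 * If the entry still looks empty after LOOP_TIMEOUT
		 * iterations, fall through anyway; the notifier below
		 * discards entries whose request type is unknown.
		 */
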
		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect hardware erratum 733 we need to clear the
		 * entry back to zero. This issue does not exist on
		 * SNP-enabled systems, where this buffer is also not
		 * writable.
		 */
		if (!amd_iommu_snp_en)
			raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* Handle PPR entry */
		iommu_call_iopf_notifier(iommu, entry);
	}
}

/**************************************************************
 *
 * IOPF (I/O Page Fault) handling
 */

/* Set up the per-IOMMU IOPF queue if it does not already exist. */
int amd_iommu_iopf_init(struct amd_iommu *iommu)
{
	int ret = 0;

	if (iommu->iopf_queue)
		return ret;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x",
		 PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));

	iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
	if (!iommu->iopf_queue)
		ret = -ENOMEM;

	return ret;
}

/* Destroy the per-IOMMU IOPF queue when it is no longer needed. */
void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
{
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
}

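/*
 * Forward a page response from the IOPF core to the device as a PPR
 * completion.
 */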
void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			     struct iommu_page_response *resp)
{
	amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
}

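/*
 * Register a PRI-enabled device with this IOMMU's IOPF queue so that
 * reported faults can be queued and serviced.
 */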
int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
			      struct iommu_dev_data *dev_data)
{
	int ret = 0;

	if (!dev_data->pri_enabled)
		return ret;

	if (!iommu->iopf_queue)
		return -EINVAL;

	ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
	if (ret)
		return ret;

	dev_data->ppr = true;
	return 0;
}

/*
 * It is assumed that the caller has verified that the device was added
 * to the IOPF queue.
 */
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
				  struct iommu_dev_data *dev_data)
{
	iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
	dev_data->ppr = false;
}