// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */

#define dev_fmt(fmt) "tegra241_cmdqv: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>

#include <acpi/acpixf.h>

#include "arm-smmu-v3.h"

/* CMDQV register page base and size defines */
#define TEGRA241_CMDQV_CONFIG_BASE (0)
#define TEGRA241_CMDQV_CONFIG_SIZE (SZ_64K)
#define TEGRA241_VCMDQ_PAGE0_BASE (TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
#define TEGRA241_VCMDQ_PAGE1_BASE (TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
#define TEGRA241_VINTF_PAGE_BASE (TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)

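/*
 * MMIO layout, as implied by the defines above: a 64KB CONFIG page at the
 * base, followed by the global VCMDQ PAGE0 and PAGE1 regions (64KB each),
 * and then one 128KB window per VINTF holding its logical-VCMDQ pages.
 */
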
/* CMDQV global base regs */
#define TEGRA241_CMDQV_CONFIG 0x0000
#define CMDQV_EN BIT(0)

#define TEGRA241_CMDQV_PARAM 0x0004
#define CMDQV_NUM_VINTF_LOG2 GENMASK(11, 8)
#define CMDQV_NUM_VCMDQ_LOG2 GENMASK(7, 4)

#define TEGRA241_CMDQV_STATUS 0x0008
#define CMDQV_ENABLED BIT(0)

#define TEGRA241_CMDQV_VINTF_ERR_MAP 0x0014
#define TEGRA241_CMDQV_VINTF_INT_MASK 0x001C
#define TEGRA241_CMDQV_CMDQ_ERR_MAP(m) (0x0024 + 0x4*(m))

#define TEGRA241_CMDQV_CMDQ_ALLOC(q) (0x0200 + 0x4*(q))
#define CMDQV_CMDQ_ALLOC_VINTF GENMASK(20, 15)
#define CMDQV_CMDQ_ALLOC_LVCMDQ GENMASK(7, 1)
#define CMDQV_CMDQ_ALLOCATED BIT(0)

/* VINTF base regs */
#define TEGRA241_VINTF(v) (0x1000 + 0x100*(v))

#define TEGRA241_VINTF_CONFIG 0x0000
#define VINTF_HYP_OWN BIT(17)
#define VINTF_VMID GENMASK(16, 1)
#define VINTF_EN BIT(0)

#define TEGRA241_VINTF_STATUS 0x0004
#define VINTF_STATUS GENMASK(3, 1)
#define VINTF_ENABLED BIT(0)

#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
        (0x00C0 + 0x8*(m))
#define LVCMDQ_ERR_MAP_NUM_64 2
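/* Each 64-bit map register covers 64 LVCMDQs, so two map up to 128 per VINTF */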

/* VCMDQ base regs */
/* -- PAGE0 -- */
#define TEGRA241_VCMDQ_PAGE0(q) (TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))

#define TEGRA241_VCMDQ_CONS 0x00000
#define VCMDQ_CONS_ERR GENMASK(30, 24)

#define TEGRA241_VCMDQ_PROD 0x00004

#define TEGRA241_VCMDQ_CONFIG 0x00008
#define VCMDQ_EN BIT(0)

#define TEGRA241_VCMDQ_STATUS 0x0000C
#define VCMDQ_ENABLED BIT(0)

#define TEGRA241_VCMDQ_GERROR 0x00010
#define TEGRA241_VCMDQ_GERRORN 0x00014

/* -- PAGE1 -- */
#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define VCMDQ_ADDR GENMASK(47, 5)
#define VCMDQ_LOG2SIZE GENMASK(4, 0)
#define VCMDQ_LOG2SIZE_MAX 19

#define TEGRA241_VCMDQ_BASE 0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE 0x00008
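/*
 * TEGRA241_VCMDQ_BASE packs the queue base address (bits [47:5]) together
 * with LOG2SIZE (bits [4:0]); tegra241_vcmdq_alloc_smmu_cmdq() recomputes
 * q_base accordingly before it is written to this register.
 */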

/* VINTF logical-VCMDQ pages */
#define TEGRA241_VINTFi_PAGE0(i) (TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
#define TEGRA241_VINTFi_PAGE1(i) (TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
        (TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
        (TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))

/* MMIO helpers */
#define REG_CMDQV(_cmdqv, _regname) \
        ((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
#define REG_VINTF(_vintf, _regname) \
        ((_vintf)->base + TEGRA241_VINTF_##_regname)
#define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
        ((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
#define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
        ((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)

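/*
 * Example: readl(REG_VINTF(vintf, STATUS)) expands to
 * readl((vintf)->base + TEGRA241_VINTF_STATUS).
 */
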

static bool disable_cmdqv;
module_param(disable_cmdqv, bool, 0444);
MODULE_PARM_DESC(disable_cmdqv,
        "This allows disabling CMDQV HW and using the default SMMU internal CMDQ.");

static bool bypass_vcmdq;
module_param(bypass_vcmdq, bool, 0444);
MODULE_PARM_DESC(bypass_vcmdq,
        "This allows bypassing VCMDQ for debugging use or perf comparison.");

/**
 * struct tegra241_vcmdq - Virtual Command Queue
 * @idx: Global index in the CMDQV
 * @lidx: Local index in the VINTF
 * @enabled: Enable status
 * @cmdqv: Parent CMDQV pointer
 * @vintf: Parent VINTF pointer
 * @cmdq: Command Queue struct
 * @page0: MMIO Page0 base address
 * @page1: MMIO Page1 base address
 */
struct tegra241_vcmdq {
        u16 idx;
        u16 lidx;

        bool enabled;

        struct tegra241_cmdqv *cmdqv;
        struct tegra241_vintf *vintf;
        struct arm_smmu_cmdq cmdq;

        void __iomem *page0;
        void __iomem *page1;
};

/**
 * struct tegra241_vintf - Virtual Interface
 * @idx: Global index in the CMDQV
 * @enabled: Enable status
 * @hyp_own: Owned by hypervisor (in-kernel)
 * @cmdqv: Parent CMDQV pointer
 * @lvcmdqs: List of logical VCMDQ pointers
 * @base: MMIO base address
 */
struct tegra241_vintf {
        u16 idx;

        bool enabled;
        bool hyp_own;

        struct tegra241_cmdqv *cmdqv;
        struct tegra241_vcmdq **lvcmdqs;

        void __iomem *base;
};

/**
 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
 * @smmu: SMMUv3 device
 * @dev: CMDQV device
 * @base: MMIO base address
 * @irq: IRQ number
 * @num_vintfs: Total number of VINTFs
 * @num_vcmdqs: Total number of VCMDQs
 * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
 * @vintf_ids: VINTF id allocator
 * @vintfs: List of VINTFs
 */
struct tegra241_cmdqv {
        struct arm_smmu_device smmu;
        struct device *dev;

        void __iomem *base;
        int irq;

        /* CMDQV Hardware Params */
        u16 num_vintfs;
        u16 num_vcmdqs;
        u16 num_lvcmdqs_per_vintf;

        struct ida vintf_ids;

        struct tegra241_vintf **vintfs;
};
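
/*
 * Note: @smmu must stay the first member of struct tegra241_cmdqv so the
 * core driver's struct arm_smmu_device pointer can be converted back to the
 * containing struct with container_of() (see the static_assert() in
 * __tegra241_cmdqv_probe()).
 */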

/* Config and Polling Helpers */

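/*
 * All CMDQV/VINTF/VCMDQ enable bits follow the same pattern: write the
 * CONFIG register, then poll bit 0 of the corresponding STATUS register
 * until it reflects the requested enable/disable state.
 */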
static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
                                              void __iomem *addr_config,
                                              void __iomem *addr_status,
                                              u32 regval, const char *header,
                                              bool *out_enabled)
{
        bool en = regval & BIT(0);
        int ret;

        writel(regval, addr_config);
        ret = readl_poll_timeout(addr_status, regval,
                                 en ? regval & BIT(0) : !(regval & BIT(0)),
                                 1, ARM_SMMU_POLL_TIMEOUT_US);
        if (ret)
                dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
                        header, en ? "en" : "dis", regval);
        if (out_enabled)
                WRITE_ONCE(*out_enabled, regval & BIT(0));
        return ret;
}

static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
{
        return tegra241_cmdqv_write_config(cmdqv,
                                           REG_CMDQV(cmdqv, CONFIG),
                                           REG_CMDQV(cmdqv, STATUS),
                                           regval, "CMDQV: ", NULL);
}

static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
{
        char header[16];

        snprintf(header, 16, "VINTF%u: ", vintf->idx);
        return tegra241_cmdqv_write_config(vintf->cmdqv,
                                           REG_VINTF(vintf, CONFIG),
                                           REG_VINTF(vintf, STATUS),
                                           regval, header, &vintf->enabled);
}

static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
                                        char *header, int hlen)
{
        WARN_ON(hlen < 64);
        if (WARN_ON(!vcmdq->vintf))
                return "";
        snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
                 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
        return header;
}

static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
{
        char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);

        return tegra241_cmdqv_write_config(vcmdq->cmdqv,
                                           REG_VCMDQ_PAGE0(vcmdq, CONFIG),
                                           REG_VCMDQ_PAGE0(vcmdq, STATUS),
                                           regval, h, &vcmdq->enabled);
}

/* ISR Functions */

static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
{
        int i;

        for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
                u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));

                while (map) {
                        unsigned long lidx = __ffs64(map);
                        struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
                        u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));

                        __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
                        writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
                        map &= ~BIT_ULL(lidx);
                }
        }
}

static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
{
        struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
        void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
        char err_str[256];
        u64 vintf_map;

        /* Use readl_relaxed() as register addresses are not 64-bit aligned */
        vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
                    (u64)readl_relaxed(reg_vintf_map);

        snprintf(err_str, sizeof(err_str),
                 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
                 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
                 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
                 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
                 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));

        dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);

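        /*
         * Only VINTF0 is initialized for in-kernel use (see
         * tegra241_cmdqv_init_structures()), so it is the only interface
         * whose errors are handled here.
         */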
        /* Handle VINTF0 and its LVCMDQs */
        if (vintf_map & BIT_ULL(0)) {
                tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
                vintf_map &= ~BIT_ULL(0);
        }

        return IRQ_HANDLED;
}

/* Command Queue Function */

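/*
 * A guest-owned VINTF has HYP_OWN wired to zero, and its VCMDQs only accept
 * the invalidation commands listed below; everything else must go through
 * the regular SMMU CMDQ (see the HYP_OWN note in tegra241_vintf_hw_init()).
 */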
static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
{
        switch (ent->opcode) {
        case CMDQ_OP_TLBI_NH_ASID:
        case CMDQ_OP_TLBI_NH_VA:
        case CMDQ_OP_ATC_INV:
                return true;
        default:
                return false;
        }
}

static struct arm_smmu_cmdq *
tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
                        struct arm_smmu_cmdq_ent *ent)
{
        struct tegra241_cmdqv *cmdqv =
                container_of(smmu, struct tegra241_cmdqv, smmu);
        struct tegra241_vintf *vintf = cmdqv->vintfs[0];
        struct tegra241_vcmdq *vcmdq;
        u16 lidx;

        if (READ_ONCE(bypass_vcmdq))
                return NULL;

        /* Use SMMU CMDQ if VINTF0 is uninitialized */
        if (!READ_ONCE(vintf->enabled))
                return NULL;

        /*
         * Select an LVCMDQ to use. Here we use a temporary solution to
         * balance out traffic on cmdq issuing: each cmdq has its own
         * lock, so if all CPUs issue a cmdlist using the same cmdq, only
         * one CPU at a time can enter the process, while the others
         * will be spinning at the same lock.
         */
        lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
        vcmdq = vintf->lvcmdqs[lidx];
        if (!vcmdq || !READ_ONCE(vcmdq->enabled))
                return NULL;

        /* Unsupported CMDs go via the standard smmu->cmdq pathway */
        if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
                return NULL;
        return &vcmdq->cmdq;
}

/* HW Reset Functions */

static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{
        char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
        u32 gerrorn, gerror;

        if (vcmdq_write_config(vcmdq, 0)) {
                dev_err(vcmdq->cmdqv->dev,
                        "%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
                        readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
                        readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
                        readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
        }
        writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
        writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
        writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
        writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));

        gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
        gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
        if (gerror != gerrorn) {
                dev_warn(vcmdq->cmdqv->dev,
                         "%suncleared error detected, resetting\n", h);
                writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
        }

        dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
}

static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{
        char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
        int ret;

        /* Reset VCMDQ */
        tegra241_vcmdq_hw_deinit(vcmdq);

        /* Configure and enable VCMDQ */
        writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

        ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
        if (ret) {
                dev_err(vcmdq->cmdqv->dev,
                        "%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
                        readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
                        readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
                        readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
                return ret;
        }

        dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
        return 0;
}

static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
{
        u16 lidx;

        for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
                if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
                        tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
        vintf_write_config(vintf, 0);
}

static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
{
        u32 regval;
        u16 lidx;
        int ret;

        /* Reset VINTF */
        tegra241_vintf_hw_deinit(vintf);

        /* Configure and enable VINTF */
        /*
         * Note that the HYP_OWN bit is wired to zero when running in a guest
         * kernel, regardless of what is written here, and a !HYP_OWN cmdq HW
         * only supports a restricted set of commands.
         */
        regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
        writel(regval, REG_VINTF(vintf, CONFIG));

        ret = vintf_write_config(vintf, regval | VINTF_EN);
        if (ret)
                return ret;
        /*
         * As mentioned above, the HYP_OWN bit is wired to zero for a guest
         * kernel, so read it back from the HW to ensure that hyp_own reflects
         * the real value.
         */
        vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));

        for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
                if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
                        ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
                        if (ret) {
                                tegra241_vintf_hw_deinit(vintf);
                                return ret;
                        }
                }
        }

        return 0;
}
static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
{
        struct tegra241_cmdqv *cmdqv =
                container_of(smmu, struct tegra241_cmdqv, smmu);
        u16 qidx, lidx, idx;
        u32 regval;
        int ret;

        /* Reset CMDQV */
        regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
        ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
        if (ret)
                return ret;
        ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
        if (ret)
                return ret;

        /* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
        for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
                for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
                        regval = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
                        regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
                        regval |= CMDQV_CMDQ_ALLOCATED;
                        writel_relaxed(regval,
                                       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
                }
        }

        return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
}

/* VCMDQ Resource Helpers */

static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
        struct arm_smmu_queue *q = &vcmdq->cmdq.q;
        size_t nents = 1 << q->llq.max_n_shift;
        size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;

        if (!q->base)
                return;
        dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
}

static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
        struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
        struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
        struct arm_smmu_queue *q = &cmdq->q;
        char name[16];
        int ret;

        snprintf(name, 16, "vcmdq%u", vcmdq->idx);

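        /* Use the maximum queue size that the VCMDQ HW can support */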
        q->llq.max_n_shift = VCMDQ_LOG2SIZE_MAX;

        /* Use the common helper to init the VCMDQ, and then... */
        ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
                                      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
                                      CMDQ_ENT_DWORDS, name);
        if (ret)
                return ret;

        /* ...override q_base to write VCMDQ_BASE registers */
        q->q_base = q->base_dma & VCMDQ_ADDR;
        q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);

        if (!vcmdq->vintf->hyp_own)
                cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;

        return arm_smmu_cmdq_init(smmu, cmdq);
}

/* VINTF Logical VCMDQ Resource Helpers */

static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
        vintf->lvcmdqs[lidx] = NULL;
}

static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
                                      struct tegra241_vcmdq *vcmdq)
{
        struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
        u16 idx = vintf->idx;

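        /*
         * Global VCMDQ index, matching the VINTF/LVCMDQ mapping programmed
         * via the CMDQ_ALLOC registers in tegra241_cmdqv_hw_reset().
         */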
        vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
        vcmdq->lidx = lidx;
        vcmdq->cmdqv = cmdqv;
        vcmdq->vintf = vintf;
        vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
        vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);

        vintf->lvcmdqs[lidx] = vcmdq;
        return 0;
}

static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
        struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
        char header[64];

        tegra241_vcmdq_free_smmu_cmdq(vcmdq);
        tegra241_vintf_deinit_lvcmdq(vintf, lidx);

        dev_dbg(vintf->cmdqv->dev,
                "%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
        kfree(vcmdq);
}

static struct tegra241_vcmdq *
tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
        struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
        struct tegra241_vcmdq *vcmdq;
        char header[64];
        int ret;

        vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
        if (!vcmdq)
                return ERR_PTR(-ENOMEM);

        ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
        if (ret)
                goto free_vcmdq;

        /* Build an arm_smmu_cmdq for each LVCMDQ */
        ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
        if (ret)
                goto deinit_lvcmdq;

        dev_dbg(cmdqv->dev,
                "%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
        return vcmdq;

deinit_lvcmdq:
        tegra241_vintf_deinit_lvcmdq(vintf, lidx);
free_vcmdq:
        kfree(vcmdq);
        return ERR_PTR(ret);
}

/* VINTF Resource Helpers */

static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
        kfree(cmdqv->vintfs[idx]->lvcmdqs);
        ida_free(&cmdqv->vintf_ids, idx);
        cmdqv->vintfs[idx] = NULL;
}

static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
                                     struct tegra241_vintf *vintf)
{

        u16 idx;
        int ret;

        ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
        if (ret < 0)
                return ret;
        idx = ret;

        vintf->idx = idx;
        vintf->cmdqv = cmdqv;
        vintf->base = cmdqv->base + TEGRA241_VINTF(idx);

        vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
                                 sizeof(*vintf->lvcmdqs), GFP_KERNEL);
        if (!vintf->lvcmdqs) {
                ida_free(&cmdqv->vintf_ids, idx);
                return -ENOMEM;
        }

        cmdqv->vintfs[idx] = vintf;
        return ret;
}

/* Remove Helpers */

static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
        tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
        tegra241_vintf_free_lvcmdq(vintf, lidx);
}

static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
        struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
        u16 lidx;

        /* Remove LVCMDQ resources */
        for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
                if (vintf->lvcmdqs[lidx])
                        tegra241_vintf_remove_lvcmdq(vintf, lidx);

        /* Remove VINTF resources */
        tegra241_vintf_hw_deinit(vintf);

        dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
        tegra241_cmdqv_deinit_vintf(cmdqv, idx);
        kfree(vintf);
}

static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
{
        struct tegra241_cmdqv *cmdqv =
                container_of(smmu, struct tegra241_cmdqv, smmu);
        u16 idx;

        /* Remove VINTF resources */
        for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
                if (cmdqv->vintfs[idx]) {
                        /* Only vintf0 should remain at this stage */
                        WARN_ON(idx > 0);
                        tegra241_cmdqv_remove_vintf(cmdqv, idx);
                }
        }

        /* Remove cmdqv resources */
        ida_destroy(&cmdqv->vintf_ids);

        if (cmdqv->irq > 0)
                free_irq(cmdqv->irq, cmdqv);
        iounmap(cmdqv->base);
        kfree(cmdqv->vintfs);
        put_device(cmdqv->dev); /* smmu->impl_dev */
}

static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
        .get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
        .device_reset = tegra241_cmdqv_hw_reset,
        .device_remove = tegra241_cmdqv_remove,
};

/* Probe Functions */

static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
{
        struct resource_win win;

        return !acpi_dev_resource_address_space(res, &win);
}

static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
{
        struct resource r;
        int *irq = data;

        if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
                *irq = r.start;
        return 1; /* No need to add resource to the list */
}

static struct resource *
tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
{
        struct acpi_device *adev = to_acpi_device(dev);
        struct list_head resource_list;
        struct resource_entry *rentry;
        struct resource *res = NULL;
        int ret;

        INIT_LIST_HEAD(&resource_list);
        ret = acpi_dev_get_resources(adev, &resource_list,
                                     tegra241_cmdqv_acpi_is_memory, NULL);
        if (ret < 0) {
                dev_err(dev, "failed to get memory resource: %d\n", ret);
                return NULL;
        }

        rentry = list_first_entry_or_null(&resource_list,
                                          struct resource_entry, node);
        if (!rentry) {
                dev_err(dev, "failed to get memory resource entry\n");
                goto free_list;
        }

        /* Caller must free the res */
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                goto free_list;

        *res = *rentry->res;

        acpi_dev_free_resource_list(&resource_list);

        INIT_LIST_HEAD(&resource_list);

        if (irq)
                ret = acpi_dev_get_resources(adev, &resource_list,
                                             tegra241_cmdqv_acpi_get_irqs, irq);
        if (ret < 0 || !irq || *irq <= 0)
                dev_warn(dev, "no interrupt. errors will not be reported\n");

free_list:
        acpi_dev_free_resource_list(&resource_list);
        return res;
}

static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
{
        struct tegra241_cmdqv *cmdqv =
                container_of(smmu, struct tegra241_cmdqv, smmu);
        struct tegra241_vintf *vintf;
        int lidx;
        int ret;

        vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
        if (!vintf)
                goto out_fallback;

        /* Init VINTF0 for in-kernel use */
        ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
        if (ret) {
                dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
                goto free_vintf;
        }

        /* Preallocate logical VCMDQs to VINTF0 */
        for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
                struct tegra241_vcmdq *vcmdq;

                vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
                if (IS_ERR(vcmdq))
                        goto free_lvcmdq;
        }

        /* Now, we are ready to run all the impl ops */
        smmu->impl_ops = &tegra241_cmdqv_impl_ops;
        return 0;

free_lvcmdq:
        for (lidx--; lidx >= 0; lidx--)
                tegra241_vintf_free_lvcmdq(vintf, lidx);
        tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
free_vintf:
        kfree(vintf);
out_fallback:
        dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
        smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
        tegra241_cmdqv_remove(smmu);
        return 0;
}

struct dentry *cmdqv_debugfs_dir;

static struct arm_smmu_device *
__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
                       int irq)
{
        static const struct arm_smmu_impl_ops init_ops = {
                .init_structures = tegra241_cmdqv_init_structures,
                .device_remove = tegra241_cmdqv_remove,
        };
        struct tegra241_cmdqv *cmdqv = NULL;
        struct arm_smmu_device *new_smmu;
        void __iomem *base;
        u32 regval;
        int ret;

        static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);

        base = ioremap(res->start, resource_size(res));
        if (!base) {
                dev_err(smmu->dev, "failed to ioremap\n");
                return NULL;
        }

        regval = readl(base + TEGRA241_CMDQV_CONFIG);
        if (disable_cmdqv) {
                dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
                writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
                goto iounmap;
        }

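        /*
         * Enlarge the devres-managed SMMU allocation so that it becomes the
         * first member of struct tegra241_cmdqv; the impl code can then use
         * container_of() to move between the two views of the same object.
         */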
        cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
        if (!cmdqv)
                goto iounmap;
        new_smmu = &cmdqv->smmu;

        cmdqv->irq = irq;
        cmdqv->base = base;
        cmdqv->dev = smmu->impl_dev;

        if (cmdqv->irq > 0) {
                ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
                                  cmdqv);
                if (ret) {
                        dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
                                cmdqv->irq, ret);
                        goto iounmap;
                }
        }

        regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
        cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
        cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
        cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;

        cmdqv->vintfs =
                kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
        if (!cmdqv->vintfs)
                goto free_irq;

        ida_init(&cmdqv->vintf_ids);

#ifdef CONFIG_IOMMU_DEBUGFS
        if (!cmdqv_debugfs_dir) {
                cmdqv_debugfs_dir =
                        debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
                debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
                                    &bypass_vcmdq);
        }
#endif

        /* Provide init-level ops only, until tegra241_cmdqv_init_structures */
        new_smmu->impl_ops = &init_ops;

        return new_smmu;

free_irq:
        if (cmdqv->irq > 0)
                free_irq(cmdqv->irq, cmdqv);
iounmap:
        iounmap(base);
        return NULL;
}

struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
        struct arm_smmu_device *new_smmu;
        struct resource *res = NULL;
        int irq;

        if (!smmu->dev->of_node)
                res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
        if (!res)
                goto out_fallback;

        new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
        kfree(res);

        if (new_smmu)
                return new_smmu;

out_fallback:
        dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
        smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
        put_device(smmu->impl_dev);
        return ERR_PTR(-ENODEV);
}