// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>

#include "dev.h"
#include "fence.h"
#include "intr.h"
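/*
 * Insert @fence into @list so that the list stays sorted by increasing
 * threshold. The comparison is done in wrapping 32-bit arithmetic, so
 * thresholds that straddle the syncpoint wrap-around point still order
 * correctly.
 */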
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					  struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

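/*
 * Reprogram the hardware to match the software state: point the syncpoint
 * interrupt threshold at the earliest pending fence and enable the
 * interrupt, or disable the interrupt if no fences are waiting.
 */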
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

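/*
 * Queue @fence on its syncpoint and update the interrupt threshold.
 * Callers must hold fence->sp->fences.lock; a minimal usage sketch
 * (illustrative only, not a verbatim caller):
 *
 *	spin_lock_irqsave(&fence->sp->fences.lock, flags);
 *	host1x_intr_add_fence_locked(host, fence);
 *	spin_unlock_irqrestore(&fence->sp->fences.lock, flags);
 */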
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

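/*
 * Remove @fence from its syncpoint's fence list if it is still queued.
 * Returns true if this call removed the fence, false if it was already
 * gone (for example, already signalled by the interrupt handler).
 */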
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

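/*
 * Syncpoint interrupt handler: signal every fence whose threshold the
 * syncpoint value has reached (using the same wrapping comparison as the
 * list insertion above), then re-arm or disable the interrupt depending on
 * whether any fences remain.
 */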
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

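/*
 * Set up the per-syncpoint fence lists and request one shared IRQ per
 * syncpoint interrupt line. Both the irq_data array and the IRQs are
 * device-managed, so no explicit teardown is required.
 */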
int host1x_intr_init(struct host1x *host)
{
	struct host1x_intr_irq_data *irq_data;
	unsigned int id;
	int i, err;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (i = 0; i < host->num_syncpt_irqs; i++) {
		irq_data[i].host = host;
		irq_data[i].offset = i;

		err = devm_request_irq(host->dev, host->syncpt_irqs[i],
				       host->intr_op->isr, IRQF_SHARED,
				       "host1x_syncpt", &irq_data[i]);
		if (err < 0)
			return err;
	}

	return 0;
}

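/* Intentionally empty: the IRQs and irq_data above are released by devm. */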
void host1x_intr_deinit(struct host1x *host)
{
}

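/*
 * Bring the interrupt logic up, e.g. at probe or after resume. The host1x
 * clock rate is handed to the hardware sync logic as a cycles-per-microsecond
 * value.
 */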
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

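/* Mask all syncpoint interrupts, e.g. before suspending or removing host1x. */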
void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}