1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This driver adds support for HNS3 PMU iEP device. Related perf events are
4 * bandwidth, latency, packet rate, interrupt rate etc.
5 *
6 * Copyright (C) 2022 HiSilicon Limited
7 */
8 #include <linux/bitfield.h>
9 #include <linux/bitmap.h>
10 #include <linux/bug.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/cpumask.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/io-64-nonatomic-hi-lo.h>
19 #include <linux/irq.h>
20 #include <linux/kernel.h>
21 #include <linux/list.h>
22 #include <linux/module.h>
23 #include <linux/pci.h>
24 #include <linux/pci-epf.h>
25 #include <linux/perf_event.h>
26 #include <linux/smp.h>
27
28 /* registers offset address */
29 #define HNS3_PMU_REG_GLOBAL_CTRL 0x0000
30 #define HNS3_PMU_REG_CLOCK_FREQ 0x0020
31 #define HNS3_PMU_REG_BDF 0x0fe0
32 #define HNS3_PMU_REG_VERSION 0x0fe4
33 #define HNS3_PMU_REG_DEVICE_ID 0x0fe8
34
35 #define HNS3_PMU_REG_EVENT_OFFSET 0x1000
36 #define HNS3_PMU_REG_EVENT_SIZE 0x1000
37 #define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00
38 #define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04
39 #define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08
40 #define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c
41 #define HNS3_PMU_REG_EVENT_COUNTER 0x10
42 #define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18
43 #define HNS3_PMU_REG_EVENT_QID_CTRL 0x28
44 #define HNS3_PMU_REG_EVENT_QID_PARA 0x2c
45
46 #define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0)
47 #define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1)
48 #define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2)
49 #define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3)
50 #define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4)
51 #define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5)
52
53 #define HNS3_PMU_FILTER_ALL_TC 0xf
54 #define HNS3_PMU_FILTER_ALL_QUEUE 0xffff
55
56 #define HNS3_PMU_CTRL_SUBEVENT_S 4
57 #define HNS3_PMU_CTRL_FILTER_MODE_S 24
58
59 #define HNS3_PMU_GLOBAL_START BIT(0)
60
61 #define HNS3_PMU_EVENT_STATUS_RESET BIT(11)
62 #define HNS3_PMU_EVENT_EN BIT(12)
63 #define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15)
64
65 #define HNS3_PMU_QID_PARA_FUNC_S 0
66 #define HNS3_PMU_QID_PARA_QUEUE_S 16
67
68 #define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0)
69 #define HNS3_PMU_QID_CTRL_DONE BIT(1)
70 #define HNS3_PMU_QID_CTRL_MISS BIT(2)
71
72 #define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1)
73
74 #define HNS3_PMU_MAX_HW_EVENTS 8
75
/*
 * Each hardware event contains two registers (counter and ext_counter) for
 * bandwidth, packet rate, latency and interrupt rate. These two registers are
 * started together when a hardware event is enabled. The meaning of counter
 * and ext_counter depends on the event type, as shown below:
 *
 * +----------------+------------------+---------------+
 * | event type     | counter          | ext_counter   |
 * +----------------+------------------+---------------+
 * | bandwidth      | byte number      | cycle number  |
 * +----------------+------------------+---------------+
 * | packet rate    | packet number    | cycle number  |
 * +----------------+------------------+---------------+
 * | latency        | cycle number     | packet number |
 * +----------------+------------------+---------------+
 * | interrupt rate | interrupt number | cycle number  |
 * +----------------+------------------+---------------+
 *
 * The cycle number is the increment of the hardware timer counter; the
 * frequency of the hardware timer can be read from the hw_clk_freq file.
 *
 * Performance of each hardware event is calculated by: counter / ext_counter.
 *
 * Since processing of data is preferred to be done in userspace, we expose
 * ext_counter as a separate event for userspace and use bit 16 to indicate it.
 * For example, events 0x00001 and 0x10001 are actually one hardware event
 * because bits 0-15 are the same. If bit 16 of an event is 0, the counter
 * register is read; otherwise the ext_counter register is read.
 */
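/*
 * Illustrative example derived from the table above (not additional hardware
 * behaviour): bandwidth on the SSU -> EGU path can be computed from the event
 * pair 0x00001 (byte number) and 0x10001 (cycle number) as
 *
 *   bandwidth = byte_number * hw_clk_freq / cycle_number
 *
 * since cycle_number / hw_clk_freq is the measurement time in seconds.
 */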
106 /* bandwidth events */
107 #define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001
108 #define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001
109 #define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002
110 #define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002
111 #define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003
112 #define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003
113 #define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004
114 #define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004
115 #define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005
116 #define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005
117 #define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006
118 #define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006
119 #define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008
120 #define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008
121 #define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009
122 #define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009
123 #define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a
124 #define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a
125 #define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b
126 #define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b
127 #define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c
128 #define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c
129 #define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d
130 #define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d
131 #define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e
132 #define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e
133 #define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f
134 #define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f
135 #define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010
136 #define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010
137
138 /* packet rate events */
139 #define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100
140 #define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100
141 #define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101
142 #define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101
143 #define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102
144 #define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102
145 #define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103
146 #define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103
147 #define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104
148 #define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104
149 #define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105
150 #define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105
151 #define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106
152 #define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106
153 #define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107
154 #define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107
155 #define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108
156 #define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108
157 #define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109
158 #define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109
159 #define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a
160 #define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a
161 #define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b
162 #define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b
163 #define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c
164 #define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c
165 #define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d
166 #define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d
167 #define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e
168 #define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e
169 #define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f
170 #define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f
171 #define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110
172 #define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110
173 #define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111
174 #define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111
175 #define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112
176 #define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112
177
178 /* latency events */
179 #define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202
180 #define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202
181 #define HNS3_PMU_EVT_DLY_TX_TIME 0x00204
182 #define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204
183 #define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206
184 #define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206
185 #define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207
186 #define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207
187 #define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208
188 #define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208
189 #define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209
190 #define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209
191 #define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e
192 #define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e
193 #define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f
194 #define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f
195 #define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210
196 #define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210
197 #define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211
198 #define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211
199 #define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212
200 #define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212
201 #define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213
202 #define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213
203 #define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214
204 #define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214
205 #define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215
206 #define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215
207 #define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216
208 #define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216
209 #define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217
210 #define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217
211 #define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218
212 #define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218
213 #define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219
214 #define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219
215 #define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a
216 #define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a
217 #define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c
218 #define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c
219
220 /* interrupt rate events */
221 #define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300
222 #define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300
223
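/*
 * The filter mode values below are bitmaps of the HNS3_PMU_FILTER_SUPPORT_*
 * bits above, i.e. the set of filter modes supported by each event.
 */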
224 /* filter mode supported by each bandwidth event */
225 #define HNS3_PMU_FILTER_BW_SSU_EGU 0x07
226 #define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f
227 #define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f
228 #define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f
229 #define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f
230 #define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11
231 #define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11
232 #define HNS3_PMU_FILTER_BW_WR_FBD 0x1b
233 #define HNS3_PMU_FILTER_BW_WR_EBD 0x11
234 #define HNS3_PMU_FILTER_BW_RD_FBD 0x01
235 #define HNS3_PMU_FILTER_BW_RD_EBD 0x1b
236 #define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01
237 #define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01
238 #define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01
239 #define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01
240
241 /* filter mode supported by each packet rate event */
242 #define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07
243 #define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07
244 #define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f
245 #define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f
246 #define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f
247 #define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f
248 #define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11
249 #define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f
250 #define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11
251 #define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b
252 #define HNS3_PMU_FILTER_PPS_WR_EBD 0x11
253 #define HNS3_PMU_FILTER_PPS_RD_FBD 0x01
254 #define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b
255 #define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01
256 #define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01
257 #define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01
258 #define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01
259 #define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01
260 #define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01
261
262 /* filter mode supported by each latency event */
263 #define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01
264 #define HNS3_PMU_FILTER_DLY_TX 0x01
265 #define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07
266 #define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07
267 #define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07
268 #define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07
269 #define HNS3_PMU_FILTER_DLY_RPU 0x11
270 #define HNS3_PMU_FILTER_DLY_TPU 0x1f
271 #define HNS3_PMU_FILTER_DLY_RPE 0x01
272 #define HNS3_PMU_FILTER_DLY_TPE 0x0b
273 #define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b
274 #define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b
275 #define HNS3_PMU_FILTER_DLY_WR_EBD 0x11
276 #define HNS3_PMU_FILTER_DLY_RD_FBD 0x01
277 #define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b
278 #define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01
279 #define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01
280 #define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01
281 #define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01
282 #define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01
283
284 /* filter mode supported by each interrupt rate event */
285 #define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01
286
287 enum hns3_pmu_hw_filter_mode {
288 HNS3_PMU_HW_FILTER_GLOBAL,
289 HNS3_PMU_HW_FILTER_PORT,
290 HNS3_PMU_HW_FILTER_PORT_TC,
291 HNS3_PMU_HW_FILTER_FUNC,
292 HNS3_PMU_HW_FILTER_FUNC_QUEUE,
293 HNS3_PMU_HW_FILTER_FUNC_INTR,
294 };
295
296 struct hns3_pmu_event_attr {
297 u32 event;
298 u16 filter_support;
299 };
300
301 struct hns3_pmu {
302 struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS];
303 struct hlist_node node;
304 struct pci_dev *pdev;
305 struct pmu pmu;
306 void __iomem *base;
307 int irq;
308 int on_cpu;
309 u32 identifier;
310 u32 hw_clk_freq; /* hardware clock frequency of PMU */
	/* minimum and maximum bdf allowed by PMU */
312 u16 bdf_min;
313 u16 bdf_max;
314 };
315
316 #define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu))
317
318 #define GET_PCI_DEVFN(bdf) ((bdf) & 0xff)
319
320 #define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff)
321 #define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
322 #define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))
323
324 #define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \
325 static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \
326 { \
327 return FIELD_GET(GENMASK_ULL(_end, _start), \
328 event->attr._config); \
329 }
330
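/*
 * Generated accessors: hns3_pmu_get_<name>() extracts the named bit field from
 * perf_event_attr::config or perf_event_attr::config1.
 */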
331 HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7);
332 HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15);
333 HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16);
334 HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
335 HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
336 HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
337 HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
338 HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
339 HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
340
341 #define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\
342 HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \
343 HNS3_PMU_FILTER_BW_##_name})
344 #define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
345 HNS3_PMU_EVT_BW_##_name##_TIME, \
346 HNS3_PMU_FILTER_BW_##_name})
347 #define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
348 HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \
349 HNS3_PMU_FILTER_PPS_##_name})
350 #define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
351 HNS3_PMU_EVT_PPS_##_name##_TIME, \
352 HNS3_PMU_FILTER_PPS_##_name})
353 #define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
354 HNS3_PMU_EVT_DLY_##_name##_TIME, \
355 HNS3_PMU_FILTER_DLY_##_name})
356 #define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
357 HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \
358 HNS3_PMU_FILTER_DLY_##_name})
359 #define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\
360 HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \
361 HNS3_PMU_FILTER_INTR_##_name})
362 #define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
363 HNS3_PMU_EVT_PPS_##_name##_TIME, \
364 HNS3_PMU_FILTER_INTR_##_name})
365
static ssize_t hns3_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
368 {
369 struct hns3_pmu_event_attr *event;
370 struct dev_ext_attribute *eattr;
371
372 eattr = container_of(attr, struct dev_ext_attribute, attr);
373 event = eattr->var;
374
375 return sysfs_emit(buf, "config=0x%x\n", event->event);
376 }
377
static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
381 {
382 struct hns3_pmu_event_attr *event;
383 struct dev_ext_attribute *eattr;
384 int len;
385
386 eattr = container_of(attr, struct dev_ext_attribute, attr);
387 event = eattr->var;
388
389 len = sysfs_emit_at(buf, 0, "filter mode supported: ");
390 if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
391 len += sysfs_emit_at(buf, len, "global ");
392 if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
393 len += sysfs_emit_at(buf, len, "port ");
394 if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
395 len += sysfs_emit_at(buf, len, "port-tc ");
396 if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
397 len += sysfs_emit_at(buf, len, "func ");
398 if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
399 len += sysfs_emit_at(buf, len, "func-queue ");
400 if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
401 len += sysfs_emit_at(buf, len, "func-intr ");
402
403 len += sysfs_emit_at(buf, len, "\n");
404
405 return len;
406 }
407
408 #define HNS3_PMU_ATTR(_name, _func, _config) \
409 (&((struct dev_ext_attribute[]) { \
410 { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
411 })[0].attr.attr)
412
413 #define HNS3_PMU_FORMAT_ATTR(_name, _format) \
414 HNS3_PMU_ATTR(_name, device_show_string, _format)
415 #define HNS3_PMU_EVENT_ATTR(_name, _event) \
416 HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
417 #define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
418 HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)
419
420 #define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \
421 HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
422 HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
423 #define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \
424 HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
425 HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
426 #define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \
427 HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
428 HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
429 #define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \
430 HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
431 HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
432
433 #define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \
434 HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
435 HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
436 #define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \
437 HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
438 HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
439 #define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \
440 HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
441 HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
442 #define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \
443 HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
444 HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
445
446 static u8 hns3_pmu_hw_filter_modes[] = {
447 HNS3_PMU_HW_FILTER_GLOBAL,
448 HNS3_PMU_HW_FILTER_PORT,
449 HNS3_PMU_HW_FILTER_PORT_TC,
450 HNS3_PMU_HW_FILTER_FUNC,
451 HNS3_PMU_HW_FILTER_FUNC_QUEUE,
452 HNS3_PMU_HW_FILTER_FUNC_INTR,
453 };
454
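/*
 * The selected hardware filter mode is stashed in hwc->addr_filters (otherwise
 * unused by this PMU) as a pointer into the table above.
 */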
455 #define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
456 ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])
457
static ssize_t identifier_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
460 {
461 struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
462
463 return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
464 }
465 static DEVICE_ATTR_RO(identifier);
466
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
469 {
470 struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
471
472 return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
473 }
474 static DEVICE_ATTR_RO(cpumask);
475
static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
478 {
479 struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
480 u16 bdf = hns3_pmu->bdf_min;
481
482 return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
483 PCI_SLOT(bdf), PCI_FUNC(bdf));
484 }
485 static DEVICE_ATTR_RO(bdf_min);
486
static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
489 {
490 struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
491 u16 bdf = hns3_pmu->bdf_max;
492
493 return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
494 PCI_SLOT(bdf), PCI_FUNC(bdf));
495 }
496 static DEVICE_ATTR_RO(bdf_max);
497
static ssize_t hw_clk_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
500 {
501 struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
502
503 return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
504 }
505 static DEVICE_ATTR_RO(hw_clk_freq);
506
507 static struct attribute *hns3_pmu_events_attr[] = {
508 /* bandwidth events */
509 HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU),
510 HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU),
511 HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE),
512 HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU),
513 HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU),
514 HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
515 HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
516 HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD),
517 HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD),
518 HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD),
519 HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD),
520 HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0),
521 HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1),
522 HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0),
523 HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1),
524
525 /* packet rate events */
526 HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU),
527 HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU),
528 HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU),
529 HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE),
530 HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU),
531 HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU),
532 HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
533 HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
534 HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
535 HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD),
536 HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD),
537 HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD),
538 HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD),
539 HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0),
540 HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1),
541 HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0),
542 HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1),
543 HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
544 HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
545
546 /* latency events */
547 HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH),
548 HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX),
549 HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
550 HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
551 HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
552 HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
553 HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU),
554 HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU),
555 HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE),
556 HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE),
557 HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH),
558 HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD),
559 HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD),
560 HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD),
561 HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD),
562 HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0),
563 HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1),
564 HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0),
565 HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1),
566 HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE),
567
568 /* interrupt rate events */
569 HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC),
570
571 NULL
572 };
573
574 static struct attribute *hns3_pmu_filter_mode_attr[] = {
575 /* bandwidth events */
576 HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU),
577 HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU),
578 HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE),
579 HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU),
580 HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU),
581 HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
582 HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
583 HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD),
584 HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD),
585 HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD),
586 HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD),
587 HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0),
588 HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1),
589 HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0),
590 HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1),
591
592 /* packet rate events */
593 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU),
594 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU),
595 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU),
596 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE),
597 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU),
598 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU),
599 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
600 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
601 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
602 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD),
603 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD),
604 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD),
605 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD),
606 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0),
607 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1),
608 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0),
609 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1),
610 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
611 HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
612
613 /* latency events */
614 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH),
615 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX),
616 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
617 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
618 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
619 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
620 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU),
621 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU),
622 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE),
623 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE),
624 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH),
625 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD),
626 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD),
627 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD),
628 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD),
629 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0),
630 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1),
631 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0),
632 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1),
633 HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE),
634
635 /* interrupt rate events */
636 HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC),
637
638 NULL
639 };
640
641 static struct attribute_group hns3_pmu_events_group = {
642 .name = "events",
643 .attrs = hns3_pmu_events_attr,
644 };
645
646 static struct attribute_group hns3_pmu_filter_mode_group = {
647 .name = "filtermode",
648 .attrs = hns3_pmu_filter_mode_attr,
649 };
650
651 static struct attribute *hns3_pmu_format_attr[] = {
652 HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
653 HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
654 HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
655 HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
656 HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
657 HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
658 HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
659 HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
660 HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
661 NULL
662 };
663
664 static struct attribute_group hns3_pmu_format_group = {
665 .name = "format",
666 .attrs = hns3_pmu_format_attr,
667 };
668
669 static struct attribute *hns3_pmu_cpumask_attrs[] = {
670 &dev_attr_cpumask.attr,
671 NULL
672 };
673
674 static struct attribute_group hns3_pmu_cpumask_attr_group = {
675 .attrs = hns3_pmu_cpumask_attrs,
676 };
677
678 static struct attribute *hns3_pmu_identifier_attrs[] = {
679 &dev_attr_identifier.attr,
680 NULL
681 };
682
683 static struct attribute_group hns3_pmu_identifier_attr_group = {
684 .attrs = hns3_pmu_identifier_attrs,
685 };
686
687 static struct attribute *hns3_pmu_bdf_range_attrs[] = {
688 &dev_attr_bdf_min.attr,
689 &dev_attr_bdf_max.attr,
690 NULL
691 };
692
693 static struct attribute_group hns3_pmu_bdf_range_attr_group = {
694 .attrs = hns3_pmu_bdf_range_attrs,
695 };
696
697 static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = {
698 &dev_attr_hw_clk_freq.attr,
699 NULL
700 };
701
702 static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = {
703 .attrs = hns3_pmu_hw_clk_freq_attrs,
704 };
705
706 static const struct attribute_group *hns3_pmu_attr_groups[] = {
707 &hns3_pmu_events_group,
708 &hns3_pmu_filter_mode_group,
709 &hns3_pmu_format_group,
710 &hns3_pmu_cpumask_attr_group,
711 &hns3_pmu_identifier_attr_group,
712 &hns3_pmu_bdf_range_attr_group,
713 &hns3_pmu_hw_clk_freq_attr_group,
714 NULL
715 };
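/*
 * Userspace example (hypothetical SICL id 0): chip-wide SSU -> EGU byte count
 * can be read with something like
 *   perf stat -e hns3_pmu_sicl_0/bw_ssu_egu_byte_num,global=1/
 * where "global" is the config1 bit defined in the format attributes above.
 */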
716
static u32 hns3_pmu_get_event(struct perf_event *event)
718 {
719 return hns3_pmu_get_ext_counter_used(event) << 16 |
720 hns3_pmu_get_event_type(event) << 8 |
721 hns3_pmu_get_subevent(event);
722 }
723
static u32 hns3_pmu_get_real_event(struct perf_event *event)
725 {
726 return hns3_pmu_get_event_type(event) << 8 |
727 hns3_pmu_get_subevent(event);
728 }
729
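/*
 * Each hardware event owns a HNS3_PMU_REG_EVENT_SIZE register window starting
 * at HNS3_PMU_REG_EVENT_OFFSET; reg_offset is the offset within that window.
 */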
static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
731 {
732 return offset + HNS3_PMU_REG_EVENT_OFFSET +
733 HNS3_PMU_REG_EVENT_SIZE * idx;
734 }
735
static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
737 {
738 u32 offset = hns3_pmu_get_offset(reg_offset, idx);
739
740 return readl(hns3_pmu->base + offset);
741 }
742
static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
			    u32 val)
745 {
746 u32 offset = hns3_pmu_get_offset(reg_offset, idx);
747
748 writel(val, hns3_pmu->base + offset);
749 }
750
static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
752 {
753 u32 offset = hns3_pmu_get_offset(reg_offset, idx);
754
755 return readq(hns3_pmu->base + offset);
756 }
757
static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
			    u64 val)
760 {
761 u32 offset = hns3_pmu_get_offset(reg_offset, idx);
762
763 writeq(val, hns3_pmu->base + offset);
764 }
765
static bool hns3_pmu_cmp_event(struct perf_event *target,
			       struct perf_event *event)
768 {
769 return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
770 }
771
static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu,
					    struct perf_event *event)
774 {
775 struct perf_event *sibling;
776 int hw_event_used = 0;
777 int idx;
778
779 for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
780 sibling = hns3_pmu->hw_events[idx];
781 if (!sibling)
782 continue;
783
784 hw_event_used++;
785
786 if (!hns3_pmu_cmp_event(sibling, event))
787 continue;
788
		/* The related event is used in the same group */
790 if (sibling->group_leader == event->group_leader)
791 return idx;
792 }
793
794 /* No related event and all hardware events are used up */
795 if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS)
796 return -EBUSY;
797
	/* No related event, but there are spare hardware events that can be used */
799 return -ENOENT;
800 }
801
static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
803 {
804 int idx;
805
806 for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
807 if (!hns3_pmu->hw_events[idx])
808 return idx;
809 }
810
811 return -EBUSY;
812 }
813
static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
815 {
816 struct pci_dev *pdev;
817
818 if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
819 pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
820 return false;
821 }
822
823 pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
824 PCI_BUS_NUM(bdf),
825 GET_PCI_DEVFN(bdf));
826 if (!pdev) {
827 pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
828 return false;
829 }
830
831 pci_dev_put(pdev);
832 return true;
833 }
834
static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
				  u16 queue)
837 {
838 u32 val;
839
840 val = GET_PCI_DEVFN(bdf);
841 val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
842 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
843 }
844
static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
846 {
847 bool queue_id_valid = false;
848 u32 reg_qid_ctrl, val;
849 int err;
850
851 /* enable queue id request */
852 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
853 HNS3_PMU_QID_CTRL_REQ_ENABLE);
854
855 reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
856 err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
857 val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
858 if (err == -ETIMEDOUT) {
859 pci_err(hns3_pmu->pdev, "QID request timeout!\n");
860 goto out;
861 }
862
863 queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS);
864
865 out:
866 /* disable qid request and clear status */
867 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
868
869 return queue_id_valid;
870 }
871
static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
				 u16 queue)
874 {
875 hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);
876
877 return hns3_pmu_qid_req_start(hns3_pmu, idx);
878 }
879
static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event)
881 {
882 struct hns3_pmu_event_attr *pmu_event;
883 struct dev_ext_attribute *eattr;
884 struct device_attribute *dattr;
885 struct attribute *attr;
886 u32 i;
887
888 for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
889 attr = hns3_pmu_events_attr[i];
890 dattr = container_of(attr, struct device_attribute, attr);
891 eattr = container_of(dattr, struct dev_ext_attribute, attr);
892 pmu_event = eattr->var;
893
894 if (event == pmu_event->event)
895 return pmu_event;
896 }
897
898 return NULL;
899 }
900
static int hns3_pmu_set_func_mode(struct perf_event *event,
				  struct hns3_pmu *hns3_pmu)
903 {
904 struct hw_perf_event *hwc = &event->hw;
905 u16 bdf = hns3_pmu_get_bdf(event);
906
907 if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
908 return -ENOENT;
909
910 HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);
911
912 return 0;
913 }
914
static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
					struct hns3_pmu *hns3_pmu)
917 {
918 u16 queue_id = hns3_pmu_get_queue(event);
919 struct hw_perf_event *hwc = &event->hw;
920 u16 bdf = hns3_pmu_get_bdf(event);
921
922 if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
923 return -ENOENT;
924
925 if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
926 pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
927 return -ENOENT;
928 }
929
930 HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);
931
932 return 0;
933 }
934
935 static bool
hns3_pmu_is_enabled_global_mode(struct perf_event *event,
				struct hns3_pmu_event_attr *pmu_event)
938 {
939 u8 global = hns3_pmu_get_global(event);
940
941 if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL))
942 return false;
943
944 return global;
945 }
946
static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
					  struct hns3_pmu_event_attr *pmu_event)
949 {
950 u16 queue_id = hns3_pmu_get_queue(event);
951 u16 bdf = hns3_pmu_get_bdf(event);
952
953 if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC))
954 return false;
955 else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE)
956 return false;
957
958 return bdf;
959 }
960
961 static bool
hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
				    struct hns3_pmu_event_attr *pmu_event)
964 {
965 u16 queue_id = hns3_pmu_get_queue(event);
966 u16 bdf = hns3_pmu_get_bdf(event);
967
968 if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE))
969 return false;
970 else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE)
971 return false;
972
973 return bdf;
974 }
975
static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
					  struct hns3_pmu_event_attr *pmu_event)
978 {
979 u8 tc_id = hns3_pmu_get_tc(event);
980
981 if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT))
982 return false;
983
984 return tc_id == HNS3_PMU_FILTER_ALL_TC;
985 }
986
987 static bool
hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
				 struct hns3_pmu_event_attr *pmu_event)
990 {
991 u8 tc_id = hns3_pmu_get_tc(event);
992
993 if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC))
994 return false;
995
996 return tc_id != HNS3_PMU_FILTER_ALL_TC;
997 }
998
999 static bool
hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
				   struct hns3_pmu *hns3_pmu,
				   struct hns3_pmu_event_attr *pmu_event)
1003 {
1004 u16 bdf = hns3_pmu_get_bdf(event);
1005
1006 if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR))
1007 return false;
1008
1009 return hns3_pmu_valid_bdf(hns3_pmu, bdf);
1010 }
1011
static int hns3_pmu_select_filter_mode(struct perf_event *event,
				       struct hns3_pmu *hns3_pmu)
1014 {
1015 u32 event_id = hns3_pmu_get_event(event);
1016 struct hw_perf_event *hwc = &event->hw;
1017 struct hns3_pmu_event_attr *pmu_event;
1018
1019 pmu_event = hns3_pmu_get_pmu_event(event_id);
1020 if (!pmu_event) {
1021 pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
1022 return -ENOENT;
1023 }
1024
1025 if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
1026 HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
1027 return 0;
1028 }
1029
1030 if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
1031 return hns3_pmu_set_func_mode(event, hns3_pmu);
1032
1033 if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
1034 return hns3_pmu_set_func_queue_mode(event, hns3_pmu);
1035
1036 if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
1037 HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
1038 return 0;
1039 }
1040
1041 if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
1042 HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
1043 return 0;
1044 }
1045
1046 if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
1047 HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
1048 return 0;
1049 }
1050
1051 return -ENOENT;
1052 }
1053
static bool hns3_pmu_validate_event_group(struct perf_event *event)
1055 {
1056 struct perf_event *sibling, *leader = event->group_leader;
1057 struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS];
1058 int counters = 1;
1059 int num;
1060
1061 event_group[0] = leader;
1062 if (!is_software_event(leader)) {
1063 if (leader->pmu != event->pmu)
1064 return false;
1065
1066 if (leader != event && !hns3_pmu_cmp_event(leader, event))
1067 event_group[counters++] = event;
1068 }
1069
1070 for_each_sibling_event(sibling, event->group_leader) {
1071 if (is_software_event(sibling))
1072 continue;
1073
1074 if (sibling->pmu != event->pmu)
1075 return false;
1076
1077 for (num = 0; num < counters; num++) {
1078 /*
1079 * If we find a related event, then it's a valid group
1080 * since we don't need to allocate a new counter for it.
1081 */
1082 if (hns3_pmu_cmp_event(event_group[num], sibling))
1083 break;
1084 }
1085
1086 /*
1087 * Otherwise it's a new event but if there's no available counter,
1088 * fail the check since we cannot schedule all the events in
1089 * the group simultaneously.
1090 */
1091 if (num == HNS3_PMU_MAX_HW_EVENTS)
1092 return false;
1093
1094 if (num == counters)
1095 event_group[counters++] = sibling;
1096 }
1097
1098 return true;
1099 }
1100
static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
1102 {
1103 struct hw_perf_event *hwc = &event->hw;
1104 u16 intr_id = hns3_pmu_get_intr(event);
1105 u8 port_id = hns3_pmu_get_port(event);
1106 u16 bdf = hns3_pmu_get_bdf(event);
1107 u8 tc_id = hns3_pmu_get_tc(event);
1108 u8 filter_mode;
1109
1110 filter_mode = *(u8 *)hwc->addr_filters;
1111 switch (filter_mode) {
1112 case HNS3_PMU_HW_FILTER_PORT:
1113 return FILTER_CONDITION_PORT(port_id);
1114 case HNS3_PMU_HW_FILTER_PORT_TC:
1115 return FILTER_CONDITION_PORT_TC(port_id, tc_id);
1116 case HNS3_PMU_HW_FILTER_FUNC:
1117 case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
1118 return GET_PCI_DEVFN(bdf);
1119 case HNS3_PMU_HW_FILTER_FUNC_INTR:
1120 return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id);
1121 default:
1122 break;
1123 }
1124
1125 return 0;
1126 }
1127
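/*
 * Program the per-event control registers: CTRL_LOW takes the event type,
 * subevent, filter mode and overflow-restart flag, CTRL_HIGH takes the filter
 * condition; the queue parameters are set separately for the func-queue mode.
 */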
static void hns3_pmu_config_filter(struct perf_event *event)
1129 {
1130 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1131 u8 event_type = hns3_pmu_get_event_type(event);
1132 u8 subevent_id = hns3_pmu_get_subevent(event);
1133 u16 queue_id = hns3_pmu_get_queue(event);
1134 struct hw_perf_event *hwc = &event->hw;
1135 u8 filter_mode = *(u8 *)hwc->addr_filters;
1136 u16 bdf = hns3_pmu_get_bdf(event);
1137 u32 idx = hwc->idx;
1138 u32 val;
1139
1140 val = event_type;
1141 val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
1142 val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
1143 val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
1144 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
1145
1146 val = hns3_pmu_get_filter_condition(event);
1147 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);
1148
1149 if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
1150 hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
1151 }
1152
static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
				    struct hw_perf_event *hwc)
1155 {
1156 u32 idx = hwc->idx;
1157 u32 val;
1158
1159 val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
1160 val |= HNS3_PMU_EVENT_EN;
1161 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
1162 }
1163
static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
				     struct hw_perf_event *hwc)
1166 {
1167 u32 idx = hwc->idx;
1168 u32 val;
1169
1170 val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
1171 val &= ~HNS3_PMU_EVENT_EN;
1172 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
1173 }
1174
static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
				 struct hw_perf_event *hwc)
1177 {
1178 u32 idx = hwc->idx;
1179 u32 val;
1180
1181 val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
1182 val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
1183 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
1184 }
1185
static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
				  struct hw_perf_event *hwc)
1188 {
1189 u32 idx = hwc->idx;
1190 u32 val;
1191
1192 val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
1193 val |= HNS3_PMU_INTR_MASK_OVERFLOW;
1194 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
1195 }
1196
static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
1198 {
1199 u32 val;
1200
1201 val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
1202 val |= HNS3_PMU_EVENT_STATUS_RESET;
1203 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
1204
1205 val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
1206 val &= ~HNS3_PMU_EVENT_STATUS_RESET;
1207 hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
1208 }
1209
static u64 hns3_pmu_read_counter(struct perf_event *event)
1211 {
1212 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1213
1214 return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
1215 }
1216
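/*
 * counter and ext_counter always run in lockstep (see the table at the top of
 * this file), so both registers are written when a counter is (re)initialized.
 */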
static void hns3_pmu_write_counter(struct perf_event *event, u64 value)
1218 {
1219 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1220 u32 idx = event->hw.idx;
1221
1222 hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
1223 hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
1224 }
1225
static void hns3_pmu_init_counter(struct perf_event *event)
1227 {
1228 struct hw_perf_event *hwc = &event->hw;
1229
1230 local64_set(&hwc->prev_count, 0);
1231 hns3_pmu_write_counter(event, 0);
1232 }
1233
static int hns3_pmu_event_init(struct perf_event *event)
1235 {
1236 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1237 struct hw_perf_event *hwc = &event->hw;
1238 int idx;
1239 int ret;
1240
1241 if (event->attr.type != event->pmu->type)
1242 return -ENOENT;
1243
1244 /* Sampling is not supported */
1245 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1246 return -EOPNOTSUPP;
1247
1248 event->cpu = hns3_pmu->on_cpu;
1249
1250 idx = hns3_pmu_get_event_idx(hns3_pmu);
1251 if (idx < 0) {
1252 pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
1253 HNS3_PMU_MAX_HW_EVENTS);
1254 return -EBUSY;
1255 }
1256
1257 hwc->idx = idx;
1258
1259 ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
1260 if (ret) {
1261 pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
1262 return ret;
1263 }
1264
1265 if (!hns3_pmu_validate_event_group(event)) {
1266 pci_err(hns3_pmu->pdev, "Invalid event group.\n");
1267 return -EINVAL;
1268 }
1269
1270 if (hns3_pmu_get_ext_counter_used(event))
1271 hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER;
1272 else
1273 hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER;
1274
1275 return 0;
1276 }
1277
static void hns3_pmu_read(struct perf_event *event)
1279 {
1280 struct hw_perf_event *hwc = &event->hw;
1281 u64 new_cnt, prev_cnt, delta;
1282
1283 do {
1284 prev_cnt = local64_read(&hwc->prev_count);
1285 new_cnt = hns3_pmu_read_counter(event);
1286 } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
1287 prev_cnt);
1288
1289 delta = new_cnt - prev_cnt;
1290 local64_add(delta, &event->count);
1291 }
1292
static void hns3_pmu_start(struct perf_event *event, int flags)
1294 {
1295 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1296 struct hw_perf_event *hwc = &event->hw;
1297
1298 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
1299 return;
1300
1301 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
1302 hwc->state = 0;
1303
1304 hns3_pmu_config_filter(event);
1305 hns3_pmu_init_counter(event);
1306 hns3_pmu_enable_intr(hns3_pmu, hwc);
1307 hns3_pmu_enable_counter(hns3_pmu, hwc);
1308
1309 perf_event_update_userpage(event);
1310 }
1311
static void hns3_pmu_stop(struct perf_event *event, int flags)
1313 {
1314 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1315 struct hw_perf_event *hwc = &event->hw;
1316
1317 hns3_pmu_disable_counter(hns3_pmu, hwc);
1318 hns3_pmu_disable_intr(hns3_pmu, hwc);
1319
1320 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1321 hwc->state |= PERF_HES_STOPPED;
1322
1323 if (hwc->state & PERF_HES_UPTODATE)
1324 return;
1325
1326 /* Read hardware counter and update the perf counter statistics */
1327 hns3_pmu_read(event);
1328 hwc->state |= PERF_HES_UPTODATE;
1329 }
1330
static int hns3_pmu_add(struct perf_event *event, int flags)
1332 {
1333 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1334 struct hw_perf_event *hwc = &event->hw;
1335 int idx;
1336
1337 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1338
1339 /* Check all working events to find a related event. */
1340 idx = hns3_pmu_find_related_event_idx(hns3_pmu, event);
1341 if (idx < 0 && idx != -ENOENT)
1342 return idx;
1343
	/* The current event shares an enabled hardware event with a related event */
1345 if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) {
1346 hwc->idx = idx;
1347 goto start_count;
1348 }
1349
1350 idx = hns3_pmu_get_event_idx(hns3_pmu);
1351 if (idx < 0)
1352 return idx;
1353
1354 hwc->idx = idx;
1355 hns3_pmu->hw_events[idx] = event;
1356
1357 start_count:
1358 if (flags & PERF_EF_START)
1359 hns3_pmu_start(event, PERF_EF_RELOAD);
1360
1361 return 0;
1362 }
1363
static void hns3_pmu_del(struct perf_event *event, int flags)
1365 {
1366 struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
1367 struct hw_perf_event *hwc = &event->hw;
1368
1369 hns3_pmu_stop(event, PERF_EF_UPDATE);
1370 hns3_pmu->hw_events[hwc->idx] = NULL;
1371 perf_event_update_userpage(event);
1372 }
1373
static void hns3_pmu_enable(struct pmu *pmu)
1375 {
1376 struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
1377 u32 val;
1378
1379 val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
1380 val |= HNS3_PMU_GLOBAL_START;
1381 writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
1382 }
1383
static void hns3_pmu_disable(struct pmu *pmu)
1385 {
1386 struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
1387 u32 val;
1388
1389 val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
1390 val &= ~HNS3_PMU_GLOBAL_START;
1391 writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
1392 }
1393
static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
1395 {
1396 u16 device_id;
1397 char *name;
1398 u32 val;
1399
1400 hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2];
1401 if (!hns3_pmu->base) {
1402 pci_err(pdev, "ioremap failed\n");
1403 return -ENOMEM;
1404 }
1405
1406 hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);
1407
1408 val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
1409 hns3_pmu->bdf_min = val & 0xffff;
1410 hns3_pmu->bdf_max = val >> 16;
1411
1412 val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
1413 device_id = val & 0xffff;
1414 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
1415 if (!name)
1416 return -ENOMEM;
1417
1418 hns3_pmu->pdev = pdev;
1419 hns3_pmu->on_cpu = -1;
1420 hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
1421 hns3_pmu->pmu = (struct pmu) {
1422 .name = name,
1423 .module = THIS_MODULE,
1424 .parent = &pdev->dev,
1425 .event_init = hns3_pmu_event_init,
1426 .pmu_enable = hns3_pmu_enable,
1427 .pmu_disable = hns3_pmu_disable,
1428 .add = hns3_pmu_add,
1429 .del = hns3_pmu_del,
1430 .start = hns3_pmu_start,
1431 .stop = hns3_pmu_stop,
1432 .read = hns3_pmu_read,
1433 .task_ctx_nr = perf_invalid_context,
1434 .attr_groups = hns3_pmu_attr_groups,
1435 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1436 };
1437
1438 return 0;
1439 }
1440
static irqreturn_t hns3_pmu_irq(int irq, void *data)
1442 {
1443 struct hns3_pmu *hns3_pmu = data;
1444 u32 intr_status, idx;
1445
1446 for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
1447 intr_status = hns3_pmu_readl(hns3_pmu,
1448 HNS3_PMU_REG_EVENT_INTR_STATUS,
1449 idx);
1450
		/*
		 * As each counter restarts from 0 when it overflows, no extra
		 * processing is needed; just clear the interrupt status.
		 */
1455 if (intr_status)
1456 hns3_pmu_clear_intr_status(hns3_pmu, idx);
1457 }
1458
1459 return IRQ_HANDLED;
1460 }
1461
static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
1463 {
1464 struct hns3_pmu *hns3_pmu;
1465
1466 hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
1467 if (!hns3_pmu)
1468 return -ENODEV;
1469
1470 if (hns3_pmu->on_cpu == -1) {
1471 hns3_pmu->on_cpu = cpu;
1472 irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
1473 }
1474
1475 return 0;
1476 }
1477
static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
1479 {
1480 struct hns3_pmu *hns3_pmu;
1481 unsigned int target;
1482
1483 hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
1484 if (!hns3_pmu)
1485 return -ENODEV;
1486
1487 /* Nothing to do if this CPU doesn't own the PMU */
1488 if (hns3_pmu->on_cpu != cpu)
1489 return 0;
1490
1491 /* Choose a new CPU from all online cpus */
1492 target = cpumask_any_but(cpu_online_mask, cpu);
1493 if (target >= nr_cpu_ids)
1494 return 0;
1495
1496 perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
1497 hns3_pmu->on_cpu = target;
1498 irq_set_affinity(hns3_pmu->irq, cpumask_of(target));
1499
1500 return 0;
1501 }
1502
static void hns3_pmu_free_irq(void *data)
1504 {
1505 struct pci_dev *pdev = data;
1506
1507 pci_free_irq_vectors(pdev);
1508 }
1509
static int hns3_pmu_irq_register(struct pci_dev *pdev,
				 struct hns3_pmu *hns3_pmu)
1512 {
1513 int irq, ret;
1514
1515 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
1516 if (ret < 0) {
1517 pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
1518 return ret;
1519 }
1520
1521 ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
1522 if (ret) {
1523 pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
1524 return ret;
1525 }
1526
1527 irq = pci_irq_vector(pdev, 0);
1528 ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
1529 hns3_pmu->pmu.name, hns3_pmu);
1530 if (ret) {
1531 pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
1532 return ret;
1533 }
1534
1535 hns3_pmu->irq = irq;
1536
1537 return 0;
1538 }
1539
static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
1541 {
1542 int ret;
1543
1544 ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
1545 if (ret)
1546 return ret;
1547
1548 ret = hns3_pmu_irq_register(pdev, hns3_pmu);
1549 if (ret)
1550 return ret;
1551
1552 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
1553 &hns3_pmu->node);
1554 if (ret) {
1555 pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
1556 return ret;
1557 }
1558
1559 ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
1560 if (ret) {
1561 pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
1562 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
1563 &hns3_pmu->node);
1564 }
1565
1566 return ret;
1567 }
1568
static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
1570 {
1571 struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
1572
1573 perf_pmu_unregister(&hns3_pmu->pmu);
1574 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
1575 &hns3_pmu->node);
1576 }
1577
static int hns3_pmu_init_dev(struct pci_dev *pdev)
1579 {
1580 int ret;
1581
1582 ret = pcim_enable_device(pdev);
1583 if (ret) {
1584 pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
1585 return ret;
1586 }
1587
1588 ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
1589 if (ret < 0) {
1590 pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
1591 return ret;
1592 }
1593
1594 pci_set_master(pdev);
1595
1596 return 0;
1597 }
1598
static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1600 {
1601 struct hns3_pmu *hns3_pmu;
1602 int ret;
1603
1604 hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
1605 if (!hns3_pmu)
1606 return -ENOMEM;
1607
1608 ret = hns3_pmu_init_dev(pdev);
1609 if (ret)
1610 return ret;
1611
1612 ret = hns3_pmu_init_pmu(pdev, hns3_pmu);
1613 if (ret) {
1614 pci_clear_master(pdev);
1615 return ret;
1616 }
1617
1618 pci_set_drvdata(pdev, hns3_pmu);
1619
1620 return ret;
1621 }
1622
static void hns3_pmu_remove(struct pci_dev *pdev)
1624 {
1625 hns3_pmu_uninit_pmu(pdev);
1626 pci_clear_master(pdev);
1627 pci_set_drvdata(pdev, NULL);
1628 }
1629
1630 static const struct pci_device_id hns3_pmu_ids[] = {
1631 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) },
1632 { 0, }
1633 };
1634 MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);
1635
1636 static struct pci_driver hns3_pmu_driver = {
1637 .name = "hns3_pmu",
1638 .id_table = hns3_pmu_ids,
1639 .probe = hns3_pmu_probe,
1640 .remove = hns3_pmu_remove,
1641 };
1642
static int __init hns3_pmu_module_init(void)
1644 {
1645 int ret;
1646
1647 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
1648 "AP_PERF_ARM_HNS3_PMU_ONLINE",
1649 hns3_pmu_online_cpu,
1650 hns3_pmu_offline_cpu);
1651 if (ret) {
1652 pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
1653 return ret;
1654 }
1655
1656 ret = pci_register_driver(&hns3_pmu_driver);
1657 if (ret) {
1658 pr_err("failed to register pci driver, ret = %d.\n", ret);
1659 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
1660 }
1661
1662 return ret;
1663 }
1664 module_init(hns3_pmu_module_init);
1665
static void __exit hns3_pmu_module_exit(void)
1667 {
1668 pci_unregister_driver(&hns3_pmu_driver);
1669 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
1670 }
1671 module_exit(hns3_pmu_module_exit);
1672
1673 MODULE_DESCRIPTION("HNS3 PMU driver");
1674 MODULE_LICENSE("GPL v2");
1675