// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
				      u16 *refill,
				      u16 *max,
				      u8 *bwg_id,
				      u8 *prio_type,
				      u8 *prio_tc)
{
	u32    reg           = 0;
	u32    credit_refill = 0;
	u32    credit_max    = 0;
	u8     i             = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/* Map all user priorities (UPs) to their traffic class */
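	/* Each UP's TC is packed into RTRUP2TC at offset i * IXGBE_RTRUP2TC_UP_SHIFT. */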
	reg = 0;
	for (i = 0; i < MAX_USER_PRIORITY; i++)
		reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
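	/*
	 * Each RTRPT4C entry packs the refill credits, the max credit limit
	 * (MCL), the bandwidth group index and, for link-strict TCs, the LSP
	 * flag.
	 */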
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max    = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type)
{
	u32    reg, max_credits;
	u8     i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
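	/*
	 * RTTDQSEL selects one of the 128 Tx queues and RTTDT1C holds that
	 * queue's credit configuration; zeroing it for every queue leaves the
	 * per-TC RTTDT2C values below in charge.
	 */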
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_RTTDT2C_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type,
					   u8 *prio_tc)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/* Map all user priorities (UPs) to their traffic class */
	reg = 0;
	for (i = 0; i < MAX_USER_PRIORITY; i++)
		reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_RTTPT2C_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 & X550 support per-TC Rx priority flow control.
	 * So clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

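	/*
	 * 82599 only has the global RPFCE enable, so set it whenever any
	 * priority has PFC enabled; X540 and later additionally get the
	 * per-TC enable bits written above.
	 */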
	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	for (i = 0; i < MAX_USER_PRIORITY; i++) {
		if (prio_tc[i] > max_tc)
			max_tc = prio_tc[i];
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < MAX_USER_PRIORITY; j++) {
			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
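			/*
			 * The water marks are kept in kilobytes while the
			 * FCRTH/FCRTL registers take byte counts, hence the
			 * shift by 10.
			 */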
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/* In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
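	/*
	 * Multiplying the 16-bit pause time by 0x00010001 replicates it into
	 * both halves of each 32-bit FCTTV register.
	 */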
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return 0;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8  i   = 0;

	/*
	 * Receive Queues stats setting
	 * 32 RQSMR registers, each configuring 4 queues.
	 * Set all 16 queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 */
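	/*
	 * 0x01010101 * (i / 4) writes the same stat index into all four
	 * per-queue byte fields of a register, so each group of four
	 * registers (16 queues) shares one counter set.
	 */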
	for (i = 0; i < 32; i++) {
		reg = 0x01010101 * (i / 4);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
	}
	/*
	 * Transmit Queues stats setting
	 * 32 TQSM registers, each controlling 4 queues.
	 * Set all queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 * Tx queues are allocated non-uniformly to TCs:
	 * 32, 32, 16, 16, 8, 8, 8, 8.
	 */
	for (i = 0; i < 32; i++) {
		if (i < 8)
			reg = 0x00000000;
		else if (i < 16)
			reg = 0x01010101;
		else if (i < 20)
			reg = 0x02020202;
		else if (i < 24)
			reg = 0x03030303;
		else if (i < 26)
			reg = 0x04040404;
		else if (i < 28)
			reg = 0x05050505;
		else if (i < 30)
			reg = 0x06060606;
		else
			reg = 0x07070707;
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
	}

	return 0;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
			      u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
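	/*
	 * Each helper below currently always returns 0, so the return values
	 * are not checked here.
	 */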
	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
					  prio_type, prio_tc);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
					       bwg_id, prio_type);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
					       bwg_id, prio_type, prio_tc);
	ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
	ixgbe_dcb_config_tc_stats_82599(hw);

	return 0;
}