// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phylink.h>
#include <linux/netdevice.h>

#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"

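/* Resize the Tx/Rx descriptor rings (reached via e.g. "ethtool -G <dev>
 * rx 1024 tx 1024").  Requested counts are clamped to the supported range
 * and aligned to the hardware descriptor multiple.  If the interface is
 * down only the stored counts are updated; otherwise the device is brought
 * down, the rings are rebuilt through a temporary ring array, and the
 * device is brought back up.
 */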
static int txgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;
	struct wx_ring *temp_ring;
	int i, err = 0;

	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);

	if (new_tx_count == wx->tx_ring_count &&
	    new_rx_count == wx->rx_ring_count)
		return 0;

	err = wx_set_state_reset(wx);
	if (err)
		return err;

	if (!netif_running(wx->netdev)) {
		for (i = 0; i < wx->num_tx_queues; i++)
			wx->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < wx->num_rx_queues; i++)
			wx->rx_ring[i]->count = new_rx_count;
		wx->tx_ring_count = new_tx_count;
		wx->rx_ring_count = new_rx_count;

		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	txgbe_down(wx);

	wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
	kvfree(temp_ring);

	txgbe_up(wx);

clear_reset:
	clear_bit(WX_STATE_RESETTING, wx->state);
	return err;
}

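/* Change the queue count (reached via e.g. "ethtool -L <dev> combined 8").
 * The common libwx helper validates and applies the channel change, then
 * setup_tc is replayed so any traffic class to queue mapping is rebuilt.
 */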
static int txgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	int err;

	err = wx_set_channels(dev, ch);
	if (err < 0)
		return err;

	/* use setup TC to update any traffic class queue mapping */
	return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}

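/* Fill an ethtool flow spec from the software filter entry stored at
 * fsp->location (e.g. "ethtool -n <dev> rule 5").  The filter list is kept
 * sorted by sw_idx, so the walk can stop at the first entry at or beyond
 * the requested location.
 */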
static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	union txgbe_atr_input *mask = &txgbe->fdir_mask;
	struct txgbe_fdir_filter *rule = NULL;
	struct hlist_node *node;

	/* report total rule count */
	cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

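/* Report the locations of all installed Flow Director filters.  Returns
 * -EMSGSIZE if more rules exist than fit in the caller's rule_locs array.
 */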
static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct txgbe_fdir_filter *rule;
	struct hlist_node *node;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

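/* Dispatch the ethtool get_rxnfc commands: Rx ring count, Flow Director
 * rule count, a single rule, or the full rule table (e.g. "ethtool -n <dev>").
 */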
static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = wx->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = txgbe->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

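/* Translate an ethtool flow type into the hardware ATR flow type.  For
 * IP_USER_FLOW the L4 protocol decides the type; a zero protocol with a
 * zero protocol mask selects the plain (L4-agnostic) IPv4 filter type.
 */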
static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			fallthrough;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

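/* Return true if a filter with the same bucket hash and action is already
 * installed.  Called with fdir_perfect_lock held.
 */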
static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe,
					   struct txgbe_fdir_filter *input)
{
	struct txgbe_fdir_filter *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (rule->filter.formatted.bkt_hash ==
		    input->filter.formatted.bkt_hash &&
		    rule->action == input->action) {
			wx_dbg(txgbe->wx, "FDIR entry already exists\n");
			return true;
		}
	}
	return false;
}

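/* Insert, replace or delete (input == NULL) the software filter entry at
 * sw_idx, keeping the list sorted by sw_idx.  The hardware filter of a
 * displaced rule is erased only while the interface is running.  Called
 * with fdir_perfect_lock held.
 */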
static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe,
					   struct txgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct hlist_node *node = NULL, *parent = NULL;
	struct txgbe_fdir_filter *rule;
	struct wx *wx = txgbe->wx;
	bool deleted = false;
	int err;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		/* track the last entry that sorts before sw_idx; 'node'
		 * holds the iterator's saved *next* pointer, so it must
		 * not be used as the parent here
		 */
		parent = &rule->fdir_node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && rule->sw_idx == sw_idx) {
		/* hardware filters are only configured when interface is up,
		 * and we should not issue filter commands while the interface
		 * is down
		 */
		if (netif_running(wx->netdev) &&
		    (!input || rule->filter.formatted.bkt_hash !=
		     input->filter.formatted.bkt_hash)) {
			err = txgbe_fdir_erase_perfect_filter(wx,
							      &rule->filter,
							      sw_idx);
			if (err)
				return -EINVAL;
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		txgbe->fdir_filter_count--;
		deleted = true;
	}

	/* If we weren't given an input, then this was a request to delete a
	 * filter. We should return -EINVAL if the filter wasn't found, but
	 * return 0 if the rule was successfully deleted.
	 */
	if (!input)
		return deleted ? 0 : -EINVAL;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, parent);
	else
		hlist_add_head(&input->fdir_node,
			       &txgbe->fdir_filter_list);

	/* update counts */
	txgbe->fdir_filter_count++;

	return 0;
}

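/* Install a Flow Director perfect filter, e.g.
 *   ethtool -N <dev> flow-type tcp4 dst-ip 192.168.0.2 dst-port 80 \
 *           action 3 loc 5
 * All rules on a port must share one field mask: the mask is programmed
 * from the first rule, and later rules whose mask differs are rejected.
 */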
static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct txgbe_fdir_filter *input;
	union txgbe_atr_input mask;
	struct wx *wx = txgbe->wx;
	int err = -EINVAL;
	u16 ptype = 0;
	u8 queue;

	if (!test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
		return -EOPNOTSUPP;

	/* ring_cookie either selects an Rx queue or the drop index */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = TXGBE_RDB_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);

		if (ring >= wx->num_rx_queues)
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		queue = wx->rx_ring[ring]->reg_idx;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
		wx_err(wx, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union txgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (txgbe_flowspec_to_flow_type(fsp,
					&input->filter.formatted.flow_type)) {
		wx_err(wx, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
				   TXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.flex_bytes =
				fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	switch (input->filter.formatted.flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
		ptype = WX_PTYPE_L2_IPV4_TCP;
		break;
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
		ptype = WX_PTYPE_L2_IPV4_UDP;
		break;
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
		ptype = WX_PTYPE_L2_IPV4_SCTP;
		break;
	case TXGBE_ATR_FLOW_TYPE_IPV4:
		ptype = WX_PTYPE_L2_IPV4;
		break;
	default:
		break;
	}

	/* the vlan_id field is reused to carry the packet type */
	input->filter.formatted.vlan_id = htons(ptype);
	if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
		mask.formatted.vlan_id = htons(0xFFFF);
	else
		mask.formatted.vlan_id = htons(0xFFF8);

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&txgbe->fdir_perfect_lock);

	if (hlist_empty(&txgbe->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
		err = txgbe_fdir_set_input_mask(wx, &mask);
		if (err)
			goto err_unlock;
	} else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
		wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
		goto err_unlock;
	}

	/* apply mask and compute/store hash */
	txgbe_atr_compute_perfect_hash(&input->filter, &mask);

	/* bail out if an identical entry already exists on the filter list */
	if (txgbe_match_ethtool_fdir_entry(txgbe, input))
		goto err_unlock;

	/* only program filters to hardware if the net device is running, as
	 * we store the filters in the Rx buffer which is not allocated when
	 * the device is down
	 */
	if (netif_running(wx->netdev)) {
		err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
						      input->sw_idx, queue);
		if (err)
			goto err_unlock;
	}

	/* check the result so a failed software-list update does not
	 * silently leak 'input'
	 */
	err = txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx);
	if (err)
		goto err_unlock;

	spin_unlock(&txgbe->fdir_perfect_lock);

	return 0;
err_unlock:
	spin_unlock(&txgbe->fdir_perfect_lock);
err_out:
	kfree(input);
	return err;
}

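/* Remove the Flow Director filter at the given location, e.g.
 * "ethtool -N <dev> delete 5".
 */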
static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err = 0;

	spin_lock(&txgbe->fdir_perfect_lock);
	err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location);
	spin_unlock(&txgbe->fdir_perfect_lock);

	return err;
}

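/* Dispatch the ethtool set_rxnfc commands: insert or delete a Flow
 * Director filter.
 */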
static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd);
		break;
	default:
		break;
	}

	return ret;
}

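/* Most operations are the shared libwx implementations; ring sizing,
 * channel changes and Flow Director rules need txgbe-specific handling.
 */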
static const struct ethtool_ops txgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
	.get_drvinfo = wx_get_drvinfo,
	.nway_reset = wx_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = wx_get_link_ksettings,
	.set_link_ksettings = wx_set_link_ksettings,
	.get_sset_count = wx_get_sset_count,
	.get_strings = wx_get_strings,
	.get_ethtool_stats = wx_get_ethtool_stats,
	.get_eth_mac_stats = wx_get_mac_stats,
	.get_pause_stats = wx_get_pause_stats,
	.get_pauseparam = wx_get_pauseparam,
	.set_pauseparam = wx_set_pauseparam,
	.get_ringparam = wx_get_ringparam,
	.set_ringparam = txgbe_set_ringparam,
	.get_coalesce = wx_get_coalesce,
	.set_coalesce = wx_set_coalesce,
	.get_channels = wx_get_channels,
	.set_channels = txgbe_set_channels,
	.get_rxnfc = txgbe_get_rxnfc,
	.set_rxnfc = txgbe_set_rxnfc,
	.get_msglevel = wx_get_msglevel,
	.set_msglevel = wx_set_msglevel,
};

void txgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &txgbe_ethtool_ops;
}