// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame handler and other utility functions for HSR and PRP.
 */

#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_forward.h"
#include "hsr_framereg.h"

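/* True when the frame's ethertype is neither the HSR tag type nor the
 * PRP/supervision type. hsr_handle_frame() lets such frames pass up the
 * stack unchanged instead of forwarding them.
 */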
bool hsr_invalid_dan_ingress_frame(__be16 protocol)
{
	return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
}

static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device offloads
	 * HSR tag removal), but for PRP there can also be untagged frames from
	 * singly attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;

	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    port->type != HSR_PT_INTERLINK &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

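	/* The rx_handler sees the skb with the Ethernet header already pulled
	 * (skb->data points at the payload), so restore it before forwarding
	 * and, for HSR-tagged frames, point the network header past the tag.
	 */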
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
	    protocol == htons(ETH_P_HSR))
		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
	skb_reset_mac_len(skb);

	/* Only frames received on the interlink port are assigned a sequence
	 * number here and must be serialised against other senders, hence the
	 * seqnr lock.
	 */
	if (port->type == HSR_PT_INTERLINK) {
		spin_lock_bh(&hsr->seqnr_lock);
		hsr_forward_skb(skb, port);
		spin_unlock_bh(&hsr->seqnr_lock);
	} else {
		hsr_forward_skb(skb, port);
	}

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}

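/* A device is an HSR/PRP slave if and only if our rx_handler is registered
 * on it.
 */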
bool hsr_port_exists(const struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}

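/* Sanity-check a candidate slave device before it is enslaved. */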
static int hsr_check_dev_ok(struct net_device *dev,
			    struct netlink_ext_ack *extack)
{
	/* Don't allow HSR on non-ethernet like devices */
	if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
		return -EINVAL;
	}

	/* Don't allow enslaving hsr devices */
	if (is_hsr_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot create trees of HSR devices.");
		return -EINVAL;
	}

	if (hsr_port_exists(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device is already a HSR slave.");
		return -EINVAL;
	}

	if (is_vlan_dev(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device does not support bridging.");
		return -EOPNOTSUPP;
	}

	/* HSR over bonded devices has not been tested, but I'm not sure it
	 * won't work...
	 */

	return 0;
}

/* Set up a device to be added to the HSR bridge. */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)
{
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	/* Don't use promiscuous mode for offload, since L2 frame forwarding
	 * happens in the offloaded hardware.
	 */
	if (!port->hsr->fwd_offloaded) {
		res = dev_set_promiscuity(dev, 1);
		if (res)
			return res;
	}

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

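	/* Make the HSR/PRP master an upper device of the slave and divert the
	 * slave's incoming traffic to hsr_handle_frame() via the rx_handler.
	 */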
	res = netdev_upper_dev_link(dev, hsr_dev, extack);
	if (res)
		goto fail_upper_dev_link;

	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	if (!port->hsr->fwd_offloaded)
		dev_set_promiscuity(dev, -1);

	return res;
}

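/* Allocate a port of the given type (master, slave A/B or interlink) and
 * attach it to the HSR/PRP device.
 */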
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

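	/* The new lower device may change which features the master can offer
	 * and how large a frame it can carry once the HSR/PRP tag is added.
	 */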
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}

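/* Detach a port from the HSR/PRP device and free it once all RCU readers are
 * done with it.
 */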
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		if (!port->hsr->fwd_offloaded)
			dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
	}

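	/* Wait until no RCU reader can still be traversing the port list or
	 * running in the rx_handler before freeing the port.
	 */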
	synchronize_rcu();

	kfree(port);
}