1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <linux/rhashtable.h>
6 #include <linux/netdevice.h>
7 #include <net/flow_offload.h>
8 #include <net/netfilter/nf_flow_table.h>
9
/* One nf_flowtable bound to a net_device. Linked into the per-device
 * flow_offload_xdp list; retired with kfree_rcu() so lockless readers
 * traversing the list under RCU never see freed memory.
 */
struct flow_offload_xdp_ft {
	struct list_head head;		/* entry in flow_offload_xdp::head */
	struct nf_flowtable *ft;	/* the bound flowtable */
	struct rcu_head rcuhead;	/* deferred free for RCU readers */
};
15
/* Per-device bucket in nf_xdp_hashtable, keyed by the net_device's
 * kernel address. Holds the list of flowtables bound to that device.
 */
struct flow_offload_xdp {
	struct hlist_node hnode;	/* entry in nf_xdp_hashtable */
	unsigned long net_device_addr;	/* hash key: (unsigned long)dev */
	struct list_head head;		/* list of flow_offload_xdp_ft */
};
21
/* Global net_device -> nf_flowtable map (2^4 = 16 buckets).
 * Lookups run under RCU; all modifications are serialized by
 * nf_xdp_hashtable_lock.
 */
#define NF_XDP_HT_BITS 4
static DEFINE_HASHTABLE(nf_xdp_hashtable, NF_XDP_HT_BITS);
static DEFINE_MUTEX(nf_xdp_hashtable_lock);
25
26 /* caller must hold rcu read lock */
nf_flowtable_by_dev(const struct net_device * dev)27 struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev)
28 {
29 unsigned long key = (unsigned long)dev;
30 struct flow_offload_xdp *iter;
31
32 hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) {
33 if (key == iter->net_device_addr) {
34 struct flow_offload_xdp_ft *ft_elem;
35
36 /* The user is supposed to insert a given net_device
37 * just into a single nf_flowtable so we always return
38 * the first element here.
39 */
40 ft_elem = list_first_or_null_rcu(&iter->head,
41 struct flow_offload_xdp_ft,
42 head);
43 return ft_elem ? ft_elem->ft : NULL;
44 }
45 }
46
47 return NULL;
48 }
49
nf_flowtable_by_dev_insert(struct nf_flowtable * ft,const struct net_device * dev)50 static int nf_flowtable_by_dev_insert(struct nf_flowtable *ft,
51 const struct net_device *dev)
52 {
53 struct flow_offload_xdp *iter, *elem = NULL;
54 unsigned long key = (unsigned long)dev;
55 struct flow_offload_xdp_ft *ft_elem;
56
57 ft_elem = kzalloc(sizeof(*ft_elem), GFP_KERNEL_ACCOUNT);
58 if (!ft_elem)
59 return -ENOMEM;
60
61 ft_elem->ft = ft;
62
63 mutex_lock(&nf_xdp_hashtable_lock);
64
65 hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
66 if (key == iter->net_device_addr) {
67 elem = iter;
68 break;
69 }
70 }
71
72 if (!elem) {
73 elem = kzalloc(sizeof(*elem), GFP_KERNEL_ACCOUNT);
74 if (!elem)
75 goto err_unlock;
76
77 elem->net_device_addr = key;
78 INIT_LIST_HEAD(&elem->head);
79 hash_add_rcu(nf_xdp_hashtable, &elem->hnode, key);
80 }
81 list_add_tail_rcu(&ft_elem->head, &elem->head);
82
83 mutex_unlock(&nf_xdp_hashtable_lock);
84
85 return 0;
86
87 err_unlock:
88 mutex_unlock(&nf_xdp_hashtable_lock);
89 kfree(ft_elem);
90
91 return -ENOMEM;
92 }
93
nf_flowtable_by_dev_remove(struct nf_flowtable * ft,const struct net_device * dev)94 static void nf_flowtable_by_dev_remove(struct nf_flowtable *ft,
95 const struct net_device *dev)
96 {
97 struct flow_offload_xdp *iter, *elem = NULL;
98 unsigned long key = (unsigned long)dev;
99
100 mutex_lock(&nf_xdp_hashtable_lock);
101
102 hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
103 if (key == iter->net_device_addr) {
104 elem = iter;
105 break;
106 }
107 }
108
109 if (elem) {
110 struct flow_offload_xdp_ft *ft_elem, *ft_next;
111
112 list_for_each_entry_safe(ft_elem, ft_next, &elem->head, head) {
113 if (ft_elem->ft == ft) {
114 list_del_rcu(&ft_elem->head);
115 kfree_rcu(ft_elem, rcuhead);
116 }
117 }
118
119 if (list_empty(&elem->head))
120 hash_del_rcu(&elem->hnode);
121 else
122 elem = NULL;
123 }
124
125 mutex_unlock(&nf_xdp_hashtable_lock);
126
127 if (elem) {
128 synchronize_rcu();
129 kfree(elem);
130 }
131 }
132
nf_flow_offload_xdp_setup(struct nf_flowtable * flowtable,struct net_device * dev,enum flow_block_command cmd)133 int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
134 struct net_device *dev,
135 enum flow_block_command cmd)
136 {
137 switch (cmd) {
138 case FLOW_BLOCK_BIND:
139 return nf_flowtable_by_dev_insert(flowtable, dev);
140 case FLOW_BLOCK_UNBIND:
141 nf_flowtable_by_dev_remove(flowtable, dev);
142 return 0;
143 }
144
145 WARN_ON_ONCE(1);
146 return 0;
147 }
148