/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_VSOCK_H
#define _LINUX_VIRTIO_VSOCK_H

#include <uapi/linux/virtio_vsock.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

struct virtio_vsock_skb_cb {
	bool reply;
	bool tap_delivered;
	u32 offset;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	return (struct virtio_vsock_hdr *)skb->head;
}

static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
{
	u32 len;

	len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

	if (len > 0)
		skb_put(skb, len);
}

static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
	struct sk_buff *skb;

	if (size < VIRTIO_VSOCK_SKB_HEADROOM)
		return NULL;

	skb = alloc_skb(size, mask);
	if (!skb)
		return NULL;

	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	return skb;
}
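
/*
 * Illustrative sketch (not part of this header; 'payload', 'payload_len'
 * and 't' are placeholders, and only the len field is shown): a sender
 * could build a packet with the helpers above.  The virtio_vsock header
 * lives in the reserved headroom at skb->head, while the payload starts
 * at skb->data:
 *
 *	struct sk_buff *skb;
 *	struct virtio_vsock_hdr *hdr;
 *
 *	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + payload_len,
 *				     GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hdr = virtio_vsock_hdr(skb);
 *	hdr->len = cpu_to_le32(payload_len);
 *	skb_put_data(skb, payload, payload_len);
 *
 *	return t->send_pkt(skb);	(the transport takes ownership of skb)
 */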

static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_head(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	spin_lock_bh(&list->lock);
	skb = __skb_dequeue(list);
	spin_unlock_bh(&list->lock);

	return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
	spin_lock_bh(&list->lock);
	__skb_queue_purge(list);
	spin_unlock_bh(&list->lock);
}

static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
	return (size_t)(skb_end_pointer(skb) - skb->head);
}
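
/*
 * Illustrative sketch (assumed usage; 'rx_queue' and 'consume_packet()'
 * are placeholders): on the receive path, once the device has filled the
 * reserved header and the payload, the payload is made visible and the
 * skb is handed over through a BH-safe queue:
 *
 *	virtio_vsock_skb_rx_put(skb);			(skb_put() of hdr->len)
 *	virtio_vsock_skb_queue_tail(&rx_queue, skb);
 *
 * and later consumed, possibly from another context:
 *
 *	skb = virtio_vsock_skb_dequeue(&rx_queue);
 *	if (skb)
 *		consume_packet(skb);
 */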

#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)

enum {
	VSOCK_VQ_RX     = 0, /* for host to guest data */
	VSOCK_VQ_TX     = 1, /* for guest to host data */
	VSOCK_VQ_EVENT  = 2,
	VSOCK_VQ_MAX    = 3,
};

/* Per-socket state (accessed via vsk->trans) */
struct virtio_vsock_sock {
	struct vsock_sock *vsk;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	/* Protected by tx_lock */
	u32 tx_cnt;
	u32 peer_fwd_cnt;
	u32 peer_buf_alloc;
	size_t bytes_unsent;

	/* Protected by rx_lock */
	u32 fwd_cnt;
	u32 last_fwd_cnt;
	u32 rx_bytes;
	u32 buf_alloc;
	struct sk_buff_head rx_queue;
	u32 msg_count;
};
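
/*
 * Credit accounting (summary of how the fields above are expected to be
 * used by virtio_transport_get_credit() and friends): each peer advertises
 * its receive buffer size and its consumed-byte counter in the packet
 * headers, and a sender may have at most
 *
 *	peer_buf_alloc - (tx_cnt - peer_fwd_cnt)
 *
 * bytes in flight.  For example, with peer_buf_alloc = 64 KiB,
 * tx_cnt = 80 KiB and peer_fwd_cnt = 48 KiB, at most a further 32 KiB can
 * be queued before waiting for a credit update from the peer.
 */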

struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
	struct msghdr *msg;
	u32 pkt_len;
	u16 type;
	u16 op;
	u32 flags;
	bool reply;
};

struct virtio_transport {
	/* This must be the first field */
	struct vsock_transport transport;

	/* Takes ownership of the packet */
	int (*send_pkt)(struct sk_buff *skb);

	/* Used in MSG_ZEROCOPY mode. Checks whether the provided data
	 * (number of buffers) can be transmitted using zerocopy. If this
	 * callback is not implemented for the current transport, the
	 * transport needs no extra checks and can perform zerocopy
	 * transmission by default.
	 */
	bool (*can_msgzerocopy)(int bufs_num);
};
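
/*
 * Illustrative sketch (hypothetical 'example_*' names, not a real
 * backend, and heavily abridged): a transport embeds struct
 * vsock_transport as its first field, typically wiring it up to the
 * generic virtio_transport_* helpers declared below, and provides
 * send_pkt(); can_msgzerocopy may be left NULL when no extra check is
 * required:
 *
 *	static int example_send_pkt(struct sk_buff *skb)
 *	{
 *		int len = skb->len;
 *
 *		virtio_vsock_skb_queue_tail(&example_send_queue, skb);
 *		queue_work(example_wq, &example_tx_work);
 *		return len;
 *	}
 *
 *	static struct virtio_transport example_transport = {
 *		.transport.connect	= virtio_transport_connect,
 *		.transport.shutdown	= virtio_transport_shutdown,
 *		.send_pkt		= example_send_pkt,
 *	};
 */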

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len,
				int type);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);

ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk);

void virtio_transport_consume_skb_sent(struct sk_buff *skb,
				       bool consume);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				 struct vsock_sock *psk);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_available_now);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data);
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
bool virtio_transport_stream_allow(u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr);
bool virtio_transport_dgram_allow(u32 cid, u32 port);

int virtio_transport_connect(struct vsock_sock *vsk);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);

void virtio_transport_release(struct vsock_sock *vsk);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t len);

void virtio_transport_destruct(struct vsock_sock *vsk);

void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
#endif /* _LINUX_VIRTIO_VSOCK_H */