1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright (c) 2016 Chelsio Communications, Inc.
4   */
5  
6  #include <linux/module.h>
7  #include <linux/list.h>
8  #include <linux/workqueue.h>
9  #include <linux/skbuff.h>
10  #include <linux/timer.h>
11  #include <linux/notifier.h>
12  #include <linux/inetdevice.h>
13  #include <linux/ip.h>
14  #include <linux/tcp.h>
15  #include <linux/if_vlan.h>
16  
17  #include <net/neighbour.h>
18  #include <net/netevent.h>
19  #include <net/route.h>
20  #include <net/tcp.h>
21  #include <net/ip6_route.h>
22  #include <net/addrconf.h>
23  
24  #include <libcxgb_cm.h>
25  #include "cxgbit.h"
26  #include "clip_tbl.h"
27  
28  static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
29  {
30  	wr_waitp->ret = 0;
31  	reinit_completion(&wr_waitp->completion);
32  }
33  
34  static void
35  cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
36  {
37  	if (ret == CPL_ERR_NONE)
38  		wr_waitp->ret = 0;
39  	else
40  		wr_waitp->ret = -EIO;
41  
42  	if (wr_waitp->ret)
43  		pr_err("%s: err:%u", func, ret);
44  
45  	complete(&wr_waitp->completion);
46  }
47  
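/*
 * Block until the firmware signals completion of a previously posted
 * work request via cxgbit_wake_up(), or until @timeout seconds elapse.
 * Returns 0 on success or a negative errno (-EIO, -ETIMEDOUT).
 */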
48  static int
49  cxgbit_wait_for_reply(struct cxgbit_device *cdev,
50  		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
51  		      const char *func)
52  {
53  	int ret;
54  
55  	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
56  		wr_waitp->ret = -EIO;
57  		goto out;
58  	}
59  
60  	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
61  	if (!ret) {
62  		pr_info("%s - Device %s not responding tid %u\n",
63  			func, pci_name(cdev->lldi.pdev), tid);
64  		wr_waitp->ret = -ETIMEDOUT;
65  	}
66  out:
67  	if (wr_waitp->ret)
68  		pr_info("%s: FW reply %d tid %u\n",
69  			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
70  	return wr_waitp->ret;
71  }
72  
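/*
 * The np hash table maps a listening endpoint (cnp) to the server TID
 * (stid) allocated for it on a given adapter; the cnp pointer itself
 * is hashed to pick the bucket.
 */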
73  static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
74  {
75  	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
76  }
77  
78  static struct np_info *
79  cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
80  		   unsigned int stid)
81  {
82  	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
83  
84  	if (p) {
85  		int bucket = cxgbit_np_hashfn(cnp);
86  
87  		p->cnp = cnp;
88  		p->stid = stid;
89  		spin_lock(&cdev->np_lock);
90  		p->next = cdev->np_hash_tab[bucket];
91  		cdev->np_hash_tab[bucket] = p;
92  		spin_unlock(&cdev->np_lock);
93  	}
94  
95  	return p;
96  }
97  
98  static int
99  cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
100  {
101  	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
102  	struct np_info *p;
103  
104  	spin_lock(&cdev->np_lock);
105  	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
106  		if (p->cnp == cnp) {
107  			stid = p->stid;
108  			break;
109  		}
110  	}
111  	spin_unlock(&cdev->np_lock);
112  
113  	return stid;
114  }
115  
116  static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
117  {
118  	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
119  	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
120  
121  	spin_lock(&cdev->np_lock);
122  	for (p = *prev; p; prev = &p->next, p = p->next) {
123  		if (p->cnp == cnp) {
124  			stid = p->stid;
125  			*prev = p->next;
126  			kfree(p);
127  			break;
128  		}
129  	}
130  	spin_unlock(&cdev->np_lock);
131  
132  	return stid;
133  }
134  
135  void _cxgbit_free_cnp(struct kref *kref)
136  {
137  	struct cxgbit_np *cnp;
138  
139  	cnp = container_of(kref, struct cxgbit_np, kref);
140  	kfree(cnp);
141  }
142  
143  static int
144  cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
145  		      struct cxgbit_np *cnp)
146  {
147  	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
148  				     &cnp->com.local_addr;
149  	int addr_type;
150  	int ret;
151  
152  	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
153  		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
154  
155  	addr_type = ipv6_addr_type((const struct in6_addr *)
156  				   &sin6->sin6_addr);
157  	if (addr_type != IPV6_ADDR_ANY) {
158  		ret = cxgb4_clip_get(cdev->lldi.ports[0],
159  				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
160  		if (ret) {
161  			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
162  			       sin6->sin6_addr.s6_addr, ret);
163  			return -ENOMEM;
164  		}
165  	}
166  
167  	cxgbit_get_cnp(cnp);
168  	cxgbit_init_wr_wait(&cnp->com.wr_wait);
169  
170  	ret = cxgb4_create_server6(cdev->lldi.ports[0],
171  				   stid, &sin6->sin6_addr,
172  				   sin6->sin6_port,
173  				   cdev->lldi.rxq_ids[0]);
174  	if (!ret)
175  		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
176  					    0, 10, __func__);
177  	else if (ret > 0)
178  		ret = net_xmit_errno(ret);
179  	else
180  		cxgbit_put_cnp(cnp);
181  
182  	if (ret) {
183  		if (ret != -ETIMEDOUT)
184  			cxgb4_clip_release(cdev->lldi.ports[0],
185  				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
186  
187  		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
188  		       ret, stid, sin6->sin6_addr.s6_addr,
189  		       ntohs(sin6->sin6_port));
190  	}
191  
192  	return ret;
193  }
194  
195  static int
196  cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
197  		      struct cxgbit_np *cnp)
198  {
199  	struct sockaddr_in *sin = (struct sockaddr_in *)
200  				   &cnp->com.local_addr;
201  	int ret;
202  
203  	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
204  		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
205  
206  	cxgbit_get_cnp(cnp);
207  	cxgbit_init_wr_wait(&cnp->com.wr_wait);
208  
209  	ret = cxgb4_create_server(cdev->lldi.ports[0],
210  				  stid, sin->sin_addr.s_addr,
211  				  sin->sin_port, 0,
212  				  cdev->lldi.rxq_ids[0]);
213  	if (!ret)
214  		ret = cxgbit_wait_for_reply(cdev,
215  					    &cnp->com.wr_wait,
216  					    0, 10, __func__);
217  	else if (ret > 0)
218  		ret = net_xmit_errno(ret);
219  	else
220  		cxgbit_put_cnp(cnp);
221  
222  	if (ret)
223  		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
224  		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
225  	return ret;
226  }
227  
228  struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
229  {
230  	struct cxgbit_device *cdev;
231  	u8 i;
232  
233  	list_for_each_entry(cdev, &cdev_list_head, list) {
234  		struct cxgb4_lld_info *lldi = &cdev->lldi;
235  
236  		for (i = 0; i < lldi->nports; i++) {
237  			if (lldi->ports[i] == ndev) {
238  				if (port_id)
239  					*port_id = i;
240  				return cdev;
241  			}
242  		}
243  	}
244  
245  	return NULL;
246  }
247  
248  static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
249  {
250  	if (ndev->priv_flags & IFF_BONDING) {
251  		pr_err("Bond devices are not supported. Interface:%s\n",
252  		       ndev->name);
253  		return NULL;
254  	}
255  
256  	if (is_vlan_dev(ndev))
257  		return vlan_dev_real_dev(ndev);
258  
259  	return ndev;
260  }
261  
262  static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
263  {
264  	struct net_device *ndev;
265  
266  	ndev = __ip_dev_find(&init_net, saddr, false);
267  	if (!ndev)
268  		return NULL;
269  
270  	return cxgbit_get_real_dev(ndev);
271  }
272  
273  static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
274  {
275  	struct net_device *ndev = NULL;
276  	bool found = false;
277  
278  	if (IS_ENABLED(CONFIG_IPV6)) {
279  		for_each_netdev_rcu(&init_net, ndev)
280  			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
281  				found = true;
282  				break;
283  			}
284  	}
285  	if (!found)
286  		return NULL;
287  	return cxgbit_get_real_dev(ndev);
288  }
289  
290  static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
291  {
292  	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
293  	int ss_family = sockaddr->ss_family;
294  	struct net_device *ndev = NULL;
295  	struct cxgbit_device *cdev = NULL;
296  
297  	rcu_read_lock();
298  	if (ss_family == AF_INET) {
299  		struct sockaddr_in *sin;
300  
301  		sin = (struct sockaddr_in *)sockaddr;
302  		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
303  	} else if (ss_family == AF_INET6) {
304  		struct sockaddr_in6 *sin6;
305  
306  		sin6 = (struct sockaddr_in6 *)sockaddr;
307  		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
308  	}
309  	if (!ndev)
310  		goto out;
311  
312  	cdev = cxgbit_find_device(ndev, NULL);
313  out:
314  	rcu_read_unlock();
315  	return cdev;
316  }
317  
318  static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
319  {
320  	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
321  	int ss_family = sockaddr->ss_family;
322  	int addr_type;
323  
324  	if (ss_family == AF_INET) {
325  		struct sockaddr_in *sin;
326  
327  		sin = (struct sockaddr_in *)sockaddr;
328  		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
329  			return true;
330  	} else if (ss_family == AF_INET6) {
331  		struct sockaddr_in6 *sin6;
332  
333  		sin6 = (struct sockaddr_in6 *)sockaddr;
334  		addr_type = ipv6_addr_type((const struct in6_addr *)
335  				&sin6->sin6_addr);
336  		if (addr_type == IPV6_ADDR_ANY)
337  			return true;
338  	}
339  	return false;
340  }
341  
342  static int
343  __cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
344  {
345  	int stid, ret;
346  	int ss_family = cnp->com.local_addr.ss_family;
347  
348  	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
349  		return -EINVAL;
350  
351  	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
352  	if (stid < 0)
353  		return -EINVAL;
354  
355  	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
356  		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
357  		return -EINVAL;
358  	}
359  
360  	if (ss_family == AF_INET)
361  		ret = cxgbit_create_server4(cdev, stid, cnp);
362  	else
363  		ret = cxgbit_create_server6(cdev, stid, cnp);
364  
365  	if (ret) {
366  		if (ret != -ETIMEDOUT)
367  			cxgb4_free_stid(cdev->lldi.tids, stid,
368  					ss_family);
369  		cxgbit_np_hash_del(cdev, cnp);
370  		return ret;
371  	}
372  	return ret;
373  }
374  
375  static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
376  {
377  	struct cxgbit_device *cdev;
378  	int ret = -1;
379  
380  	mutex_lock(&cdev_list_lock);
381  	cdev = cxgbit_find_np_cdev(cnp);
382  	if (!cdev)
383  		goto out;
384  
385  	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
386  		goto out;
387  
388  	if (__cxgbit_setup_cdev_np(cdev, cnp))
389  		goto out;
390  
391  	cnp->com.cdev = cdev;
392  	ret = 0;
393  out:
394  	mutex_unlock(&cdev_list_lock);
395  	return ret;
396  }
397  
398  static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
399  {
400  	struct cxgbit_device *cdev;
401  	int ret;
402  	u32 count = 0;
403  
404  	mutex_lock(&cdev_list_lock);
405  	list_for_each_entry(cdev, &cdev_list_head, list) {
406  		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
407  			mutex_unlock(&cdev_list_lock);
408  			return -1;
409  		}
410  	}
411  
412  	list_for_each_entry(cdev, &cdev_list_head, list) {
413  		ret = __cxgbit_setup_cdev_np(cdev, cnp);
414  		if (ret == -ETIMEDOUT)
415  			break;
416  		if (ret != 0)
417  			continue;
418  		count++;
419  	}
420  	mutex_unlock(&cdev_list_lock);
421  
422  	return count ? 0 : -1;
423  }
424  
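/*
 * Called by the iSCSI target core to start listening on @ksockaddr.
 * A wildcard address is offloaded to every registered adapter,
 * otherwise only to the adapter that owns the address.
 */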
425  int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
426  {
427  	struct cxgbit_np *cnp;
428  	int ret;
429  
430  	if ((ksockaddr->ss_family != AF_INET) &&
431  	    (ksockaddr->ss_family != AF_INET6))
432  		return -EINVAL;
433  
434  	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
435  	if (!cnp)
436  		return -ENOMEM;
437  
438  	init_waitqueue_head(&cnp->accept_wait);
439  	init_completion(&cnp->com.wr_wait.completion);
440  	init_completion(&cnp->accept_comp);
441  	INIT_LIST_HEAD(&cnp->np_accept_list);
442  	spin_lock_init(&cnp->np_accept_lock);
443  	kref_init(&cnp->kref);
444  	memcpy(&np->np_sockaddr, ksockaddr,
445  	       sizeof(struct sockaddr_storage));
446  	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
447  	       sizeof(cnp->com.local_addr));
448  
449  	cnp->np = np;
450  	cnp->com.cdev = NULL;
451  
452  	if (cxgbit_inaddr_any(cnp))
453  		ret = cxgbit_setup_all_np(cnp);
454  	else
455  		ret = cxgbit_setup_cdev_np(cnp);
456  
457  	if (ret) {
458  		cxgbit_put_cnp(cnp);
459  		return -EINVAL;
460  	}
461  
462  	np->np_context = cnp;
463  	cnp->com.state = CSK_STATE_LISTEN;
464  	return 0;
465  }
466  
467  static void
468  cxgbit_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
469  		     struct cxgbit_sock *csk)
470  {
471  	conn->login_family = np->np_sockaddr.ss_family;
472  	conn->login_sockaddr = csk->com.remote_addr;
473  	conn->local_sockaddr = csk->com.local_addr;
474  }
475  
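/*
 * Wait for a hardware-established child connection on the listening
 * endpoint and hand it to the iSCSI target core.
 */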
476  int cxgbit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
477  {
478  	struct cxgbit_np *cnp = np->np_context;
479  	struct cxgbit_sock *csk;
480  	int ret = 0;
481  
482  accept_wait:
483  	ret = wait_for_completion_interruptible(&cnp->accept_comp);
484  	if (ret)
485  		return -ENODEV;
486  
487  	spin_lock_bh(&np->np_thread_lock);
488  	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
489  		spin_unlock_bh(&np->np_thread_lock);
490  		/**
491  		 * No point in stalling here when np_thread
492  		 * is in state RESET/SHUTDOWN/EXIT - bail
493  		 **/
494  		return -ENODEV;
495  	}
496  	spin_unlock_bh(&np->np_thread_lock);
497  
498  	spin_lock_bh(&cnp->np_accept_lock);
499  	if (list_empty(&cnp->np_accept_list)) {
500  		spin_unlock_bh(&cnp->np_accept_lock);
501  		goto accept_wait;
502  	}
503  
504  	csk = list_first_entry(&cnp->np_accept_list,
505  			       struct cxgbit_sock,
506  			       accept_node);
507  
508  	list_del_init(&csk->accept_node);
509  	spin_unlock_bh(&cnp->np_accept_lock);
510  	conn->context = csk;
511  	csk->conn = conn;
512  
513  	cxgbit_set_conn_info(np, conn, csk);
514  	return 0;
515  }
516  
517  static int
518  __cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
519  {
520  	int stid, ret;
521  	bool ipv6 = false;
522  
523  	stid = cxgbit_np_hash_del(cdev, cnp);
524  	if (stid < 0)
525  		return -EINVAL;
526  	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
527  		return -EINVAL;
528  
529  	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
530  		ipv6 = true;
531  
532  	cxgbit_get_cnp(cnp);
533  	cxgbit_init_wr_wait(&cnp->com.wr_wait);
534  	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
535  				  cdev->lldi.rxq_ids[0], ipv6);
536  
537  	if (ret > 0)
538  		ret = net_xmit_errno(ret);
539  
540  	if (ret) {
541  		cxgbit_put_cnp(cnp);
542  		return ret;
543  	}
544  
545  	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
546  				    0, 10, __func__);
547  	if (ret == -ETIMEDOUT)
548  		return ret;
549  
550  	if (ipv6 && cnp->com.cdev) {
551  		struct sockaddr_in6 *sin6;
552  
553  		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
554  		cxgb4_clip_release(cdev->lldi.ports[0],
555  				   (const u32 *)&sin6->sin6_addr.s6_addr,
556  				   1);
557  	}
558  
559  	cxgb4_free_stid(cdev->lldi.tids, stid,
560  			cnp->com.local_addr.ss_family);
561  	return 0;
562  }
563  
564  static void cxgbit_free_all_np(struct cxgbit_np *cnp)
565  {
566  	struct cxgbit_device *cdev;
567  	int ret;
568  
569  	mutex_lock(&cdev_list_lock);
570  	list_for_each_entry(cdev, &cdev_list_head, list) {
571  		ret = __cxgbit_free_cdev_np(cdev, cnp);
572  		if (ret == -ETIMEDOUT)
573  			break;
574  	}
575  	mutex_unlock(&cdev_list_lock);
576  }
577  
578  static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
579  {
580  	struct cxgbit_device *cdev;
581  	bool found = false;
582  
583  	mutex_lock(&cdev_list_lock);
584  	list_for_each_entry(cdev, &cdev_list_head, list) {
585  		if (cdev == cnp->com.cdev) {
586  			found = true;
587  			break;
588  		}
589  	}
590  	if (!found)
591  		goto out;
592  
593  	__cxgbit_free_cdev_np(cdev, cnp);
594  out:
595  	mutex_unlock(&cdev_list_lock);
596  }
597  
598  static void __cxgbit_free_conn(struct cxgbit_sock *csk);
599  
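/*
 * Stop listening: remove the hardware server(s), then drop any
 * connections still parked on the accept list.
 */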
600  void cxgbit_free_np(struct iscsi_np *np)
601  {
602  	struct cxgbit_np *cnp = np->np_context;
603  	struct cxgbit_sock *csk, *tmp;
604  
605  	cnp->com.state = CSK_STATE_DEAD;
606  	if (cnp->com.cdev)
607  		cxgbit_free_cdev_np(cnp);
608  	else
609  		cxgbit_free_all_np(cnp);
610  
611  	spin_lock_bh(&cnp->np_accept_lock);
612  	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
613  		list_del_init(&csk->accept_node);
614  		__cxgbit_free_conn(csk);
615  	}
616  	spin_unlock_bh(&cnp->np_accept_lock);
617  
618  	np->np_context = NULL;
619  	cxgbit_put_cnp(cnp);
620  }
621  
622  static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
623  {
624  	struct sk_buff *skb;
625  	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
626  
627  	skb = alloc_skb(len, GFP_ATOMIC);
628  	if (!skb)
629  		return;
630  
631  	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
632  			      NULL, NULL);
633  
634  	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
635  	__skb_queue_tail(&csk->txq, skb);
636  	cxgbit_push_tx_frames(csk);
637  }
638  
639  static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
640  {
641  	struct cxgbit_sock *csk = handle;
642  
643  	pr_debug("%s cxgbit_device %p\n", __func__, handle);
644  	kfree_skb(skb);
645  	cxgbit_put_csk(csk);
646  }
647  
648  static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
649  {
650  	struct cxgbit_device *cdev = handle;
651  	struct cpl_abort_req *req = cplhdr(skb);
652  
653  	pr_debug("%s cdev %p\n", __func__, cdev);
654  	req->cmd = CPL_ABORT_NO_RST;
655  	cxgbit_ofld_send(cdev, skb);
656  }
657  
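/*
 * Issue a CPL_ABORT_REQ for the connection. Any pending tx data is
 * discarded, and a FLOWC work request is sent first if none has been
 * sent on this tid yet.
 */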
658  static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
659  {
660  	struct sk_buff *skb;
661  	u32 len = roundup(sizeof(struct cpl_abort_req), 16);
662  
663  	pr_debug("%s: csk %p tid %u; state %d\n",
664  		 __func__, csk, csk->tid, csk->com.state);
665  
666  	__skb_queue_purge(&csk->txq);
667  
668  	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
669  		cxgbit_send_tx_flowc_wr(csk);
670  
671  	skb = __skb_dequeue(&csk->skbq);
672  	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
673  			  csk->com.cdev, cxgbit_abort_arp_failure);
674  
675  	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
676  }
677  
678  static void
679  __cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
680  {
681  	__kfree_skb(skb);
682  
683  	if (csk->com.state != CSK_STATE_ESTABLISHED)
684  		goto no_abort;
685  
686  	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
687  	csk->com.state = CSK_STATE_ABORTING;
688  
689  	cxgbit_send_abort_req(csk);
690  
691  	return;
692  
693  no_abort:
694  	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
695  	cxgbit_put_csk(csk);
696  }
697  
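/*
 * Abort an established connection and wait for the abort reply.
 * If another context currently owns the socket lock, the abort is
 * deferred to the backlog queue and run when that context unlocks.
 */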
698  void cxgbit_abort_conn(struct cxgbit_sock *csk)
699  {
700  	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
701  
702  	cxgbit_get_csk(csk);
703  	cxgbit_init_wr_wait(&csk->com.wr_wait);
704  
705  	spin_lock_bh(&csk->lock);
706  	if (csk->lock_owner) {
707  		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
708  		__skb_queue_tail(&csk->backlogq, skb);
709  	} else {
710  		__cxgbit_abort_conn(csk, skb);
711  	}
712  	spin_unlock_bh(&csk->lock);
713  
714  	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
715  			      csk->tid, 600, __func__);
716  }
717  
718  static void __cxgbit_free_conn(struct cxgbit_sock *csk)
719  {
720  	struct iscsit_conn *conn = csk->conn;
721  	bool release = false;
722  
723  	pr_debug("%s: state %d\n",
724  		 __func__, csk->com.state);
725  
726  	spin_lock_bh(&csk->lock);
727  	switch (csk->com.state) {
728  	case CSK_STATE_ESTABLISHED:
729  		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
730  			csk->com.state = CSK_STATE_CLOSING;
731  			cxgbit_send_halfclose(csk);
732  		} else {
733  			csk->com.state = CSK_STATE_ABORTING;
734  			cxgbit_send_abort_req(csk);
735  		}
736  		break;
737  	case CSK_STATE_CLOSING:
738  		csk->com.state = CSK_STATE_MORIBUND;
739  		cxgbit_send_halfclose(csk);
740  		break;
741  	case CSK_STATE_DEAD:
742  		release = true;
743  		break;
744  	default:
745  		pr_err("%s: csk %p; state %d\n",
746  		       __func__, csk, csk->com.state);
747  	}
748  	spin_unlock_bh(&csk->lock);
749  
750  	if (release)
751  		cxgbit_put_csk(csk);
752  }
753  
754  void cxgbit_free_conn(struct iscsit_conn *conn)
755  {
756  	__cxgbit_free_conn(conn->context);
757  }
758  
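/*
 * Derive the effective MSS from the negotiated MTU index, subtracting
 * IP/TCP header overhead and, if timestamps were negotiated, the
 * timestamp option.
 */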
759  static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
760  {
761  	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
762  			((csk->com.remote_addr.ss_family == AF_INET) ?
763  			sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
764  			sizeof(struct tcphdr);
765  	csk->mss = csk->emss;
766  	if (TCPOPT_TSTAMP_G(opt))
767  		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
768  	if (csk->emss < 128)
769  		csk->emss = 128;
770  	if (csk->emss & 7)
771  		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
772  			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
773  	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
774  		 csk->mss, csk->emss);
775  }
776  
777  static void cxgbit_free_skb(struct cxgbit_sock *csk)
778  {
779  	struct sk_buff *skb;
780  
781  	__skb_queue_purge(&csk->txq);
782  	__skb_queue_purge(&csk->rxq);
783  	__skb_queue_purge(&csk->backlogq);
784  	__skb_queue_purge(&csk->ppodq);
785  	__skb_queue_purge(&csk->skbq);
786  
787  	while ((skb = cxgbit_sock_dequeue_wr(csk)))
788  		kfree_skb(skb);
789  
790  	__kfree_skb(csk->lro_hskb);
791  }
792  
793  void _cxgbit_free_csk(struct kref *kref)
794  {
795  	struct cxgbit_sock *csk;
796  	struct cxgbit_device *cdev;
797  
798  	csk = container_of(kref, struct cxgbit_sock, kref);
799  
800  	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
801  
802  	if (csk->com.local_addr.ss_family == AF_INET6) {
803  		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
804  					     &csk->com.local_addr;
805  		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
806  				   (const u32 *)
807  				   &sin6->sin6_addr.s6_addr, 1);
808  	}
809  
810  	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
811  			 csk->com.local_addr.ss_family);
812  	dst_release(csk->dst);
813  	cxgb4_l2t_release(csk->l2t);
814  
815  	cdev = csk->com.cdev;
816  	spin_lock_bh(&cdev->cskq.lock);
817  	list_del(&csk->list);
818  	spin_unlock_bh(&cdev->cskq.lock);
819  
820  	cxgbit_free_skb(csk);
821  	cxgbit_put_cnp(csk->cnp);
822  	cxgbit_put_cdev(cdev);
823  
824  	kfree(csk);
825  }
826  
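/*
 * Scale the 256KB baseline send/receive windows with link speed
 * (in multiples of 10Gbps), clamped to the limits used by the driver.
 */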
827  static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
828  {
829  	unsigned int linkspeed;
830  	u8 scale;
831  
832  	linkspeed = pi->link_cfg.speed;
833  	scale = linkspeed / SPEED_10000;
834  
835  #define CXGBIT_10G_RCV_WIN (256 * 1024)
836  	csk->rcv_win = CXGBIT_10G_RCV_WIN;
837  	if (scale)
838  		csk->rcv_win *= scale;
839  	csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);
840  
841  #define CXGBIT_10G_SND_WIN (256 * 1024)
842  	csk->snd_win = CXGBIT_10G_SND_WIN;
843  	if (scale)
844  		csk->snd_win *= scale;
845  	csk->snd_win = min(csk->snd_win, 512U * 1024);
846  
847  	pr_debug("%s snd_win %d rcv_win %d\n",
848  		 __func__, csk->snd_win, csk->rcv_win);
849  }
850  
851  #ifdef CONFIG_CHELSIO_T4_DCB
852  static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
853  {
854  	return ndev->dcbnl_ops->getstate(ndev);
855  }
856  
857  static int cxgbit_select_priority(int pri_mask)
858  {
859  	if (!pri_mask)
860  		return 0;
861  
862  	return (ffs(pri_mask) - 1);
863  }
864  
865  static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
866  {
867  	int ret;
868  	u8 caps;
869  
870  	struct dcb_app iscsi_dcb_app = {
871  		.protocol = local_port
872  	};
873  
874  	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
875  
876  	if (ret)
877  		return 0;
878  
879  	if (caps & DCB_CAP_DCBX_VER_IEEE) {
880  		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
881  		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
882  		if (!ret) {
883  			iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
884  			ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
885  		}
886  	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
887  		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
888  
889  		ret = dcb_getapp(ndev, &iscsi_dcb_app);
890  	}
891  
892  	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));
893  
894  	return cxgbit_select_priority(ret);
895  }
896  #endif
897  
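/*
 * Resolve the neighbour for @peer_ip, grab an L2T entry and derive the
 * per-connection offload parameters (tx channel, SMT index, tx/rx queue
 * indices, TCP windows) from the egress port. Loopback destinations are
 * handled separately.
 */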
898  static int
899  cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
900  		    u16 local_port, struct dst_entry *dst,
901  		    struct cxgbit_device *cdev)
902  {
903  	struct neighbour *n;
904  	int ret, step;
905  	struct net_device *ndev;
906  	u16 rxq_idx, port_id;
907  #ifdef CONFIG_CHELSIO_T4_DCB
908  	u8 priority = 0;
909  #endif
910  
911  	n = dst_neigh_lookup(dst, peer_ip);
912  	if (!n)
913  		return -ENODEV;
914  
915  	rcu_read_lock();
916  	if (!(n->nud_state & NUD_VALID))
917  		neigh_event_send(n, NULL);
918  
919  	ret = -ENOMEM;
920  	if (n->dev->flags & IFF_LOOPBACK) {
921  		if (iptype == 4)
922  			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
923  		else if (IS_ENABLED(CONFIG_IPV6))
924  			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
925  		else
926  			ndev = NULL;
927  
928  		if (!ndev) {
929  			ret = -ENODEV;
930  			goto out;
931  		}
932  
933  		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
934  					 n, ndev, 0);
935  		if (!csk->l2t)
936  			goto out;
937  		csk->mtu = ndev->mtu;
938  		csk->tx_chan = cxgb4_port_chan(ndev);
939  		csk->smac_idx =
940  			       ((struct port_info *)netdev_priv(ndev))->smt_idx;
941  		step = cdev->lldi.ntxq /
942  			cdev->lldi.nchan;
943  		csk->txq_idx = cxgb4_port_idx(ndev) * step;
944  		step = cdev->lldi.nrxq /
945  			cdev->lldi.nchan;
946  		csk->ctrlq_idx = cxgb4_port_idx(ndev);
947  		csk->rss_qid = cdev->lldi.rxq_ids[
948  				cxgb4_port_idx(ndev) * step];
949  		csk->port_id = cxgb4_port_idx(ndev);
950  		cxgbit_set_tcp_window(csk,
951  				      (struct port_info *)netdev_priv(ndev));
952  	} else {
953  		ndev = cxgbit_get_real_dev(n->dev);
954  		if (!ndev) {
955  			ret = -ENODEV;
956  			goto out;
957  		}
958  
959  #ifdef CONFIG_CHELSIO_T4_DCB
960  		if (cxgbit_get_iscsi_dcb_state(ndev))
961  			priority = cxgbit_get_iscsi_dcb_priority(ndev,
962  								 local_port);
963  
964  		csk->dcb_priority = priority;
965  
966  		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
967  #else
968  		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
969  #endif
970  		if (!csk->l2t)
971  			goto out;
972  		port_id = cxgb4_port_idx(ndev);
973  		csk->mtu = dst_mtu(dst);
974  		csk->tx_chan = cxgb4_port_chan(ndev);
975  		csk->smac_idx =
976  			       ((struct port_info *)netdev_priv(ndev))->smt_idx;
977  		step = cdev->lldi.ntxq /
978  			cdev->lldi.nports;
979  		csk->txq_idx = (port_id * step) +
980  				(cdev->selectq[port_id][0]++ % step);
981  		csk->ctrlq_idx = cxgb4_port_idx(ndev);
982  		step = cdev->lldi.nrxq /
983  			cdev->lldi.nports;
984  		rxq_idx = (port_id * step) +
985  				(cdev->selectq[port_id][1]++ % step);
986  		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
987  		csk->port_id = port_id;
988  		cxgbit_set_tcp_window(csk,
989  				      (struct port_info *)netdev_priv(ndev));
990  	}
991  	ret = 0;
992  out:
993  	rcu_read_unlock();
994  	neigh_release(n);
995  	return ret;
996  }
997  
998  int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
999  {
1000  	int ret = 0;
1001  
1002  	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1003  		kfree_skb(skb);
1004  		pr_err("%s - device not up - dropping\n", __func__);
1005  		return -EIO;
1006  	}
1007  
1008  	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
1009  	if (ret < 0)
1010  		kfree_skb(skb);
1011  	return ret < 0 ? ret : 0;
1012  }
1013  
1014  static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
1015  {
1016  	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
1017  	struct sk_buff *skb;
1018  
1019  	skb = alloc_skb(len, GFP_ATOMIC);
1020  	if (!skb)
1021  		return;
1022  
1023  	cxgb_mk_tid_release(skb, len, tid, 0);
1024  	cxgbit_ofld_send(cdev, skb);
1025  }
1026  
1027  int
1028  cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
1029  		struct l2t_entry *l2e)
1030  {
1031  	int ret = 0;
1032  
1033  	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1034  		kfree_skb(skb);
1035  		pr_err("%s - device not up - dropping\n", __func__);
1036  		return -EIO;
1037  	}
1038  
1039  	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
1040  	if (ret < 0)
1041  		kfree_skb(skb);
1042  	return ret < 0 ? ret : 0;
1043  }
1044  
1045  static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
1046  {
1047  	if (csk->com.state != CSK_STATE_ESTABLISHED) {
1048  		__kfree_skb(skb);
1049  		return;
1050  	}
1051  
1052  	cxgbit_ofld_send(csk->com.cdev, skb);
1053  }
1054  
1055  /*
1056   * CPL connection rx data ack: host ->
1057   * Send RX credits through an RX_DATA_ACK CPL message.
1058   * Returns the number of credits sent.
1059   */
1060  int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
1061  {
1062  	struct sk_buff *skb;
1063  	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
1064  	u32 credit_dack;
1065  
1066  	skb = alloc_skb(len, GFP_KERNEL);
1067  	if (!skb)
1068  		return -1;
1069  
1070  	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) |
1071  		      RX_CREDITS_V(csk->rx_credits);
1072  
1073  	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
1074  			    credit_dack);
1075  
1076  	csk->rx_credits = 0;
1077  
1078  	spin_lock_bh(&csk->lock);
1079  	if (csk->lock_owner) {
1080  		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
1081  		__skb_queue_tail(&csk->backlogq, skb);
1082  		spin_unlock_bh(&csk->lock);
1083  		return 0;
1084  	}
1085  
1086  	cxgbit_send_rx_credits(csk, skb);
1087  	spin_unlock_bh(&csk->lock);
1088  
1089  	return 0;
1090  }
1091  
1092  #define FLOWC_WR_NPARAMS_MIN    9
1093  #define FLOWC_WR_NPARAMS_MAX	11
1094  static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
1095  {
1096  	struct sk_buff *skb;
1097  	u32 len, flowclen;
1098  	u8 i;
1099  
1100  	flowclen = offsetof(struct fw_flowc_wr,
1101  			    mnemval[FLOWC_WR_NPARAMS_MAX]);
1102  
1103  	len = max_t(u32, sizeof(struct cpl_abort_req),
1104  		    sizeof(struct cpl_abort_rpl));
1105  
1106  	len = max(len, flowclen);
1107  	len = roundup(len, 16);
1108  
1109  	for (i = 0; i < 3; i++) {
1110  		skb = alloc_skb(len, GFP_ATOMIC);
1111  		if (!skb)
1112  			goto out;
1113  		__skb_queue_tail(&csk->skbq, skb);
1114  	}
1115  
1116  	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
1117  	if (!skb)
1118  		goto out;
1119  
1120  	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
1121  	csk->lro_hskb = skb;
1122  
1123  	return 0;
1124  out:
1125  	__skb_queue_purge(&csk->skbq);
1126  	return -ENOMEM;
1127  }
1128  
1129  static void
1130  cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
1131  {
1132  	struct sk_buff *skb;
1133  	const struct tcphdr *tcph;
1134  	struct cpl_t5_pass_accept_rpl *rpl5;
1135  	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
1136  	unsigned int len = roundup(sizeof(*rpl5), 16);
1137  	unsigned int mtu_idx;
1138  	u64 opt0;
1139  	u32 opt2, hlen;
1140  	u32 wscale;
1141  	u32 win;
1142  
1143  	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
1144  
1145  	skb = alloc_skb(len, GFP_ATOMIC);
1146  	if (!skb) {
1147  		cxgbit_put_csk(csk);
1148  		return;
1149  	}
1150  
1151  	rpl5 = __skb_put_zero(skb, len);
1152  
1153  	INIT_TP_WR(rpl5, csk->tid);
1154  	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1155  						     csk->tid));
1156  	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
1157  		      req->tcpopt.tstamp,
1158  		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1159  	wscale = cxgb_compute_wscale(csk->rcv_win);
1160  	/*
1161  	 * Specify the largest window that will fit in opt0. The
1162  	 * remainder will be specified in the rx_data_ack.
1163  	 */
1164  	win = csk->rcv_win >> 10;
1165  	if (win > RCV_BUFSIZ_M)
1166  		win = RCV_BUFSIZ_M;
1167  	opt0 =  TCAM_BYPASS_F |
1168  		WND_SCALE_V(wscale) |
1169  		MSS_IDX_V(mtu_idx) |
1170  		L2T_IDX_V(csk->l2t->idx) |
1171  		TX_CHAN_V(csk->tx_chan) |
1172  		SMAC_SEL_V(csk->smac_idx) |
1173  		DSCP_V(csk->tos >> 2) |
1174  		ULP_MODE_V(ULP_MODE_ISCSI) |
1175  		RCV_BUFSIZ_V(win);
1176  
1177  	opt2 = RX_CHANNEL_V(0) |
1178  		RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
1179  
1180  	if (!is_t5(lldi->adapter_type))
1181  		opt2 |= RX_FC_DISABLE_F;
1182  
1183  	if (req->tcpopt.tstamp)
1184  		opt2 |= TSTAMPS_EN_F;
1185  	if (req->tcpopt.sack)
1186  		opt2 |= SACK_EN_F;
1187  	if (wscale)
1188  		opt2 |= WND_SCALE_EN_F;
1189  
1190  	hlen = ntohl(req->hdr_len);
1191  
1192  	if (is_t5(lldi->adapter_type))
1193  		tcph = (struct tcphdr *)((u8 *)(req + 1) +
1194  		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
1195  	else
1196  		tcph = (struct tcphdr *)((u8 *)(req + 1) +
1197  		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
1198  
1199  	if (tcph->ece && tcph->cwr)
1200  		opt2 |= CCTRL_ECN_V(1);
1201  
1202  	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
1203  
1204  	opt2 |= T5_ISS_F;
1205  	rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
1206  
1207  	opt2 |= T5_OPT_2_VALID_F;
1208  
1209  	rpl5->opt0 = cpu_to_be64(opt0);
1210  	rpl5->opt2 = cpu_to_be32(opt2);
1211  	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
1212  	t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
1213  	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
1214  }
1215  
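/*
 * Handle CPL_PASS_ACCEPT_REQ: a SYN arrived on one of our hardware
 * servers. Allocate and initialize a child socket (csk), set up the
 * offload state and reply with CPL_PASS_ACCEPT_RPL.
 */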
1216  static void
1217  cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
1218  {
1219  	struct cxgbit_sock *csk = NULL;
1220  	struct cxgbit_np *cnp;
1221  	struct cpl_pass_accept_req *req = cplhdr(skb);
1222  	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1223  	struct tid_info *t = cdev->lldi.tids;
1224  	unsigned int tid = GET_TID(req);
1225  	u16 peer_mss = ntohs(req->tcpopt.mss);
1226  	unsigned short hdrs;
1227  
1228  	struct dst_entry *dst;
1229  	__u8 local_ip[16], peer_ip[16];
1230  	__be16 local_port, peer_port;
1231  	int ret;
1232  	int iptype;
1233  
1234  	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
1235  		 __func__, cdev, stid, tid);
1236  
1237  	cnp = lookup_stid(t, stid);
1238  	if (!cnp) {
1239  		pr_err("%s connect request on invalid stid %d\n",
1240  		       __func__, stid);
1241  		goto rel_skb;
1242  	}
1243  
1244  	if (cnp->com.state != CSK_STATE_LISTEN) {
1245  		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
1246  		       __func__);
1247  		goto reject;
1248  	}
1249  
1250  	csk = lookup_tid(t, tid);
1251  	if (csk) {
1252  		pr_err("%s csk not null tid %u\n",
1253  		       __func__, tid);
1254  		goto rel_skb;
1255  	}
1256  
1257  	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
1258  			peer_ip, &local_port, &peer_port);
1259  
1260  	/* Find output route */
1261  	if (iptype == 4)  {
1262  		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
1263  			 "lport %d rport %d peer_mss %d\n"
1264  			 , __func__, cnp, tid,
1265  			 local_ip, peer_ip, ntohs(local_port),
1266  			 ntohs(peer_port), peer_mss);
1267  		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
1268  				      *(__be32 *)local_ip,
1269  				      *(__be32 *)peer_ip,
1270  				      local_port, peer_port,
1271  				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
1272  	} else {
1273  		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
1274  			 "lport %d rport %d peer_mss %d\n"
1275  			 , __func__, cnp, tid,
1276  			 local_ip, peer_ip, ntohs(local_port),
1277  			 ntohs(peer_port), peer_mss);
1278  		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
1279  				       local_ip, peer_ip,
1280  				       local_port, peer_port,
1281  				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
1282  				       ((struct sockaddr_in6 *)
1283  					&cnp->com.local_addr)->sin6_scope_id);
1284  	}
1285  	if (!dst) {
1286  		pr_err("%s - failed to find dst entry!\n",
1287  		       __func__);
1288  		goto reject;
1289  	}
1290  
1291  	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
1292  	if (!csk) {
1293  		dst_release(dst);
1294  		goto rel_skb;
1295  	}
1296  
1297  	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
1298  				  dst, cdev);
1299  	if (ret) {
1300  		pr_err("%s - failed to allocate l2t entry!\n",
1301  		       __func__);
1302  		dst_release(dst);
1303  		kfree(csk);
1304  		goto reject;
1305  	}
1306  
1307  	kref_init(&csk->kref);
1308  	init_completion(&csk->com.wr_wait.completion);
1309  
1310  	INIT_LIST_HEAD(&csk->accept_node);
1311  
1312  	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
1313  		sizeof(struct tcphdr) +	(req->tcpopt.tstamp ? 12 : 0);
1314  	if (peer_mss && csk->mtu > (peer_mss + hdrs))
1315  		csk->mtu = peer_mss + hdrs;
1316  
1317  	csk->com.state = CSK_STATE_CONNECTING;
1318  	csk->com.cdev = cdev;
1319  	csk->cnp = cnp;
1320  	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1321  	csk->dst = dst;
1322  	csk->tid = tid;
1323  	csk->wr_cred = cdev->lldi.wr_cred -
1324  			DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1325  	csk->wr_max_cred = csk->wr_cred;
1326  	csk->wr_una_cred = 0;
1327  
1328  	if (iptype == 4) {
1329  		struct sockaddr_in *sin = (struct sockaddr_in *)
1330  					  &csk->com.local_addr;
1331  		sin->sin_family = AF_INET;
1332  		sin->sin_port = local_port;
1333  		sin->sin_addr.s_addr = *(__be32 *)local_ip;
1334  
1335  		sin = (struct sockaddr_in *)&csk->com.remote_addr;
1336  		sin->sin_family = AF_INET;
1337  		sin->sin_port = peer_port;
1338  		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
1339  	} else {
1340  		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
1341  					    &csk->com.local_addr;
1342  
1343  		sin6->sin6_family = PF_INET6;
1344  		sin6->sin6_port = local_port;
1345  		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
1346  		cxgb4_clip_get(cdev->lldi.ports[0],
1347  			       (const u32 *)&sin6->sin6_addr.s6_addr,
1348  			       1);
1349  
1350  		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
1351  		sin6->sin6_family = PF_INET6;
1352  		sin6->sin6_port = peer_port;
1353  		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
1354  	}
1355  
1356  	skb_queue_head_init(&csk->rxq);
1357  	skb_queue_head_init(&csk->txq);
1358  	skb_queue_head_init(&csk->ppodq);
1359  	skb_queue_head_init(&csk->backlogq);
1360  	skb_queue_head_init(&csk->skbq);
1361  	cxgbit_sock_reset_wr_list(csk);
1362  	spin_lock_init(&csk->lock);
1363  	init_waitqueue_head(&csk->waitq);
1364  	csk->lock_owner = false;
1365  
1366  	if (cxgbit_alloc_csk_skb(csk)) {
1367  		dst_release(dst);
1368  		kfree(csk);
1369  		goto rel_skb;
1370  	}
1371  
1372  	cxgbit_get_cnp(cnp);
1373  	cxgbit_get_cdev(cdev);
1374  
1375  	spin_lock(&cdev->cskq.lock);
1376  	list_add_tail(&csk->list, &cdev->cskq.list);
1377  	spin_unlock(&cdev->cskq.lock);
1378  	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
1379  	cxgbit_pass_accept_rpl(csk, req);
1380  	goto rel_skb;
1381  
1382  reject:
1383  	cxgbit_release_tid(cdev, tid);
1384  rel_skb:
1385  	__kfree_skb(skb);
1386  }
1387  
1388  static u32
1389  cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
1390  			   u32 *flowclenp)
1391  {
1392  	u32 nparams, flowclen16, flowclen;
1393  
1394  	nparams = FLOWC_WR_NPARAMS_MIN;
1395  
1396  	if (csk->snd_wscale)
1397  		nparams++;
1398  
1399  #ifdef CONFIG_CHELSIO_T4_DCB
1400  	nparams++;
1401  #endif
1402  	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
1403  	flowclen16 = DIV_ROUND_UP(flowclen, 16);
1404  	flowclen = flowclen16 * 16;
1405  	/*
1406  	 * Return the number of 16-byte credits used by the flowc request.
1407  	 * Pass back the nparams and actual flowc length if requested.
1408  	 */
1409  	if (nparamsp)
1410  		*nparamsp = nparams;
1411  	if (flowclenp)
1412  		*flowclenp = flowclen;
1413  	return flowclen16;
1414  }
1415  
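/*
 * Send the FLOWC work request that seeds the firmware with the
 * connection's tx parameters (channel, queues, sequence numbers,
 * window and MSS). Returns the number of 16-byte credits consumed.
 */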
1416  u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
1417  {
1418  	struct cxgbit_device *cdev = csk->com.cdev;
1419  	struct fw_flowc_wr *flowc;
1420  	u32 nparams, flowclen16, flowclen;
1421  	struct sk_buff *skb;
1422  	u8 index;
1423  
1424  #ifdef CONFIG_CHELSIO_T4_DCB
1425  	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
1426  #endif
1427  
1428  	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
1429  
1430  	skb = __skb_dequeue(&csk->skbq);
1431  	flowc = __skb_put_zero(skb, flowclen);
1432  
1433  	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
1434  					   FW_FLOWC_WR_NPARAMS_V(nparams));
1435  	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
1436  					  FW_WR_FLOWID_V(csk->tid));
1437  	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
1438  	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
1439  					    (csk->com.cdev->lldi.pf));
1440  	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
1441  	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
1442  	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
1443  	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
1444  	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
1445  	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
1446  	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
1447  	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
1448  	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
1449  	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
1450  	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
1451  	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
1452  	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
1453  	flowc->mnemval[7].val = cpu_to_be32(csk->emss);
1454  
1455  	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
1456  	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
1457  		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
1458  	else
1459  		flowc->mnemval[8].val = cpu_to_be32(16384);
1460  
1461  	index = 9;
1462  
1463  	if (csk->snd_wscale) {
1464  		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
1465  		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
1466  		index++;
1467  	}
1468  
1469  #ifdef CONFIG_CHELSIO_T4_DCB
1470  	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
1471  	if (vlan == VLAN_NONE) {
1472  		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
1473  		flowc->mnemval[index].val = cpu_to_be32(0);
1474  	} else
1475  		flowc->mnemval[index].val = cpu_to_be32(
1476  				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
1477  #endif
1478  
1479  	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
1480  		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
1481  		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
1482  		 csk->rcv_nxt, csk->snd_win, csk->emss);
1483  	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
1484  	cxgbit_ofld_send(csk->com.cdev, skb);
1485  	return flowclen16;
1486  }
1487  
1488  static int
1489  cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1490  {
1491  	spin_lock_bh(&csk->lock);
1492  	if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
1493  		spin_unlock_bh(&csk->lock);
1494  		pr_err("%s: csk 0x%p, tid %u, state %u\n",
1495  		       __func__, csk, csk->tid, csk->com.state);
1496  		__kfree_skb(skb);
1497  		return -1;
1498  	}
1499  
1500  	cxgbit_get_csk(csk);
1501  	cxgbit_init_wr_wait(&csk->com.wr_wait);
1502  	cxgbit_ofld_send(csk->com.cdev, skb);
1503  	spin_unlock_bh(&csk->lock);
1504  
1505  	return 0;
1506  }
1507  
1508  int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1509  {
1510  	struct sk_buff *skb;
1511  	struct cpl_set_tcb_field *req;
1512  	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1513  	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1514  	unsigned int len = roundup(sizeof(*req), 16);
1515  	int ret;
1516  
1517  	skb = alloc_skb(len, GFP_KERNEL);
1518  	if (!skb)
1519  		return -ENOMEM;
1520  
1521  	/*  set up ulp submode */
1522  	req = __skb_put_zero(skb, len);
1523  
1524  	INIT_TP_WR(req, csk->tid);
1525  	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1526  	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1527  	req->word_cookie = htons(0);
1528  	req->mask = cpu_to_be64(0x3 << 4);
1529  	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1530  				(dcrc ? ULP_CRC_DATA : 0)) << 4);
1531  	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1532  
1533  	if (cxgbit_send_tcb_skb(csk, skb))
1534  		return -1;
1535  
1536  	ret = cxgbit_wait_for_reply(csk->com.cdev,
1537  				    &csk->com.wr_wait,
1538  				    csk->tid, 5, __func__);
1539  	if (ret)
1540  		return -1;
1541  
1542  	return 0;
1543  }
1544  
1545  int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1546  {
1547  	struct sk_buff *skb;
1548  	struct cpl_set_tcb_field *req;
1549  	unsigned int len = roundup(sizeof(*req), 16);
1550  	int ret;
1551  
1552  	skb = alloc_skb(len, GFP_KERNEL);
1553  	if (!skb)
1554  		return -ENOMEM;
1555  
1556  	req = __skb_put_zero(skb, len);
1557  
1558  	INIT_TP_WR(req, csk->tid);
1559  	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1560  	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1561  	req->word_cookie = htons(0);
1562  	req->mask = cpu_to_be64(0x3 << 8);
1563  	req->val = cpu_to_be64(pg_idx << 8);
1564  	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1565  
1566  	if (cxgbit_send_tcb_skb(csk, skb))
1567  		return -1;
1568  
1569  	ret = cxgbit_wait_for_reply(csk->com.cdev,
1570  				    &csk->com.wr_wait,
1571  				    csk->tid, 5, __func__);
1572  	if (ret)
1573  		return -1;
1574  
1575  	return 0;
1576  }
1577  
1578  static void
1579  cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1580  {
1581  	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1582  	struct tid_info *t = cdev->lldi.tids;
1583  	unsigned int stid = GET_TID(rpl);
1584  	struct cxgbit_np *cnp = lookup_stid(t, stid);
1585  
1586  	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1587  		 __func__, cnp, stid, rpl->status);
1588  
1589  	if (!cnp) {
1590  		pr_info("%s stid %d lookup failure\n", __func__, stid);
1591  		goto rel_skb;
1592  	}
1593  
1594  	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1595  	cxgbit_put_cnp(cnp);
1596  rel_skb:
1597  	__kfree_skb(skb);
1598  }
1599  
1600  static void
1601  cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1602  {
1603  	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1604  	struct tid_info *t = cdev->lldi.tids;
1605  	unsigned int stid = GET_TID(rpl);
1606  	struct cxgbit_np *cnp = lookup_stid(t, stid);
1607  
1608  	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1609  		 __func__, cnp, stid, rpl->status);
1610  
1611  	if (!cnp) {
1612  		pr_info("%s stid %d lookup failure\n", __func__, stid);
1613  		goto rel_skb;
1614  	}
1615  
1616  	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1617  	cxgbit_put_cnp(cnp);
1618  rel_skb:
1619  	__kfree_skb(skb);
1620  }
1621  
1622  static void
1623  cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1624  {
1625  	struct cpl_pass_establish *req = cplhdr(skb);
1626  	struct tid_info *t = cdev->lldi.tids;
1627  	unsigned int tid = GET_TID(req);
1628  	struct cxgbit_sock *csk;
1629  	struct cxgbit_np *cnp;
1630  	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
1631  	u32 snd_isn = be32_to_cpu(req->snd_isn);
1632  	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
1633  
1634  	csk = lookup_tid(t, tid);
1635  	if (unlikely(!csk)) {
1636  		pr_err("can't find connection for tid %u.\n", tid);
1637  		goto rel_skb;
1638  	}
1639  	cnp = csk->cnp;
1640  
1641  	pr_debug("%s: csk %p; tid %u; cnp %p\n",
1642  		 __func__, csk, tid, cnp);
1643  
1644  	csk->write_seq = snd_isn;
1645  	csk->snd_una = snd_isn;
1646  	csk->snd_nxt = snd_isn;
1647  
1648  	csk->rcv_nxt = rcv_isn;
1649  
1650  	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1651  	cxgbit_set_emss(csk, tcp_opt);
1652  	dst_confirm(csk->dst);
1653  	csk->com.state = CSK_STATE_ESTABLISHED;
1654  	spin_lock_bh(&cnp->np_accept_lock);
1655  	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1656  	spin_unlock_bh(&cnp->np_accept_lock);
1657  	complete(&cnp->accept_comp);
1658  rel_skb:
1659  	__kfree_skb(skb);
1660  }
1661  
1662  static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1663  {
1664  	cxgbit_skcb_flags(skb) = 0;
1665  	spin_lock_bh(&csk->rxq.lock);
1666  	__skb_queue_tail(&csk->rxq, skb);
1667  	spin_unlock_bh(&csk->rxq.lock);
1668  	wake_up(&csk->waitq);
1669  }
1670  
1671  static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1672  {
1673  	pr_debug("%s: csk %p; tid %u; state %d\n",
1674  		 __func__, csk, csk->tid, csk->com.state);
1675  
1676  	switch (csk->com.state) {
1677  	case CSK_STATE_ESTABLISHED:
1678  		csk->com.state = CSK_STATE_CLOSING;
1679  		cxgbit_queue_rx_skb(csk, skb);
1680  		return;
1681  	case CSK_STATE_CLOSING:
1682  		/* simultaneous close */
1683  		csk->com.state = CSK_STATE_MORIBUND;
1684  		break;
1685  	case CSK_STATE_MORIBUND:
1686  		csk->com.state = CSK_STATE_DEAD;
1687  		cxgbit_put_csk(csk);
1688  		break;
1689  	case CSK_STATE_ABORTING:
1690  		break;
1691  	default:
1692  		pr_info("%s: cpl_peer_close in bad state %d\n",
1693  			__func__, csk->com.state);
1694  	}
1695  
1696  	__kfree_skb(skb);
1697  }
1698  
1699  static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1700  {
1701  	pr_debug("%s: csk %p; tid %u; state %d\n",
1702  		 __func__, csk, csk->tid, csk->com.state);
1703  
1704  	switch (csk->com.state) {
1705  	case CSK_STATE_CLOSING:
1706  		csk->com.state = CSK_STATE_MORIBUND;
1707  		break;
1708  	case CSK_STATE_MORIBUND:
1709  		csk->com.state = CSK_STATE_DEAD;
1710  		cxgbit_put_csk(csk);
1711  		break;
1712  	case CSK_STATE_ABORTING:
1713  	case CSK_STATE_DEAD:
1714  		break;
1715  	default:
1716  		pr_info("%s: cpl_close_con_rpl in bad state %d\n",
1717  			__func__, csk->com.state);
1718  	}
1719  
1720  	__kfree_skb(skb);
1721  }
1722  
1723  static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1724  {
1725  	struct cpl_abort_req_rss *hdr = cplhdr(skb);
1726  	unsigned int tid = GET_TID(hdr);
1727  	struct sk_buff *rpl_skb;
1728  	bool release = false;
1729  	bool wakeup_thread = false;
1730  	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
1731  
1732  	pr_debug("%s: csk %p; tid %u; state %d\n",
1733  		 __func__, csk, tid, csk->com.state);
1734  
1735  	if (cxgb_is_neg_adv(hdr->status)) {
1736  		pr_err("%s: got neg advise %d on tid %u\n",
1737  		       __func__, hdr->status, tid);
1738  		goto rel_skb;
1739  	}
1740  
1741  	switch (csk->com.state) {
1742  	case CSK_STATE_CONNECTING:
1743  	case CSK_STATE_MORIBUND:
1744  		csk->com.state = CSK_STATE_DEAD;
1745  		release = true;
1746  		break;
1747  	case CSK_STATE_ESTABLISHED:
1748  		csk->com.state = CSK_STATE_DEAD;
1749  		wakeup_thread = true;
1750  		break;
1751  	case CSK_STATE_CLOSING:
1752  		csk->com.state = CSK_STATE_DEAD;
1753  		if (!csk->conn)
1754  			release = true;
1755  		break;
1756  	case CSK_STATE_ABORTING:
1757  		break;
1758  	default:
1759  		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
1760  			__func__, csk->com.state);
1761  		csk->com.state = CSK_STATE_DEAD;
1762  	}
1763  
1764  	__skb_queue_purge(&csk->txq);
1765  
1766  	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1767  		cxgbit_send_tx_flowc_wr(csk);
1768  
1769  	rpl_skb = __skb_dequeue(&csk->skbq);
1770  
1771  	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1772  	cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1773  
1774  	if (wakeup_thread) {
1775  		cxgbit_queue_rx_skb(csk, skb);
1776  		return;
1777  	}
1778  
1779  	if (release)
1780  		cxgbit_put_csk(csk);
1781  rel_skb:
1782  	__kfree_skb(skb);
1783  }
1784  
1785  static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1786  {
1787  	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1788  
1789  	pr_debug("%s: csk %p; tid %u; state %d\n",
1790  		 __func__, csk, csk->tid, csk->com.state);
1791  
1792  	switch (csk->com.state) {
1793  	case CSK_STATE_ABORTING:
1794  		csk->com.state = CSK_STATE_DEAD;
1795  		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1796  			cxgbit_wake_up(&csk->com.wr_wait, __func__,
1797  				       rpl->status);
1798  		cxgbit_put_csk(csk);
1799  		break;
1800  	default:
1801  		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
1802  			__func__, csk->com.state);
1803  	}
1804  
1805  	__kfree_skb(skb);
1806  }
1807  
1808  static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1809  {
1810  	const struct sk_buff *skb = csk->wr_pending_head;
1811  	u32 credit = 0;
1812  
1813  	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1814  		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1815  		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1816  		return true;
1817  	}
1818  
1819  	while (skb) {
1820  		credit += (__force u32)skb->csum;
1821  		skb = cxgbit_skcb_tx_wr_next(skb);
1822  	}
1823  
1824  	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1825  		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1826  		       csk, csk->tid, csk->wr_cred,
1827  		       credit, csk->wr_max_cred);
1828  
1829  		return true;
1830  	}
1831  
1832  	return false;
1833  }
1834  
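/*
 * Handle CPL_FW4_ACK: the firmware acknowledges tx credits. Return the
 * credits, free the completed pending work requests and advance snd_una
 * before pushing any queued tx data.
 */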
1835  static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1836  {
1837  	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
1838  	u32 credits = rpl->credits;
1839  	u32 snd_una = ntohl(rpl->snd_una);
1840  
1841  	csk->wr_cred += credits;
1842  	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1843  		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1844  
1845  	while (credits) {
1846  		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1847  		u32 csum;
1848  
1849  		if (unlikely(!p)) {
1850  			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1851  			       csk, csk->tid, credits,
1852  			       csk->wr_cred, csk->wr_una_cred);
1853  			break;
1854  		}
1855  
1856  		csum = (__force u32)p->csum;
1857  		if (unlikely(credits < csum)) {
1858  			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
1859  				csk,  csk->tid,
1860  				credits, csk->wr_cred, csk->wr_una_cred,
1861  				csum);
1862  			p->csum = (__force __wsum)(csum - credits);
1863  			break;
1864  		}
1865  
1866  		cxgbit_sock_dequeue_wr(csk);
1867  		credits -= csum;
1868  		kfree_skb(p);
1869  	}
1870  
1871  	if (unlikely(cxgbit_credit_err(csk))) {
1872  		cxgbit_queue_rx_skb(csk, skb);
1873  		return;
1874  	}
1875  
1876  	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
1877  		if (unlikely(before(snd_una, csk->snd_una))) {
1878  			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
1879  				csk, csk->tid, snd_una,
1880  				csk->snd_una);
1881  			goto rel_skb;
1882  		}
1883  
1884  		if (csk->snd_una != snd_una) {
1885  			csk->snd_una = snd_una;
1886  			dst_confirm(csk->dst);
1887  		}
1888  	}
1889  
1890  	if (skb_queue_len(&csk->txq))
1891  		cxgbit_push_tx_frames(csk);
1892  
1893  rel_skb:
1894  	__kfree_skb(skb);
1895  }
1896  
1897  static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1898  {
1899  	struct cxgbit_sock *csk;
1900  	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1901  	unsigned int tid = GET_TID(rpl);
1902  	struct cxgb4_lld_info *lldi = &cdev->lldi;
1903  	struct tid_info *t = lldi->tids;
1904  
1905  	csk = lookup_tid(t, tid);
1906  	if (unlikely(!csk)) {
1907  		pr_err("can't find connection for tid %u.\n", tid);
1908  		goto rel_skb;
1909  	} else {
1910  		cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1911  	}
1912  
1913  	cxgbit_put_csk(csk);
1914  rel_skb:
1915  	__kfree_skb(skb);
1916  }
1917  
1918  static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1919  {
1920  	struct cxgbit_sock *csk;
1921  	struct cpl_rx_data *cpl = cplhdr(skb);
1922  	unsigned int tid = GET_TID(cpl);
1923  	struct cxgb4_lld_info *lldi = &cdev->lldi;
1924  	struct tid_info *t = lldi->tids;
1925  
1926  	csk = lookup_tid(t, tid);
1927  	if (unlikely(!csk)) {
1928  		pr_err("can't find conn. for tid %u.\n", tid);
1929  		goto rel_skb;
1930  	}
1931  
1932  	cxgbit_queue_rx_skb(csk, skb);
1933  	return;
1934  rel_skb:
1935  	__kfree_skb(skb);
1936  }
1937  
1938  static void
1939  __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1940  {
1941  	spin_lock(&csk->lock);
1942  	if (csk->lock_owner) {
1943  		__skb_queue_tail(&csk->backlogq, skb);
1944  		spin_unlock(&csk->lock);
1945  		return;
1946  	}
1947  
1948  	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1949  	spin_unlock(&csk->lock);
1950  }
1951  
1952  static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1953  {
1954  	cxgbit_get_csk(csk);
1955  	__cxgbit_process_rx_cpl(csk, skb);
1956  	cxgbit_put_csk(csk);
1957  }
1958  
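/*
 * Common entry point for connection-scoped CPL messages. The handler
 * for the opcode is stashed in the skb and run immediately, or queued
 * on the backlog if another context holds the socket lock.
 */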
1959  static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1960  {
1961  	struct cxgbit_sock *csk;
1962  	struct cpl_tx_data *cpl = cplhdr(skb);
1963  	struct cxgb4_lld_info *lldi = &cdev->lldi;
1964  	struct tid_info *t = lldi->tids;
1965  	unsigned int tid = GET_TID(cpl);
1966  	u8 opcode = cxgbit_skcb_rx_opcode(skb);
1967  	bool ref = true;
1968  
1969  	switch (opcode) {
1970  	case CPL_FW4_ACK:
1971  			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
1972  			ref = false;
1973  			break;
1974  	case CPL_PEER_CLOSE:
1975  			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
1976  			break;
1977  	case CPL_CLOSE_CON_RPL:
1978  			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
1979  			break;
1980  	case CPL_ABORT_REQ_RSS:
1981  			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
1982  			break;
1983  	case CPL_ABORT_RPL_RSS:
1984  			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
1985  			break;
1986  	default:
1987  		goto rel_skb;
1988  	}
1989  
1990  	csk = lookup_tid(t, tid);
1991  	if (unlikely(!csk)) {
1992  		pr_err("can't find conn. for tid %u.\n", tid);
1993  		goto rel_skb;
1994  	}
1995  
1996  	if (ref)
1997  		cxgbit_process_rx_cpl(csk, skb);
1998  	else
1999  		__cxgbit_process_rx_cpl(csk, skb);
2000  
2001  	return;
2002  rel_skb:
2003  	__kfree_skb(skb);
2004  }
2005  
2006  cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
2007  	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
2008  	[CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
2009  	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
2010  	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
2011  	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
2012  	[CPL_RX_DATA]		= cxgbit_rx_data,
2013  	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
2014  	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
2015  	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
2016  	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
2017  	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
2018  };
2019