// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
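/*
 * A minimal usage sketch (example addresses, assuming ipvsadm is
 * available):
 *
 *     ipvsadm -A -t 192.0.2.1:80 -s lblcr
 *     ipvsadm -a -t 192.0.2.1:80 -r 10.0.0.1:80 -g -w 1
 *
 * The set expiration interval T in the pseudo code above corresponds
 * to the lblcr_expiration sysctl (net/ipv4/vs/lblcr_expiration)
 * registered below.
 */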

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/hash.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>


/*
 *    Used for garbage collection of stale IPVS lblcr entries when the
 *    table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION	(24*60*60*HZ)
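/*
 * In plain terms: the garbage-collection timer fires every minute
 * (CHECK_EXPIRE_INTERVAL); when the table exceeds its maximum size,
 * entries idle for more than six minutes (ENTRY_TIMEOUT) are reclaimed;
 * and the lblcr_expiration sysctl (DEFAULT_EXPIRATION, 24 hours)
 * controls both full-table expiration and trimming of an entry's
 * server set.
 */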

/*
 *    Used for the full expiration check.
 *    When there has been no partial expiration check (garbage
 *    collection) in half an hour, do a full expiration check to
 *    collect stale entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30

/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
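/* With the default of 10 bits the table has 1 << 10 = 1024 buckets;
 * IP_VS_LBLCR_TAB_MASK (0x3ff) maps a hash value to a bucket index.
 */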


/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
	struct list_head	list;          /* list link */
	struct ip_vs_dest	*dest;		/* destination server */
	struct rcu_head		rcu_head;
};

struct ip_vs_dest_set {
	atomic_t                size;           /* set size */
	unsigned long           lastmod;        /* last modified time */
	struct list_head	list;           /* destination list */
};
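/* The destination list is RCU-protected: schedulers traverse it with
 * list_for_each_entry_rcu() under rcu_read_lock(), while modifications
 * (insert/erase) are serialized by the service's sched_lock.
 */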


static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
				  struct ip_vs_dest *dest, bool check)
{
	struct ip_vs_dest_set_elem *e;

	if (check) {
		list_for_each_entry(e, &set->list, list) {
			if (e->dest == dest)
				return;
		}
	}

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	ip_vs_dest_hold(dest);
	e->dest = dest;

	list_add_rcu(&e->list, &set->list);
	atomic_inc(&set->size);

	set->lastmod = jiffies;
}

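/* RCU callback: drop the dest reference and free the element once all
 * current readers have left their RCU read-side critical sections.
 */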
static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
{
	struct ip_vs_dest_set_elem *e;

	e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
	ip_vs_dest_put_and_free(e->dest);
	kfree(e);
}

static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest) {
			/* HIT */
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			list_del_rcu(&e->list);
			call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
			break;
		}
	}
}

static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e, *ep;

	list_for_each_entry_safe(e, ep, &set->list, list) {
		list_del_rcu(&e->list);
		call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
	}
}

/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry_rcu(e, &set->list, list) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	list_for_each_entry_continue_rcu(e, &set->list, list) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if (((__s64)loh * atomic_read(&dest->weight) >
		     (__s64)doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      refcount_read(&least->refcnt),
		      atomic_read(&least->weight), loh);
	return least;
}


/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = ip_vs_dest_conn_overhead(most);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	list_for_each_entry_continue(e, &set->list, list) {
		dest = e->dest;
		doh = ip_vs_dest_conn_overhead(dest);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if (((__s64)moh * atomic_read(&dest->weight) <
		     (__s64)doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
		      atomic_read(&most->activeconns),
		      refcount_read(&most->refcnt),
		      atomic_read(&most->weight), moh);
	return most;
}


/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct hlist_node       list;
	int			af;		/* address family */
	union nf_inet_addr      addr;           /* destination IP address */
	struct ip_vs_dest_set   set;            /* destination server set */
	unsigned long           lastuse;        /* last used time */
	struct rcu_head		rcu_head;
};


/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	struct rcu_head		rcu_head;
	struct hlist_head	bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
	atomic_t                entries;        /* number of entries */
	int                     max_size;       /* maximum size of entries */
	struct timer_list       periodic_timer; /* collect stale entries */
	struct ip_vs_service	*svc;		/* pointer back to service */
	int                     rover;          /* rover for expire check */
	int                     counter;        /* counter for no expire */
	bool			dead;
};


#ifdef CONFIG_SYSCTL
/*
 *      IPVS LBLCR sysctl table
 */

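/* .data is NULL here; it is pointed at the per-netns
 * sysctl_lblcr_expiration field in __ip_vs_lblcr_init().
 */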
static struct ctl_table vs_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};
#endif

static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	hlist_del_rcu(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree_rcu(en, rcu_head);
}


/*
 *	Returns hash value for IPVS LBLCR entry
 */
static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return hash_32(ntohl(addr_fold), IP_VS_LBLCR_TAB_BITS);
}


/*
 *	Hash an entry into the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}


/* Get ip_vs_lblcr_entry associated with supplied parameters. */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
		const union nf_inet_addr *addr)
{
	unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
	struct ip_vs_lblcr_entry *en;

	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;

	return NULL;
}


/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a
 * destination IP address to a set of destination servers.
 * Called under the service's sched_lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
		u16 af, struct ip_vs_dest *dest)
{
	struct ip_vs_lblcr_entry *en;

	en = ip_vs_lblcr_get(af, tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en)
			return NULL;

		en->af = af;
		ip_vs_addr_copy(af, &en->addr, daddr);
		en->lastuse = jiffies;

		/* initialize its dest set */
		atomic_set(&(en->set.size), 0);
		INIT_LIST_HEAD(&en->set.list);

		ip_vs_dest_set_insert(&en->set, dest, false);

		ip_vs_lblcr_hash(tbl, en);
		return en;
	}

	ip_vs_dest_set_insert(&en->set, dest, true);

	return en;
}


/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	int i;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	spin_lock_bh(&svc->sched_lock);
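	/* Mark the table dead so ip_vs_lblcr_schedule() stops inserting
	 * new entries while we tear the table down.
	 */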
	tbl->dead = true;
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
		}
	}
	spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
	return svc->ipvs->sysctl_lblcr_expiration;
#else
	return DEFAULT_EXPIRATION;
#endif
}

static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_after(en->lastuse +
				       sysctl_lblcr_expiration(svc), now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}


/*
 *      Periodic timer handler for the IPVS lblcr table.
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(struct timer_list *t)
{
	struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer);
	struct ip_vs_service *svc = tbl->svc;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

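	/* Aim to reclaim 4/3 of the excess over max_size in this pass,
	 * capped at half of the table's nominal capacity.
	 */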
	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}

static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/*
	 *    Allocate the ip_vs_lblcr_table for this service
	 */
	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/*
	 *    Initialize the hash buckets
	 */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		INIT_HLIST_HEAD(&tbl->bucket[i]);
	}
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
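	/* i.e. an average of 16 entries per bucket before the periodic
	 * garbage collection starts evicting idle entries
	 */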
	tbl->rover = 0;
	tbl->counter = 1;
	tbl->dead = false;
	tbl->svc = svc;
	atomic_set(&tbl->entries, 0);

	/*
	 *    Hook periodic timer for garbage collection
	 */
	timer_setup(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 0);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}


static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	timer_shutdown_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(svc);

	/* release the table itself */
	kfree_rcu(tbl, rcu_head);
	IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) released\n",
		  sizeof(*tbl));
}


static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We use the following formula to estimate the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *                h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
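	/* For example, with loh = 12 on a weight-3 server (load 4) and
	 * doh = 5 on a weight-1 candidate (load 5): 12 * 1 > 5 * 3 is
	 * false, so the current least-loaded server is kept.
	 */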
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      refcount_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}


/*
 *   If this destination server is overloaded (more active connections
 *   than its weight) and there is a server whose active connections
 *   are below half its weight, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry_rcu(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}


/*
 *    Locality-Based (weighted) Least-Connection with Replication
 *    scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		     struct ip_vs_iphdr *iph)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	struct ip_vs_dest *dest;
	struct ip_vs_lblcr_entry *en;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	en = ip_vs_lblcr_get(svc->af, tbl, &iph->daddr);
	if (en) {
		en->lastuse = jiffies;

		/* Get the least loaded destination */
		dest = ip_vs_dest_set_min(&en->set);

		/* More than one destination + enough time passed by, cleanup */
		if (atomic_read(&en->set.size) > 1 &&
		    time_after(jiffies, en->set.lastmod +
				sysctl_lblcr_expiration(svc))) {
			spin_lock_bh(&svc->sched_lock);
			if (atomic_read(&en->set.size) > 1) {
				struct ip_vs_dest *m;

				m = ip_vs_dest_set_max(&en->set);
				if (m)
					ip_vs_dest_set_erase(&en->set, m);
			}
			spin_unlock_bh(&svc->sched_lock);
		}

		/* If the destination is not overloaded, use it */
		if (dest && !is_overloaded(dest, svc))
			goto out;

		/* The cache entry is invalid, time to schedule */
		dest = __ip_vs_lblcr_schedule(svc);
		if (!dest) {
			ip_vs_scheduler_err(svc, "no destination available");
			return NULL;
		}

		/* Update our cache entry */
		spin_lock_bh(&svc->sched_lock);
		if (!tbl->dead)
			ip_vs_dest_set_insert(&en->set, dest, true);
		spin_unlock_bh(&svc->sched_lock);
		goto out;
	}

	/* No cache entry, time to schedule */
	dest = __ip_vs_lblcr_schedule(svc);
	if (!dest) {
		ip_vs_scheduler_err(svc, "no destination available");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	spin_lock_bh(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblcr_new(tbl, &iph->daddr, svc->af, dest);
	spin_unlock_bh(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph->daddr),
		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));

	return dest;
}


/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.schedule =		ip_vs_lblcr_schedule,
};

/*
 *  per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	size_t vars_table_size = ARRAY_SIZE(vs_vars_table);

	if (!ipvs)
		return -ENOENT;

	if (!net_eq(net, &init_net)) {
		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			vars_table_size = 0;
	} else
		ipvs->lblcr_ctl_table = vs_vars_table;
	ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

	ipvs->lblcr_ctl_header = register_net_sysctl_sz(net, "net/ipv4/vs",
							ipvs->lblcr_ctl_table,
							vars_table_size);
	if (!ipvs->lblcr_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblcr_ctl_table);
		return -ENOMEM;
	}

	return 0;
}

static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblcr_ctl_table);
}

#else

static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }

#endif

static struct pernet_operations ip_vs_lblcr_ops = {
	.init = __ip_vs_lblcr_init,
	.exit = __ip_vs_lblcr_exit,
};

static int __init ip_vs_lblcr_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblcr_ops);
	return ret;
}

static void __exit ip_vs_lblcr_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	unregister_pernet_subsys(&ip_vs_lblcr_ops);
	rcu_barrier();
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ipvs locality-based least-connection with replication scheduler");