// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

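/* Tunables: the grace period (in seconds) before an unused, automatically
 * added cell may be garbage collected, and the clamps applied to the DNS TTL
 * when scheduling the next VL server list update.
 */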
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;

static void afs_queue_cell_manager(struct afs_net *);
static void afs_manage_cell_work(struct work_struct *);

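/*
 * Drop a count on the number of outstanding cell records and wake up anyone
 * waiting for them all to be released (e.g. when the namespace is purged).
 */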
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	} else {
		afs_queue_cell_manager(net);
	}
}

/*
 * Look up and get an activation reference on a cell record.  The caller must
 * hold net->cells_lock at least read-locked.
 */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	if (!name) {
		cell = net->ws_cell;
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}

/*
 * Look up and get an activation reference on a cell record.
 */
struct afs_cell *afs_find_cell(struct afs_net *net,
			       const char *name, unsigned int namesz,
			       enum afs_cell_trace reason)
{
	struct afs_cell *cell;

	down_read(&net->cells_lock);
	cell = afs_find_cell_locked(net, name, namesz, reason);
	up_read(&net->cells_lock);
	return cell;
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->name = kmalloc(namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	refcount_set(&cell->ref, 1);
	atomic_set(&cell->active, 0);
	INIT_WORK(&cell->manager, afs_manage_cell_work);
	init_rwsem(&cell->vs_lock);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	seqlock_init(&cell->fs_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
	atomic_inc(&net->cells_outstanding);
	cell->debug_id = atomic_inc_return(&cell_debug_id);
	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	atomic_set(&cell->active, 2);
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	afs_queue_cell(cell, afs_cell_trace_get_queue_new);

wait_for_cell:
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
		       afs_cell_trace_wait);
	_debug("wait_for_cell");
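	/* Wait for the cell record to reach a stable state: fully active or
	 * removed.
	 */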
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
		       }));

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_REMOVED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, afs_cell_trace_use_lookup);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	afs_see_cell(new_root, afs_cell_trace_see_ws);
	old_root = net->ws_cell;
	net->ws_cell = new_root;
	up_write(&net->cells_lock);

	afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

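	/* Clamp the expiry time returned by the DNS to the configured minimum
	 * and maximum TTLs before using it to schedule the next update.
	 */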
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	kfree(cell->name);
	kfree(cell);

	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r;

	__refcount_inc(&cell->ref, &r);
	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
	return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			call_rcu(&cell->rcu, afs_cell_destroy);
		}
	}
}

/*
 * Note a cell becoming more active.
 */
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	WARN_ON(r == 0);
	a = atomic_inc_return(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
	return cell;
}

/*
 * Record a cell becoming less active.  When the active counter reaches 1, it
 * is scheduled for destruction, but may get reactivated.
 */
void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	debug_id = cell->debug_id;
	r = refcount_read(&cell->ref);
	a = atomic_dec_return(&cell->active);
	trace_afs_cell(debug_id, r, a, reason);
	WARN_ON(a == 0);
	if (a == 1)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(net, expire_delay);
}

/*
 * Note that a cell has been seen.
 */
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	a = atomic_read(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
}

/*
 * Queue a cell for management, giving the workqueue a ref to hold.
 */
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	afs_get_cell(cell, reason);
	if (!queue_work(afs_wq, &cell->manager))
		afs_put_cell(cell, afs_cell_trace_put_queue_fail);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	int ret, active;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		down_write(&net->cells_lock);
		active = 1;
		if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
			rb_erase(&cell->net_node, &net->cells);
			trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
				       afs_cell_trace_unuse_delete);
			smp_store_release(&cell->state, AFS_CELL_REMOVED);
		}
		up_write(&net->cells_lock);
		if (cell->state == AFS_CELL_REMOVED) {
			wake_up_var(&cell->state);
			goto final_destruction;
		}
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->active) > 1) {
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		if (atomic_read(&cell->active) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_REMOVED:
		goto done;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The root volume is pinning the cell */
	afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;
	afs_put_cell(cell, afs_cell_trace_put_destroy);
}

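/*
 * Perform the work of managing a cell and then drop the reference that was
 * taken when the work item was queued.
 */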
static void afs_manage_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);

	afs_manage_cell(cell);
	afs_put_cell(cell, afs_cell_trace_put_queue_work);
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to the caller (provided it skips cells that
 * are still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	down_read(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned active;
		bool sched_cell = false;

		active = atomic_read(&cell->active);
		trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
			       active, afs_cell_trace_manage);

		ASSERTCMP(active, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
				active = atomic_dec_return(&cell->active);
				trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
					       active, afs_cell_trace_unuse_pin);
			}
		}

		if (active == 1) {
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
	}

	up_read(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	down_write(&net->cells_lock);
	ws = net->ws_cell;
	net->ws_cell = NULL;
	up_write(&net->cells_lock);
	afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}