// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"

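/*
 * Each sysfs file is backed by an xfs_sysfs_attr, which pairs the generic
 * attribute with typed show/store callbacks.  The xfs_sysfs_ops dispatcher
 * below recovers the xfs_sysfs_attr from the generic attribute and forwards
 * to those callbacks.
 */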
struct xfs_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobject, char *buf);
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
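
/*
 * Usage sketch (the name "foo" is hypothetical): __ATTR_RW(foo) wires the
 * attribute to functions named foo_show and foo_store, so a knob is defined
 * as:
 *
 *	static ssize_t foo_show(struct kobject *kobject, char *buf);
 *	static ssize_t foo_store(struct kobject *kobject, const char *buf,
 *				 size_t count);
 *	XFS_SYSFS_ATTR_RW(foo);
 *
 * and then listed in an attribute array via ATTR_LIST(foo).
 */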

STATIC ssize_t
xfs_sysfs_object_show(
	struct kobject		*kobject,
	struct attribute	*attr,
	char			*buf)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
	struct kobject		*kobject,
	struct attribute	*attr,
	const char		*buf,
	size_t			count)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

static struct attribute *xfs_mp_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(xfs_mp);

const struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_mp_groups,
};

#ifdef DEBUG
/* debug */

STATIC ssize_t
bug_on_assert_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	int			ret;
	int			val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		xfs_globals.bug_on_assert = true;
	else if (val == 0)
		xfs_globals.bug_on_assert = false;
	else
		return -EINVAL;

	return count;
}

STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);
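
/*
 * Example (assuming the debug kobject is registered at /sys/fs/xfs/debug):
 *	echo 1 > /sys/fs/xfs/debug/bug_on_assert	# BUG() on ASSERT failure
 *	echo 0 > /sys/fs/xfs/debug/bug_on_assert	# warn only
 */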

STATIC ssize_t
log_recovery_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.log_recovery_delay = val;

	return count;
}

STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

STATIC ssize_t
mount_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.mount_delay = val;

	return count;
}

STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

static ssize_t
always_cow_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.always_cow);
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
STATIC ssize_t
pwork_threads_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1 || val > num_possible_cpus())
		return -EINVAL;

	xfs_globals.pwork_threads = val;

	return count;
}

STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
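
/*
 * Example (path assumed as above): force parallel work such as quotacheck
 * onto exactly four workers:
 *	echo 4 > /sys/fs/xfs/debug/pwork_threads
 * Writing -1 restores the default automatic sizing.
 */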

/*
 * The "LARP" (Logged extended Attribute Recovery Persistence) debugging knob
 * sets the XFS_DA_OP_LOGGED flag on all xfs_attr_set operations performed on
 * V5 filesystems.  As a result, the intermediate progress of all setxattr and
 * removexattr operations is tracked via the log and can be restarted during
 * recovery.  This is useful for testing xattr recovery prior to merging of the
 * parent pointer feature which requires it to maintain consistency, and may be
 * enabled for userspace xattrs in the future.
 */
static ssize_t
larp_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.larp);
	if (ret < 0)
		return ret;
	return count;
}

STATIC ssize_t
larp_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.larp);
}
XFS_SYSFS_ATTR_RW(larp);
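
/*
 * Example (debug builds only, path assumed as above):
 *	echo 1 > /sys/fs/xfs/debug/larp
 */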

STATIC ssize_t
bload_leaf_slack_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	xfs_globals.bload_leaf_slack = val;
	return count;
}

STATIC ssize_t
bload_leaf_slack_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bload_leaf_slack);
}
XFS_SYSFS_ATTR_RW(bload_leaf_slack);

STATIC ssize_t
bload_node_slack_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	xfs_globals.bload_node_slack = val;
	return count;
}

STATIC ssize_t
bload_node_slack_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bload_node_slack);
}
XFS_SYSFS_ATTR_RW(bload_node_slack);

static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
	ATTR_LIST(pwork_threads),
	ATTR_LIST(larp),
	ATTR_LIST(bload_leaf_slack),
	ATTR_LIST(bload_node_slack),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_dbg);

const struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_dbg_groups,
};

#endif /* DEBUG */

/* stats */

static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;
	struct xstats	*stats = to_xstats(kobject);

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xfs_stats_clearall(stats->xs_stats);
	return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);
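
/*
 * Example (assuming the global stats kobject at /sys/fs/xfs/stats):
 *	cat /sys/fs/xfs/stats/stats		# dump the counters
 *	echo 1 > /sys/fs/xfs/stats/stats_clear	# reset them
 */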

static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_stats);

const struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_stats_groups,
};

/* xlog */

static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

STATIC ssize_t
reserve_grant_head_bytes_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&to_xlog(kobject)->l_reserve_head.grant));
}
XFS_SYSFS_ATTR_RO(reserve_grant_head_bytes);

STATIC ssize_t
write_grant_head_bytes_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&to_xlog(kobject)->l_write_head.grant));
}
XFS_SYSFS_ATTR_RO(write_grant_head_bytes);
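
/*
 * These attributes appear under the per-mount log kobject, e.g.
 * /sys/fs/xfs/<dev>/log/log_head_lsn, and report LSNs as "cycle:block".
 */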

static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head_bytes),
	ATTR_LIST(write_grant_head_bytes),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_log);

const struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_log_groups,
};

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
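/*
 * For example, retries of metadata EIO errors would be tuned via (paths
 * assume a device named sda1):
 *	echo 5 > /sys/fs/xfs/sda1/error/metadata/EIO/max_retries
 *	echo 300 > /sys/fs/xfs/sda1/error/metadata/EIO/retry_timeout_seconds
 */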
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}

static ssize_t
max_retries_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		retries;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
		retries = -1;
	else
		retries = cfg->max_retries;

	return sysfs_emit(buf, "%d\n", retries);
}

static ssize_t
max_retries_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1)
		return -EINVAL;

	if (val == -1)
		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
	else
		cfg->max_retries = val;
	return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return sysfs_emit(buf, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);

static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_mount	*mp = err_to_mp(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 1)
		return -EINVAL;

	mp->m_fail_unmount = val;
	return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);
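
/*
 * Example: stop retrying failed metadata writeback at unmount time instead
 * of retrying forever (path assumes a device named sda1):
 *	echo 1 > /sys/fs/xfs/sda1/error/fail_at_unmount
 */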

static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_error);

static const struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_error_groups,
};

static const struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry, all other entries can be
 * empty.
 */
struct xfs_error_init {
	char		*name;
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};
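
/*
 * Note: the table above must stay in the same order as the errno enum used
 * to index m_error_cfg (XFS_ERR_DEFAULT, XFS_ERR_EIO, XFS_ERR_ENOSPC,
 * XFS_ERR_ENODEV); xfs_error_get_cfg() below relies on that mapping.
 */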

static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}

int
xfs_error_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		return error;

	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));
	if (error)
		goto out_error;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_error;

	return 0;

out_error:
	xfs_sysfs_del(&mp->m_error_kobj);
	return error;
}

void
xfs_error_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i, j;

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
}

struct xfs_error_cfg *
xfs_error_get_cfg(
	struct xfs_mount	*mp,
	int			error_class,
	int			error)
{
	struct xfs_error_cfg	*cfg;

	if (error < 0)
		error = -error;

	switch (error) {
	case EIO:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
		break;
	case ENOSPC:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
		break;
	case ENODEV:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
		break;
	default:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
		break;
	}

	return cfg;
}