/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 */
#ifndef __DAX_PRIVATE_H__
#define __DAX_PRIVATE_H__

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/idr.h>

/* private routines between core files */
struct dax_device;
struct dax_device *inode_dax(struct inode *inode);
struct inode *dax_inode(struct dax_device *dax_dev);
int dax_bus_init(void);
void dax_bus_exit(void);

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @target_node: effective numa node if this memory range is onlined
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @ida: instance id allocator
 * @res: resource tree to track instance allocations
 * @seed: allow userspace to find the first unbound seed device
 * @youngest: allow userspace to find the most recently created device
 */
struct dax_region {
	int id;
	int target_node;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct ida ida;
	struct resource res;
	struct device *seed;
	struct device *youngest;
};
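
/*
 * Illustrative sketch, not a definition in this header: @kref pins a
 * dax_region while another agent performs lookups against it, and @ida
 * hands out instance ids for child devices.  The helper names below are
 * hypothetical; the in-tree region management lives in drivers/dax/bus.c.
 *
 *	static void example_get_region(struct dax_region *dax_region)
 *	{
 *		kref_get(&dax_region->kref);
 *	}
 *
 *	static int example_alloc_instance_id(struct dax_region *dax_region)
 *	{
 *		return ida_alloc(&dax_region->ida, GFP_KERNEL);
 *	}
 */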

struct dax_mapping {
	struct device dev;
	int range_id;
	int id;
};

/**
 * struct dev_dax - instance data for a subdivision of a dax region, and
 * data while the device is activated in the driver.
 * @region: parent region
 * @dax_dev: core dax functionality
 * @align: allocation and mapping alignment for this instance
 * @target_node: effective numa node if dev_dax memory range is onlined
 * @dyn_id: true if @id was allocated dynamically rather than assigned statically
 * @id: ida allocated id when the dax_region is not static
 * @ida: mapping id allocator
 * @dev: device core
 * @pgmap: pgmap for memmap setup / lifetime (driver owned)
 * @memmap_on_memory: whether to place the memmap (struct pages) in the hotplugged memory range itself
 * @nr_range: size of @ranges
 * @ranges: resource-span + pgoff tuples for the instance
 */
struct dev_dax {
	struct dax_region *region;
	struct dax_device *dax_dev;
	unsigned int align;
	int target_node;
	bool dyn_id;
	int id;
	struct ida ida;
	struct device dev;
	struct dev_pagemap *pgmap;
	bool memmap_on_memory;
	int nr_range;
	struct dev_dax_range {
		unsigned long pgoff;
		struct range range;
		struct dax_mapping *mapping;
	} *ranges;
};
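
/*
 * Illustrative sketch, not part of this header: a driver activating a
 * dev_dax instance typically publishes @ranges through its dev_pagemap,
 * sizing pgmap->nr_range from @nr_range and copying each span, roughly:
 *
 *	int i;
 *
 *	pgmap->nr_range = dev_dax->nr_range;
 *	for (i = 0; i < dev_dax->nr_range; i++)
 *		pgmap->ranges[i] = dev_dax->ranges[i].range;
 *
 * Allocation, flags, and lifetime of the pgmap remain driver policy, per
 * the "driver owned" note on @pgmap above.
 */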

/*
 * While run_dax() is potentially a generic operation that could be
 * defined in include/linux/dax.h, we don't want to grow any users
 * outside of drivers/dax/.
 */
void run_dax(struct dax_device *dax_dev);

static inline struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static inline struct dax_mapping *to_dax_mapping(struct device *dev)
{
	return container_of(dev, struct dax_mapping, dev);
}

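/*
 * Translate a dev_dax-relative page offset to a physical address by
 * walking @ranges.  A minimal sketch of the idea (sketch only; the
 * in-tree definition lives elsewhere in drivers/dax/ and also checks
 * that @size fits within the matched range):
 *
 *	for (i = 0; i < dev_dax->nr_range; i++) {
 *		struct dev_dax_range *dax_range = &dev_dax->ranges[i];
 *		unsigned long pgoff_end = dax_range->pgoff +
 *			PHYS_PFN(range_len(&dax_range->range)) - 1;
 *
 *		if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
 *			continue;
 *		return PFN_PHYS(pgoff - dax_range->pgoff) + dax_range->range.start;
 *	}
 *	return -1;
 */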
phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);

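/*
 * An alignment is only usable if the CPU can actually map it: PUD and PMD
 * sizes depend on transparent-hugepage support, while PAGE_SIZE is always
 * valid.
 */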
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool dax_align_valid(unsigned long align)
{
	if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
		return true;
	if (align == PMD_SIZE && has_transparent_hugepage())
		return true;
	if (align == PAGE_SIZE)
		return true;
	return false;
}
#else
static inline bool dax_align_valid(unsigned long align)
{
	return align == PAGE_SIZE;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif