/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
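
/*
 * Illustrative sketch only (example_mmop_name() is hypothetical, not part
 * of the kernel API): the MMOP_* requests correspond to the strings the
 * memory sysfs interface accepts, e.g. "online_movable".
 */
static inline const char *example_mmop_name(int online_type)
{
	switch (online_type) {
	case MMOP_OFFLINE:
		return "offline";
	case MMOP_ONLINE:
		return "online";
	case MMOP_ONLINE_KERNEL:
		return "online_kernel";
	case MMOP_ONLINE_MOVABLE:
		return "online_movable";
	default:
		return "invalid";
	}
}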

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 * This is only a hint; the core kernel can decide not to do this based on
 * various alignment checks.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
/*
 * The hotplugged memory is completely inaccessible while the memory is
 * offline. The memory provider will handle MEM_PREPARE_ONLINE /
 * MEM_FINISH_OFFLINE notifications and make the memory accessible.
 *
 * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY,
 * because the altmap cannot be written (e.g., poisoned) when adding
 * memory -- before it is set online.
 *
 * This allows for adding memory with an altmap that is not currently
 * made available by a hypervisor. When onlining that memory, the
 * hypervisor can be instructed to make that memory available, and
 * the onlining phase will not require any memory allocations, which is
 * helpful in low-memory situations.
 */
#define MHP_OFFLINE_INACCESSIBLE	((__force mhp_t)BIT(3))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: corresponding struct dev_pagemap when hot-adding device
 *	memory (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};
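
/*
 * Illustrative sketch (example_init_mhp_params() is hypothetical, not part
 * of the kernel API): pgprot is required, while altmap and pgmap may stay
 * NULL when no alternative memmap allocator or device memory is involved.
 */
static inline void example_init_mhp_params(struct mhp_params *params,
					   pgprot_t pgprot)
{
	params->altmap = NULL;		/* no alternative memmap allocator */
	params->pgmap = NULL;		/* not hot-adding device memory */
	params->pgprot = pgprot;	/* required, e.g. PAGE_KERNEL */
}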

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
bool mhp_supports_memmap_on_memory(void);
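
/*
 * Illustrative sketch (example_memmap_flags() is hypothetical): because
 * MHP_MEMMAP_ON_MEMORY is only a hint, a caller would typically check
 * mhp_supports_memmap_on_memory() before requesting it.
 */
static inline mhp_t example_memmap_flags(void)
{
	mhp_t flags = MHP_NONE;

	if (mhp_supports_memmap_on_memory())
		flags |= MHP_MEMMAP_ON_MEMORY;

	return flags;
}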

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
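
/*
 * Illustrative sketch of the read side (example_zone_spanned() is
 * hypothetical): retry until a stable snapshot of the zone span was read,
 * i.e. no resizer held zone_span_writelock() meanwhile.
 */
static inline unsigned long example_zone_spanned(struct zone *zone)
{
	unsigned long spanned;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		spanned = zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));

	return spanned;
}
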
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone, bool mhp_off_inaccessible);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
		unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
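
/*
 * Illustrative sketch (example_online_page() is hypothetical): drivers
 * such as balloon drivers can intercept freshly onlined pages via
 * set_online_page_callback(example_online_page) and must undo this with
 * restore_online_page_callback(example_online_page) when done.
 */
static inline void example_online_page(struct page *page, unsigned int order)
{
	/* Simply defer to the default: hand the pages to the buddy. */
	generic_online_page(page, order);
}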

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
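
/*
 * Illustrative sketch (example_pfn_is_online() is hypothetical): readers
 * that must not race with memory hotplug bracket their access with
 * get_online_mems()/put_online_mems(); the hotplug paths themselves
 * serialize via mem_hotplug_begin()/mem_hotplug_done().
 */
static inline bool example_pfn_is_online(unsigned long pfn)
{
	bool online;

	get_online_mems();	/* block concurrent memory hot(un)plug */
	online = pfn_to_online_page(pfn) != NULL;
	put_online_mems();

	return online;
}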

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline bool mhp_supports_memmap_on_memory(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);
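
/*
 * Illustrative sketch (example_range_is_mappable() is hypothetical,
 * assuming <linux/range.h> semantics where struct range.end is inclusive):
 * a candidate [start, start + size) range must fall inside the range the
 * architecture can map.
 */
static inline bool example_range_is_mappable(u64 start, u64 size)
{
	struct range mappable = arch_get_mappable_range();

	return size && start >= mappable.start &&
	       (start + size - 1) <= mappable.end;
}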

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
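
/*
 * Illustrative sketch (example_node_spanned() is hypothetical): readers of
 * the node span take the resize lock so a concurrent resizer cannot change
 * it mid-read.
 */
static inline unsigned long example_node_spanned(struct pglist_data *pgdat)
{
	unsigned long flags, spanned;

	pgdat_resize_lock(pgdat, &flags);
	spanned = pgdat->node_spanned_pages;
	pgdat_resize_unlock(pgdat, &flags);

	return spanned;
}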
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);
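
/*
 * Illustrative sketch (example_unplug() is hypothetical): a driver tearing
 * down memory it hot-added earlier would offline and remove it in one go;
 * this can fail (e.g., -EBUSY) if the range cannot be offlined.
 */
static inline int example_unplug(u64 start, u64 size)
{
	return offline_and_remove_memory(start, size);
}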

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
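
/*
 * Illustrative sketch (example_add_merged() is hypothetical): when passing
 * MHP_MERGE_RESOURCE, the resource pointer must not be used after a
 * successful call, as documented for that flag above.
 */
static inline int example_add_merged(int nid, struct resource *res)
{
	int rc = add_memory_resource(nid, res, MHP_MERGE_RESOURCE);

	/* On success, "res" might already be stale: don't touch it. */
	return rc;
}
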
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */