/linux-6.12.1/include/linux/ |
D | mempolicy.h |
      46  struct mempolicy {
      64  extern void __mpol_put(struct mempolicy *pol);
      65  static inline void mpol_put(struct mempolicy *pol)
      75  static inline int mpol_needs_cond_ref(struct mempolicy *pol)
      80  static inline void mpol_cond_put(struct mempolicy *pol)
      86  extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
      87  static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
      94  static inline void mpol_get(struct mempolicy *pol)
     100  extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
     101  static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
     [all …]
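These mempolicy.h helpers encode the lifetime rules for struct mempolicy: mpol_get() takes a reference, mpol_put() drops one and hands the structure to __mpol_put() on the last drop, and mpol_cond_put() drops a reference only when mpol_needs_cond_ref() says the policy was handed out with one. A minimal kernel-style sketch of how callers pair them; lookup_policy_example() is a hypothetical stand-in for lookups such as shmem_get_pgoff_policy() below:

    /* Sketch only: how the mempolicy.h refcount helpers pair up.
     * lookup_policy_example() is hypothetical; it stands in for any
     * lookup that may return a conditionally referenced policy.
     */
    #include <linux/mm.h>
    #include <linux/mempolicy.h>

    static void use_task_policy(struct mempolicy *pol)
    {
    	mpol_get(pol);		/* hold a reference while we use it */
    	/* ... allocate under this policy ... */
    	mpol_put(pol);		/* drop it; last drop calls __mpol_put() */
    }

    static void use_looked_up_policy(struct vm_area_struct *vma, pgoff_t *ilx)
    {
    	struct mempolicy *pol = lookup_policy_example(vma, ilx); /* hypothetical */

    	/* ... use pol ... */
    	mpol_cond_put(pol);	/* drops only if the lookup took a reference */
    }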
|
D | gfp.h |
      13  struct mempolicy;
     304  struct mempolicy *mpol, pgoff_t ilx, int nid);
     307  struct mempolicy *mpol, pgoff_t ilx, int nid);
     316  struct mempolicy *mpol, pgoff_t ilx, int nid)  in alloc_pages_mpol_noprof()
     325  struct mempolicy *mpol, pgoff_t ilx, int nid)  in folio_alloc_mpol_noprof()
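The *_mpol allocator variants in gfp.h take an explicit policy, interleave index (ilx), and preferred node id instead of deriving them from current, which is what lets swap and shmem readahead allocate on behalf of another task. A hedged sketch of the call shape, modeled on the swap_state.c callers below:

    /* Sketch: allocate one folio under an explicit mempolicy, mirroring
     * the call shape used by mm/swap_state.c. Parameter order follows
     * the truncated gfp.h prototypes above.
     */
    #include <linux/gfp.h>
    #include <linux/mempolicy.h>

    static struct folio *alloc_under_policy(struct mempolicy *mpol, pgoff_t ilx)
    {
    	return folio_alloc_mpol(GFP_HIGHUSER_MOVABLE, 0 /* order */,
    				mpol, ilx, numa_node_id());
    }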
|
D | shmem_fs.h | 71 struct mempolicy *mpol; /* default memory policy for mappings */
|
D | sched.h |
      64  struct mempolicy;
    1303  struct mempolicy *mempolicy;
|
D | mm_types.h | 767 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
|
D | mm.h |
      35  struct mempolicy;
     648  int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
     660  struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
|
/linux-6.12.1/mm/ |
D | mempolicy.c |
     134  static struct mempolicy default_policy = {
     139  static struct mempolicy preferred_node_policy[MAX_NUMNODES];
     199  struct mempolicy *get_task_policy(struct task_struct *p)
     201  struct mempolicy *pol = p->mempolicy;  in get_task_policy()
     219  int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
     220  void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
     223  static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
     236  static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
     244  static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
     262  static int mpol_set_nodemask(struct mempolicy *pol,
     [all …]
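get_task_policy() is the fallback chain behind most policy lookups: the task's own policy if set, otherwise the boot-time per-node entry in preferred_node_policy[], otherwise the global default_policy. A condensed sketch of that logic, reconstructed from the hits above (exact checks may differ):

    /* Condensed sketch of mm/mempolicy.c:get_task_policy(); the mode
     * check guards against preferred_node_policy[] not yet being
     * initialised early in boot.
     */
    struct mempolicy *get_task_policy(struct task_struct *p)
    {
    	struct mempolicy *pol = p->mempolicy;
    	int node;

    	if (pol)
    		return pol;

    	node = numa_node_id();
    	if (node != NUMA_NO_NODE) {
    		pol = &preferred_node_policy[node];
    		if (pol->mode)	/* set once the policies are initialised */
    			return pol;
    	}

    	return &default_policy;
    }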
|
D | swap.h |
       5  struct mempolicy;
      72  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
      75  struct mempolicy *mpol, pgoff_t ilx);
     133  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)  in swap_cluster_readahead()
|
D | swap_state.c |
     433  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,  in __read_swap_cache_async()
     563  struct mempolicy *mpol;  in read_swap_cache_async()
     657  struct mempolicy *mpol, pgoff_t ilx)  in swap_cluster_readahead()
     800  struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)  in swap_vma_readahead()
     875  struct mempolicy *mpol;  in swapin_readahead()
|
D | shmem.c |
     116  struct mempolicy *mpol;
    1581  static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
    1593  static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
    1595  struct mempolicy *mpol = NULL;  in shmem_get_sbmpol()
    1605  static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
    1608  static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
    1614  static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
    1620  struct mempolicy *mpol;  in shmem_swapin_cluster()
    1764  struct mempolicy *mpol;  in shmem_alloc_folio()
    2653  static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
    [all …]
|
D | vma.h |
      85  struct mempolicy *policy;
     297  struct mempolicy *new_pol);
|
D | Makefile | 82 obj-$(CONFIG_NUMA) += mempolicy.o
|
D | zswap.c | 1010 struct mempolicy *mpol; in zswap_writeback_entry()
|
D | hugetlb.c |
    1401  struct mempolicy *mpol;  in dequeue_hugetlb_folio_vma()
    2372  struct mempolicy *mpol;  in alloc_buddy_hugetlb_folio_with_mpol()
    2437  struct mempolicy *mpol = get_task_policy(current);  in policy_mbind_nodemask()
    6505  struct mempolicy *mpol;  in alloc_hugetlb_folio_vma()
|
/linux-6.12.1/Documentation/ABI/testing/ |
D | sysfs-kernel-mm-mempolicy-weighted-interleave |
       1  What:  /sys/kernel/mm/mempolicy/weighted_interleave/
       6  What:  /sys/kernel/mm/mempolicy/weighted_interleave/nodeN
      12  utilized by tasks which have set their mempolicy to
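Each nodeN file holds the interleave weight applied to that node for tasks using the weighted-interleave mempolicy. A small userspace sketch that writes a weight; the node number and value are illustrative, and the directory exists only on kernels built with this support:

    /* Userspace sketch: set a weighted-interleave weight for node0.
     * Requires a kernel exposing /sys/kernel/mm/mempolicy/weighted_interleave/
     * and sufficient privileges; the weight value 4 is illustrative.
     */
    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/kernel/mm/mempolicy/weighted_interleave/node0", "w");

    	if (!f) {
    		perror("open node0");
    		return 1;
    	}
    	fprintf(f, "4\n");
    	fclose(f);
    	return 0;
    }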
|
D | sysfs-kernel-mm-mempolicy | 1 What: /sys/kernel/mm/mempolicy/
|
/linux-6.12.1/tools/testing/vma/ |
D | vma_internal.h |
     165  struct mempolicy {};
     282  struct mempolicy *vm_policy; /* NUMA policy for the VMA */
     342  int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
     354  struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
     627  static inline void mpol_put(struct mempolicy *)
     869  static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
|
/linux-6.12.1/fs/proc/ |
D | internal.h |
      19  struct mempolicy;
     326  struct mempolicy *task_mempolicy;
|
/linux-6.12.1/Documentation/translations/zh_CN/core-api/ |
D | mm-api.rst | 117 mm/mempolicy.c
|
/linux-6.12.1/Documentation/admin-guide/mm/ |
D | numa_memory_policy.rst |
     170  structure, struct mempolicy. Details of this structure will be
     207  preferred_node member of struct mempolicy. When the internal
     256  /sys/kernel/mm/mempolicy/weighted_interleave/
     269  Without this flag, any time a mempolicy is rebound because of a
     301  mempolicy is rebound because of a change in the set of allowed
     321  if not already set, sets the node in the mempolicy nodemask.
     347  To resolve use/free races, struct mempolicy contains an atomic reference
     350  the structure back to the mempolicy kmem cache when the reference count
     427  definitions are defined in <linux/mempolicy.h>.
     478  mempolicy range. Other address ranges are ignored. A home node is the NUMA node
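The excerpts above describe the user-facing side of struct mempolicy. A minimal userspace sketch, assuming libnuma's <numaif.h> declarations for the raw syscalls and a machine with at least two NUMA nodes: set_mempolicy(2) installs a task-wide policy, mbind(2) a per-range one:

    /* Userspace sketch of the set_mempolicy(2)/mbind(2) interface
     * described in numa_memory_policy.rst. Assumes libnuma's <numaif.h>
     * (link with -lnuma) and nodes 0 and 1 being online.
     */
    #include <numaif.h>
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long node0 = 1UL << 0;	/* nodemask bit for node 0 */
    	unsigned long nodes01 = 3UL;	/* nodes 0 and 1 */
    	size_t len = 4096 * 16;
    	void *buf;

    	/* Task-wide policy: allocate from node 0 only. */
    	if (set_mempolicy(MPOL_BIND, &node0, 8 * sizeof(node0)))
    		perror("set_mempolicy");

    	/* Per-range policy: interleave this mapping across nodes 0-1. */
    	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
    		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (buf == MAP_FAILED) {
    		perror("mmap");
    		return 1;
    	}
    	if (mbind(buf, len, MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01), 0))
    		perror("mbind");

    	return 0;
    }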
|
D | hugetlbpage.rst |
     291  sysctl or attribute. When the ``nr_hugepages`` attribute is used, mempolicy
     313  #. Regardless of mempolicy mode [see
     316  specified in the mempolicy as if "interleave" had been specified.
     332  Any of the other mempolicy modes may be used to specify a single node.
     334  #. The nodes allowed mask will be derived from any non-default task mempolicy,
     341  #. Any task mempolicy specified--e.g., using numactl--will be constrained by
     373  resources exist, regardless of the task's mempolicy or cpuset constraints.
     376  as we don't know until fault time, when the faulting task's mempolicy is
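hugetlbpage.rst's point that a task mempolicy constrains huge page allocation can be exercised from userspace: bind the task to a node, then write the mempolicy-aware sysctl. A hedged sketch, assuming libnuma's <numaif.h>, root privileges, and a CONFIG_NUMA kernel; the node and page count are illustrative:

    /* Userspace sketch of the interaction hugetlbpage.rst describes:
     * a task mempolicy constrains which nodes a subsequent write to
     * nr_hugepages_mempolicy allocates huge pages from.
     */
    #include <numaif.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long node0 = 1UL << 0;
    	FILE *f;

    	/* Bind this task to node 0 ... */
    	if (set_mempolicy(MPOL_BIND, &node0, 8 * sizeof(node0)))
    		perror("set_mempolicy");

    	/* ... so these huge pages come from node 0 only. */
    	f = fopen("/proc/sys/vm/nr_hugepages_mempolicy", "w");
    	if (!f) {
    		perror("nr_hugepages_mempolicy");
    		return 1;
    	}
    	fprintf(f, "32\n");
    	fclose(f);
    	return 0;
    }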
|
/linux-6.12.1/Documentation/core-api/ |
D | mm-api.rst | 107 .. kernel-doc:: mm/mempolicy.c
|
/linux-6.12.1/kernel/ |
D | fork.c |
    2306  p->mempolicy = mpol_dup(p->mempolicy);  in copy_process()
    2307  if (IS_ERR(p->mempolicy)) {  in copy_process()
    2308  retval = PTR_ERR(p->mempolicy);  in copy_process()
    2309  p->mempolicy = NULL;  in copy_process()
    2659  mpol_put(p->mempolicy);  in copy_process()
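The copy_process() hits show why mpol_dup() uses the ERR_PTR convention: NULL is a legitimate "no policy, use default" value that mpol_dup() passes through, so allocation failure must be signalled differently. The same pattern, condensed into one helper for clarity (the helper name is hypothetical):

    /* Condensed from the copy_process() hits above: duplicate the
     * parent's policy, distinguishing allocation failure (ERR_PTR)
     * from "no policy" (NULL).
     */
    #include <linux/mempolicy.h>
    #include <linux/err.h>

    static int dup_task_policy(struct task_struct *p)
    {
    	p->mempolicy = mpol_dup(p->mempolicy);
    	if (IS_ERR(p->mempolicy)) {
    		int retval = PTR_ERR(p->mempolicy);

    		p->mempolicy = NULL;	/* don't leave a stale ERR_PTR behind */
    		return retval;
    	}
    	return 0;
    }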
|
/linux-6.12.1/ipc/ |
D | shm.c |
     566  static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
     576  static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
     580  struct mempolicy *mpol = vma->vm_policy;  in shm_get_policy()
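shm.c implements the vm_operations_struct policy hooks declared in mm.h. A hedged sketch of such a pair; the addr/ilx parameters complete the truncated get_policy prototype above and are assumptions here:

    /* Sketch of vm_operations_struct policy hooks, modeled on the
     * ipc/shm.c hits above.
     */
    #include <linux/mm.h>
    #include <linux/mempolicy.h>

    static int example_set_policy(struct vm_area_struct *vma,
    			      struct mempolicy *mpol)
    {
    	/* A real implementation forwards to the backing object,
    	 * as shm_set_policy() forwards to shmem. */
    	return 0;
    }

    static struct mempolicy *example_get_policy(struct vm_area_struct *vma,
    					    unsigned long addr, pgoff_t *ilx)
    {
    	/* shm.c falls back to the VMA's own policy; shared-policy
    	 * backends may instead return a conditionally referenced
    	 * policy that callers drop with mpol_cond_put(). */
    	return vma->vm_policy;
    }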
|
/linux-6.12.1/Documentation/admin-guide/cgroup-v1/ |
D | cpusets.rst |
     342  except perhaps as modified by the task's NUMA mempolicy or cpuset
     349  or slab caches to ignore the task's NUMA mempolicy and be spread
     353  is turned off, then the currently specified NUMA mempolicy once again
     631  mempolicy MPOL_BIND, and the nodes to which it was bound overlap with
|