/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes. The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear. Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned. If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems. On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters. That requires that freelist & counters be adjacent and
 * double-word aligned. We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
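
/*
 * Illustrative sketch of the rules above (not part of this header; the
 * stored values are made up for the example). A caller that borrows
 * page->private and the mapcount union must undo both before handing the
 * page back to the buddy allocator:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (page) {
 *		set_page_private(page, (unsigned long)my_cookie);
 *		atomic_set(&page->_mapcount, 42);   (page never user-mapped)
 *		...
 *		set_page_private(page, 0);
 *		page_mapcount_reset(page);          (mandatory before freeing)
 *		__free_pages(page, 0);
 *	}
 */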
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif
/*
 * A struct page describes one physical page frame.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
					///important collection of page flags; also encodes e.g. the node ID
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */ ///manages page cache and anonymous pages
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock. Sometimes used as a generic list
			 * by the page owner.
			 */
			struct list_head lru;	///links the page into an LRU list
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;	///address space this page belongs to; shared by file and anonymous pages
			pgoff_t index;		/* Our offset within mapping. */ ///offset of the page within the mapping
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			/* On a swap fault this holds the identifier of the
			 * swapped-out page. The identifier layout is defined
			 * in arch/arm64/include/asm/pgtable.h:
			 * Encode and decode a swap entry:
			 *	bits 0-1:  present (must be zero)
			 *	bits 2-7:  swap type
			 *	bits 8-57: swap offset
			 *	bit 58:    PTE_PROT_NONE (must be zero)
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling non
			 * page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support, not supported in
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		///manages a slab; these members are valid once PG_slab is set in flags
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* Partial pages */
					struct page *next;
#ifdef CONFIG_64BIT
					int pages;	/* Nr of pages left */
					int pobjects;	/* Approximate count */
#else
					short int pages;
					short int pobjects;
#endif
				};
			};
			///points to the kmem_cache that owns this slab
			struct kmem_cache *slab_cache; /* not slob */

			/* Double-word boundary */
			///index array of the slab's free objects, used together with
			///the slab descriptor's active member; it actually holds the
			///indices of the objects in the slab that are not yet in use
			void *freelist;		/* first free object */
			union {
				///first object in the slab area
				void *s_mem;	/* slab: first object */
				unsigned long counters;		/* SLUB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
			unsigned int compound_nr; /* 1 << compound_order */
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			atomic_t hpage_pinned_refcount;
			/* For both global and memcg */
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;	///number of page-table mappings of this page

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for. See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;

		///number of objects in the slab that are already in use
		unsigned int active;		/* SLAB */
		int units;			/* SLOB */
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;	///reference count: 0 means free or about to be freed; > 0 means in use

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline atomic_t *compound_pincount_ptr(struct page *page)
{
	return &page[2].hpage_pinned_refcount;
}
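
/*
 * Sketch of how these helpers are meant to be used: the compound mapcount
 * lives in the first tail page and the pin count in the second, so both
 * expect the head page of a compound page:
 *
 *	struct page *head = compound_head(page);
 *
 *	if (PageCompound(page)) {
 *		int mapcount = atomic_read(compound_mapcount_ptr(head));
 *		int pincount = atomic_read(compound_pincount_ptr(head));
 *		...
 *	}
 */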

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}
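
/*
 * Sketch of the usual pairing of page_private()/set_page_private() with the
 * PG_private flag; attach_page_private()/detach_page_private() in
 * <linux/pagemap.h> wrap exactly this pattern:
 *
 *	get_page(page);
 *	set_page_private(page, (unsigned long)data);
 *	SetPagePrivate(page);
 *	...
 *	ClearPagePrivate(page);
 *	set_page_private(page, 0);
 *	put_page(page);
 */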

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};
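
/*
 * Sketch of how a page_frag_cache is consumed (compare page_frag_alloc()
 * in mm/page_alloc.c): fragments are carved out of one large page, and the
 * pagecnt_bias trick keeps page->_refcount updates off the hot path:
 *
 *	static struct page_frag_cache nc;
 *
 *	void *buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...fill the 256-byte fragment...
 *		page_frag_free(buf);
 *	}
 */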

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node vm_rb;		/* link in global region tree */
	vm_flags_t vm_flags;		/* VMA vm_flags */
	unsigned long vm_start;		/* start address of region */
	unsigned long vm_end;		/* region initialised to here */
	unsigned long vm_top;		/* region allocated to here */
	unsigned long vm_pgoff;		/* the offset in vm_file corresponding to vm_start */
	struct file *vm_file;		/* the backing file or NULL */

	int vm_usage;			/* region usage count (access under nommu_region_sem) */
	bool vm_icache_flushed : 1;	/* true if the icache has been flushed for
					 * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	///start and end of the VMA within the process address space, as the interval [vm_start, vm_end)
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	///all VMAs of a process are linked into one list, headed by mm->mmap
	struct vm_area_struct *vm_next, *vm_prev;

	///each process's mm_struct has a red-black tree; the VMA is inserted into it as a node
	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	///largest free virtual memory gap, in bytes, in the subtree rooted at this VMA
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	///points back to the mm_struct of the process owning this VMA
	struct mm_struct *vm_mm;	/* The address space we belong to. */

	/*
	 * Access permissions of this VMA.
	 * See vmf_insert_mixed_prot() for discussion.
	 */
	///access permissions of this VMA
	pgprot_t vm_page_prot;

	///flags describing the properties of this VMA
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	///list of all anon_vma_chain (avc) entries associated with this VMA
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */

	///points to the anon_vma of this VMA
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	///operations table of this VMA, mostly used for file mappings
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	///offset of the file mapping, in page-size units
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	///the mapped file; NULL for anonymous mappings
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
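
/*
 * Sketch of the canonical VMA lookup against the structures above: readers
 * take mmap_lock in read mode, and since find_vma() returns the first VMA
 * with vm_end > addr, the caller still has to check vm_start:
 *
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		...addr lies within [vm_start, vm_end)...
 *	mmap_read_unlock(mm);
 */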

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct {
		///all VMAs of the process form a single list; mmap is its head
		struct vm_area_struct *mmap;		/* list of VMAs */

		///root of the VMA red-black tree, ordered by address for fast lookup
		struct rb_root mm_rb;
		u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
		///checks whether the virtual address space has enough room and
		///returns the start address of a range not yet mapped
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		///start address of the mmap area
		unsigned long mmap_base;	/* base of mmap area */
		///base address for bottom-up mmap allocations
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		///size of the virtual address space visible to the task
		unsigned long task_size;	/* size of task vm space */
		///end address of the highest VMA
		unsigned long highest_vm_end;	/* highest vma end address */
		///points to the process's top-level page table, the page global directory
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		///number of threads currently using this address space
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.
		 */
		///primary reference count of the mm_struct itself
		atomic_t mm_count;

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		///number of VMAs in this mm
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel config, the current mmap_lock's offset
		 * inside 'mm_struct' is at 0x120, which is very optimal, as
		 * its two hot fields 'count' and 'owner' sit in 2 different
		 * cachelines, and when mmap_lock is highly contended, both
		 * of the 2 fields will be accessed frequently, current layout
		 * will help to reduce cache bouncing.
		 *
		 * So please be careful with adding new fields before
		 * mmap_lock, which can easily push the 2 fields into one
		 * cacheline.
		 */
		///read-write semaphore protecting the VMAs; read-side use far
		///outnumbers writes, hence a rwsem rather than a plain lock
		struct rw_semaphore mmap_lock;

		///all mm_structs are linked into one doubly-linked list, headed
		///by the init_mm memory descriptor
		struct list_head mmlist; /* List of maybe swapped mm's. These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */


		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		///total amount of process address space already in use
		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */
		///start and end addresses of the code and data segments
		unsigned long start_code, end_code, start_data, end_data;

		///start_brk: start address of the task's heap
		///brk: end address of the heap, moved via sys_brk()
		///start_stack: start address of the user-mode stack
		unsigned long start_brk, brk, start_stack;

		///arg: start and end of the argument strings passed at program start
		///env: start and end of the environment variables
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

		struct core_state *core_state; /* coredumping support */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		bool tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SUPPORT
		u32 pasid;
#endif
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};
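
/*
 * Sketch of the two-level refcounting described at @mm_users and @mm_count
 * above: mmgrab()/mmdrop() pin only struct mm_struct itself, while
 * mmget_not_zero()/mmput() pin the whole address space (VMAs, page tables):
 *
 *	if (mmget_not_zero(mm)) {
 *		...safe to walk mm->mmap and fault in pages...
 *		mmput(mm);
 *	}
 *
 *	mmgrab(mm);
 *	...mm_struct won't be freed, but its mappings may be torn down...
 *	mmdrop(mm);
 */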

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
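
/*
 * Sketch: mm_cpumask() tracks the CPUs that may hold TLB entries for this
 * mm, and arch code uses it to limit TLB-shootdown IPIs. flush_fn below
 * stands in for an arch-specific flush callback:
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));        (at context switch)
 *	on_each_cpu_mask(mm_cpumask(mm), flush_fn, mm, true);
 */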

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
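
/*
 * Sketch of the protocol the four helpers above implement, on the side that
 * modifies PTEs (compare the ordering comment in inc_tlb_flush_pending()):
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	...clear or write-protect PTEs...
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * A racing thread that takes the same PTL afterwards can then rely on
 * mm_tlb_flush_pending(mm) to tell whether a flush is still outstanding.
 */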

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_WRITE:		Special case for get_user_pages
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte, no page was returned
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 *
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
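
/*
 * Sketch of how a hugetlb fault path reports a poisoned large page using
 * the macros above (compare hugetlb_fault() in mm/hugetlb.c):
 *
 *	return VM_FAULT_HWPOISON_LARGE |
 *	       VM_FAULT_SET_HINDEX(hstate_index(h));
 *
 * The caller later recovers the hstate index with VM_FAULT_GET_HINDEX(ret).
 */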

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_WRITE,               "WRITE" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
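
/*
 * Sketch of wiring up a vDSO-style special mapping (compare
 * arch/x86/entry/vdso/vma.c); the names and flags here are made up for
 * the example:
 *
 *	static struct page *special_pages[2];  (slot 0 set at init, slot 1 = NULL)
 *	static const struct vm_special_mapping special_mapping = {
 *		.name  = "[example]",
 *		.pages = special_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD,
 *				       &special_mapping);
 */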

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
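
/*
 * Sketch: the helpers in <linux/swapops.h> pack and unpack these entries,
 * matching the bit layout documented at page->private above:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned int swap_type = swp_type(entry);
 *	pgoff_t swap_offset = swp_offset(entry);
 */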

#endif /* _LINUX_MM_TYPES_H */