/linux-5.15/include/linux/
page-flags.h
  197  unsigned long head = READ_ONCE(page->compound_head);  in _compound_head()
  204  #define compound_head(page) ((typeof(page))_compound_head(page))  macro
  208  return READ_ONCE(page->compound_head) & 1;  in PageTail()
  260  #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
  266  PF_POISONED_CHECK(compound_head(page)); })
  414  page = compound_head(page);  in PAGEFLAG()
  504  page = compound_head(page);  in PageAnon()
  523  page = compound_head(page);  in PageKsm()
  536  page = compound_head(page);  in PageUptodate()
  595  WRITE_ONCE(page->compound_head, (unsigned long)head + 1);  in __PAGEFLAG()
  [all …]

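Taken together, these page-flags.h hits describe the encoding that the rest of this index leans on: a tail page's compound_head field holds the head page's address with bit 0 set (line 595 stores head + 1), PageTail() tests that bit (line 208), and _compound_head()/compound_head() strip it again to return the head, or the page itself when it is not a tail (lines 197 and 204). The following is a small userspace sketch of that encoding; struct page here is a one-field stand-in, the *_model names are invented for the example, and the READ_ONCE()/WRITE_ONCE() annotations of the real code are omitted.

#include <assert.h>
#include <stdio.h>

struct page {
	unsigned long compound_head;	/* bit 0 set => tail page, rest = head address */
};

/* Model of PageTail(): bit 0 of compound_head marks a tail page. */
static int page_tail_model(const struct page *page)
{
	return page->compound_head & 1;
}

/* Model of compound_head(): strip bit 0 to recover the head page,
 * or return the page itself when it is not a tail page. */
static struct page *compound_head_model(struct page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)
		return (struct page *)(head - 1);
	return page;
}

/* Model of the write side: store the head address with bit 0 set. */
static void set_compound_head_model(struct page *tail, struct page *head)
{
	tail->compound_head = (unsigned long)head + 1;
}

int main(void)
{
	struct page pages[4] = { { 0 } };	/* pages[0] acts as the head of a 4-page compound */

	for (int i = 1; i < 4; i++)
		set_compound_head_model(&pages[i], &pages[0]);

	assert(!page_tail_model(&pages[0]));	/* head page: bit 0 clear */
	assert(page_tail_model(&pages[3]));	/* tail page: bit 0 set */
	assert(compound_head_model(&pages[3]) == &pages[0]);
	assert(compound_head_model(&pages[0]) == &pages[0]);
	printf("compound_head encoding model OK\n");
	return 0;
}
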
pagemap.h
  529  head = compound_head(page);  in page_to_index()
  614  page = compound_head(page);  in trylock_page()
  688  wait_on_page_bit(compound_head(page), PG_locked);  in wait_on_page_locked()
  695  return wait_on_page_bit_killable(compound_head(page), PG_locked);  in wait_on_page_locked_killable()
  721  page = compound_head(page);  in set_page_private_2()

page_ref.h
  72  return atomic_read(&compound_head(page)->_refcount);  in page_count()

mm.h
  850  page = compound_head(page);  in compound_mapcount()
  903  return compound_head(page);  in virt_to_head_page()
  961  page = compound_head(page);  in hpage_pincount_available()
  973  page = compound_head(page);  in compound_pincount()
  1209  page = compound_head(page);  in get_page()
  1225  page = compound_head(page);  in try_get_page()
  1234  page = compound_head(page);  in put_page()
  1330  return ((unsigned int)page_ref_count(compound_head(page))) >=  in page_maybe_dma_pinned()

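The page_ref.h hit at line 72 and the mm.h hits in get_page(), try_get_page() and put_page() (lines 1209, 1225, 1234) show the caller-side convention: a compound page is refcounted as a whole, with the counter kept on the head page, so every reference operation first redirects a possible tail page to its head. A minimal sketch of that convention, reusing the toy struct page from the previous example and adding an illustrative _refcount field; the *_model helpers and the plain int counter are simplifications, the kernel uses atomic page_ref_* helpers.

#include <assert.h>
#include <stdio.h>

struct page {
	unsigned long compound_head;	/* bit 0 set => tail page */
	int _refcount;			/* only the head page's counter is used */
};

static struct page *compound_head_model(struct page *page)
{
	unsigned long head = page->compound_head;

	return (head & 1) ? (struct page *)(head - 1) : page;
}

/* Model of page_count(): read the head page's counter, never the tail's. */
static int page_count_model(struct page *page)
{
	return compound_head_model(page)->_refcount;
}

/* Models of get_page()/put_page(): pin or unpin the compound page as a whole. */
static void get_page_model(struct page *page)
{
	compound_head_model(page)->_refcount++;
}

static void put_page_model(struct page *page)
{
	struct page *head = compound_head_model(page);

	if (--head->_refcount == 0) {
		/* the kernel would free the entire compound page here */
	}
}

int main(void)
{
	struct page pages[2] = { { 0, 1 }, { 0, 0 } };

	pages[1].compound_head = (unsigned long)&pages[0] + 1;	/* pages[1] is a tail */

	get_page_model(&pages[1]);		/* grabbing a tail page pins the head */
	assert(page_count_model(&pages[1]) == 2);
	assert(page_count_model(&pages[0]) == 2);
	put_page_model(&pages[1]);
	assert(page_count_model(&pages[0]) == 1);
	printf("head-page refcount model OK\n");
	return 0;
}
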
mm_types.h
  165  unsigned long compound_head;  /* Bit zero is set */  member

huge_mm.h
  259  return compound_head(page);  in thp_head()

/linux-5.15/mm/
util.c
  671  page = compound_head(page);  in page_rmapping()
  685  page = compound_head(page);  in page_mapped()
  702  page = compound_head(page);  in page_anon_vma()
  713  page = compound_head(page);  in page_mapping()
  746  page = compound_head(page);  in __page_mapcount()

memory-failure.c
  364  tk->size_shift = page_shift(compound_head(p));  in add_to_kill()
  980  struct page *hpage = compound_head(p);  in me_huge_page()
  1134  struct page *head = compound_head(page);  in __get_hwpoison_page()
  1151  if (head == compound_head(page))  in __get_hwpoison_page()
  1422  struct page *head = compound_head(p);  in memory_failure_hugetlb()
  1651  orig_head = hpage = compound_head(p);  in memory_failure()
  1735  if (PageCompound(p) && compound_head(p) != orig_head) {  in memory_failure()
  1948  page = compound_head(p);  in unpoison_memory()
  2057  struct page *hpage = compound_head(page);  in __soft_offline_page()
  2127  struct page *hpage = compound_head(page);  in soft_offline_in_use_page()

hwpoison-inject.c
  28  hpage = compound_head(p);  in hwpoison_inject()

swap.c
  332  page = compound_head(page);  in activate_page()
  354  page = compound_head(page);  in activate_page()
  415  page = compound_head(page);  in mark_page_accessed()
  937  page = compound_head(page);  in release_pages()

gup.c
  35  VM_BUG_ON_PAGE(page != compound_head(page), page);  in hpage_pincount_add()
  43  VM_BUG_ON_PAGE(page != compound_head(page), page);  in hpage_pincount_sub()
  71  struct page *head = compound_head(page);  in try_get_compound_head()
  87  if (unlikely(compound_head(page) != head)) {  in try_get_compound_head()
  228  put_compound_head(compound_head(page), 1, FOLL_PIN);  in unpin_user_page()
  243  page = compound_head(next);  in compound_range_next()
  268  page = compound_head(list[i]);  in compound_next()
  270  if (compound_head(list[nr]) != page)  in compound_next()
  1715  head = compound_head(pages[i]);  in check_and_migrate_movable_pages()
  2177  VM_BUG_ON_PAGE(compound_head(page) != head, page);  in gup_pte_range()

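The try_get_compound_head() hits at lines 71 and 87 show a subtler use of the helper in the get_user_pages fast path: resolve the head, take references on it, then re-check that the page still maps to the same head, backing out if the compound page was split or reused concurrently (memory-failure.c applies the same re-check idea at lines 1151 and 1735). A simplified sketch of that check/ref/re-check shape using C11 atomics; the *_model names are invented for the example, and the real function also handles FOLL_PIN accounting. The compare-exchange loop imitates its refusal to resurrect a page whose refcount has already dropped to zero.

#include <stdatomic.h>
#include <stddef.h>

struct page {
	_Atomic unsigned long compound_head;	/* bit 0 set => tail page */
	atomic_int _refcount;			/* kept on the head page */
};

static struct page *compound_head_model(struct page *page)
{
	unsigned long head = atomic_load(&page->compound_head);

	return (head & 1) ? (struct page *)(head - 1) : page;
}

/* Take @refs references on @head unless its refcount has already reached zero. */
static int page_ref_add_unless_zero_model(struct page *head, int refs)
{
	int old = atomic_load(&head->_refcount);

	do {
		if (old == 0)
			return 0;	/* page is being freed, do not touch it */
	} while (!atomic_compare_exchange_weak(&head->_refcount, &old, old + refs));
	return 1;
}

/*
 * Model of the try_get_compound_head() pattern: pin the head page, then make
 * sure the page still belongs to that head; if not, the compound page changed
 * under us, so drop the references again and report failure.
 */
static struct page *try_get_compound_head_model(struct page *page, int refs)
{
	struct page *head = compound_head_model(page);

	if (!page_ref_add_unless_zero_model(head, refs))
		return NULL;

	if (compound_head_model(page) != head) {
		atomic_fetch_sub(&head->_refcount, refs);
		return NULL;
	}
	return head;
}

int main(void)
{
	struct page head, tail;

	atomic_init(&head.compound_head, 0);
	atomic_init(&head._refcount, 1);
	atomic_init(&tail.compound_head, (unsigned long)&head + 1);
	atomic_init(&tail._refcount, 0);

	/* pinning through the tail must land on the head and bump its count */
	return try_get_compound_head_model(&tail, 1) == &head ? 0 : 1;
}
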
usercopy.c
  236  page = compound_head(kmap_to_page((void *)ptr));  in check_heap_object()

filemap.c
  1446  page = compound_head(page);  in put_and_wait_on_page_locked()
  1511  page = compound_head(page);  in unlock_page()
  1531  page = compound_head(page);  in end_page_private_2()
  1547  page = compound_head(page);  in wait_on_page_private_2()
  1568  page = compound_head(page);  in wait_on_page_private_2_killable()
  1647  struct page *page = compound_head(__page);  in __lock_page()
  1656  struct page *page = compound_head(__page);  in __lock_page_killable()
  3105  if (unlikely(compound_head(page)->mapping != mapping)) {  in filemap_fault()

debug.c
  49  struct page *head = compound_head(page);  in __dump_page()

huge_memory.c
  504  struct mem_cgroup *memcg = page_memcg(compound_head(page));  in get_deferred_split_queue()
  537  page = compound_head(page);  in is_transparent_hugepage()
  2408  struct page *head = compound_head(page);  in __split_huge_page()
  2558  page = compound_head(page);  in page_trans_huge_mapcount()
  2614  struct page *head = compound_head(page);  in split_huge_page_to_list()
  2757  struct mem_cgroup *memcg = page_memcg(compound_head(page));  in deferred_split_huge_page()
  2822  page = compound_head(page);  in deferred_split_scan()
  2980  if (!can_split_huge_page(compound_head(page), NULL))  in split_huge_pages_pid()

rmap.c
  750  } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {  in page_address_in_vma()
  1057  page = compound_head(page);  in page_move_anon_rmap()
  1273  struct page *head = compound_head(page);  in page_add_file_rmap()
  1414  deferred_split_huge_page(compound_head(page));  in page_remove_rmap()

migrate.c
  306  page = compound_head(page);  in __migration_entry_wait()
  1650  struct hstate *h = page_hstate(compound_head(page));  in alloc_migration_target()
  1757  head = compound_head(page);  in add_page_for_migration()

swapfile.c
  1596  page = compound_head(page);  in page_swapped()
  1626  page = compound_head(page);  in page_trans_huge_map_swapcount()
  1692  page = compound_head(page);  in reuse_swap_page()
  1745  page = compound_head(page);  in try_to_free_swap()

/linux-5.15/fs/proc/
page.c
  153  struct page *head = compound_head(page);  in stable_page_flags()
  186  if (PageTail(page) && PageSlab(compound_head(page)))  in stable_page_flags()

/linux-5.15/kernel/
crash_core.c
  438  VMCOREINFO_OFFSET(page, compound_head);  in crash_save_vmcoreinfo_init()

/linux-5.15/arch/powerpc/mm/book3s64/
iommu_api.c
  155  pageshift = page_shift(compound_head(page));  in mm_iommu_do_alloc()

/linux-5.15/drivers/infiniband/core/
umem_odp.c
  501  struct page *head_page = compound_head(page);  in ib_umem_odp_unmap_dma_pages()

/linux-5.15/net/core/
page_pool.c
  741  page = compound_head(page);  in page_pool_return_skb_page()

datagram.c
  655  struct page *head = compound_head(pages[n]);  in __zerocopy_sg_from_iter()

/linux-5.15/mm/kfence/
core.c
  454  if (WARN_ON(compound_head(&pages[i]) != &pages[i]))  in kfence_init_pool()