Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c       |  10
-rw-r--r--  mm/filemap.c          |   6
-rw-r--r--  mm/highmem.c          |   4
-rw-r--r--  mm/huge_memory.c      |  11
-rw-r--r--  mm/hugetlb.c          |  17
-rw-r--r--  mm/kasan/hw_tags.c    |   5
-rw-r--r--  mm/kasan/kasan.h      |  10
-rw-r--r--  mm/kfence/core.c      |  21
-rw-r--r--  mm/kfence/kfence.h    |  21
-rw-r--r--  mm/kfence/report.c    |  47
-rw-r--r--  mm/kmemleak.c         |   8
-rw-r--r--  mm/list_lru.c         |   6
-rw-r--r--  mm/mempolicy.c        |  39
-rw-r--r--  mm/migrate.c          |  80
-rw-r--r--  mm/mremap.c           |   3
-rw-r--r--  mm/page_alloc.c       |   4
-rw-r--r--  mm/page_io.c          |  54
-rw-r--r--  mm/page_vma_mapped.c  |   6
-rw-r--r--  mm/secretmem.c        |  17
-rw-r--r--  mm/shmem.c            |  31
-rw-r--r--  mm/slab.c             |   2
-rw-r--r--  mm/slab.h             |   2
-rw-r--r--  mm/slab_common.c      |   9
-rw-r--r--  mm/slob.c             |   2
-rw-r--r--  mm/slub.c             |   2
-rw-r--r--  mm/vmalloc.c          |  11
26 files changed, 212 insertions, 216 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index c3e37aa9ff9e..fe915db6149b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -26,6 +26,11 @@
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)
+
 static inline void count_compact_event(enum vm_event_item item)
 {
 	count_vm_event(item);
@@ -51,11 +56,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
 
 /*
- * Fragmentation score check interval for proactive compaction purposes.
- */
-static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
-
-/*
  * Page order with-respect-to which proactive compaction
  * calculates external fragmentation, which is used as
  * the "fragmentation score" of a node/zone.
diff --git a/mm/filemap.c b/mm/filemap.c
index 3a5ffb5587cd..9a1eef6c5d35 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1063,12 +1063,6 @@ void __init pagecache_init(void)
 		init_waitqueue_head(&folio_wait_table[i]);
 
 	page_writeback_init();
-
-	/*
-	 * tmpfs uses the ZERO_PAGE for reading holes: it is up-to-date,
-	 * and splice's page_cache_pipe_buf_confirm() needs to see that.
-	 */
-	SetPageUptodate(ZERO_PAGE(0));
 }
 
 /*
diff --git a/mm/highmem.c b/mm/highmem.c
index 0cc0c4da7ed9..1a692997fac4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
 
 		/* With debug all even slots are unmapped and act as guard */
 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
+			WARN_ON_ONCE(pte_val(pteval) != 0);
 			continue;
 		}
 		if (WARN_ON_ONCE(pte_none(pteval)))
@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
 
 		/* With debug all even slots are unmapped and act as guard */
 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
+			WARN_ON_ONCE(pte_val(pteval) != 0);
 			continue;
 		}
 		if (WARN_ON_ONCE(pte_none(pteval)))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2fe38212e07c..c468fee595ff 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2145,15 +2145,14 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
	 * pmd against. Otherwise we can end up replacing wrong folio.
	 */
	VM_BUG_ON(freeze && !folio);
-	if (folio) {
-		VM_WARN_ON_ONCE(!folio_test_locked(folio));
-		if (folio != page_folio(pmd_page(*pmd)))
-			goto out;
-	}
+	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
 
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-	    is_pmd_migration_entry(*pmd))
+	    is_pmd_migration_entry(*pmd)) {
+		if (folio && folio != page_folio(pmd_page(*pmd)))
+			goto out;
 		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+	}
 
 out:
	spin_unlock(ptl);
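The __split_huge_pmd() hunk above reorders the folio check so that the folio-vs-pmd comparison happens only after the PMD has been confirmed, under the page-table lock, to still be in a splittable state. A minimal user-space sketch of that re-validate-under-the-lock pattern (pthreads stand-ins with hypothetical names, not the kernel code):

#include <pthread.h>
#include <stdbool.h>

struct entry {
	pthread_mutex_t lock;
	void *owner;	/* may change at any time until we hold lock */
	bool is_huge;
};

static void split_if_owner_matches(struct entry *e, void *expected_owner)
{
	pthread_mutex_lock(&e->lock);
	if (e->is_huge) {
		/* Compare ownership only now, under the lock. */
		if (expected_owner && e->owner != expected_owner)
			goto out;
		e->is_huge = false;	/* stands in for the actual split */
	}
out:
	pthread_mutex_unlock(&e->lock);
}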
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b34f50156f7e..f8ca7cca3c1a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3475,7 +3475,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
 	int nr_nodes, node;
 	struct page *page;
-	int rc = 0;
 
 	lockdep_assert_held(&hugetlb_lock);
 
@@ -3486,15 +3485,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 	}
 
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
-		if (!list_empty(&h->hugepage_freelists[node])) {
-			page = list_entry(h->hugepage_freelists[node].next,
-					struct page, lru);
-			rc = demote_free_huge_page(h, page);
-			break;
+		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
+			if (PageHWPoison(page))
+				continue;
+
+			return demote_free_huge_page(h, page);
 		}
 	}
 
-	return rc;
+	/*
+	 * Only way to get here is if all pages on free lists are poisoned.
+	 * Return -EBUSY so that caller will not retry.
+	 */
+	return -EBUSY;
 }
 
 #define HSTATE_ATTR_RO(_name)	\
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 07a76c46daa5..9e1b6544bfa8 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -336,8 +336,6 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
 
 #endif
 
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-
 void kasan_enable_tagging(void)
 {
 	if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
@@ -347,6 +345,9 @@ void kasan_enable_tagging(void)
 	else
 		hw_enable_tagging_sync();
 }
+
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
+
 EXPORT_SYMBOL_GPL(kasan_enable_tagging);
 
 void kasan_force_async_fault(void)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d79b83d673b1..b01b4bbe0409 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -355,25 +355,27 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #define hw_set_mem_tag_range(addr, size, tag, init) \
 			arch_set_mem_tag_range((addr), (size), (tag), (init))
 
+void kasan_enable_tagging(void);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 #define hw_enable_tagging_sync()
 #define hw_enable_tagging_async()
 #define hw_enable_tagging_asymm()
 
+static inline void kasan_enable_tagging(void) { }
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
-void kasan_enable_tagging(void);
 void kasan_force_async_fault(void);
 
-#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
-static inline void kasan_enable_tagging(void) { }
 static inline void kasan_force_async_fault(void) { }
 
-#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
 #ifdef CONFIG_KASAN_SW_TAGS
 u8 kasan_random_tag(void);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index a203747ad2c0..9b2b5f56f4ae 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 }
 
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
-	long index;
-
-	/* The checks do not affect performance; only called from slow-paths. */
-
-	if (!is_kfence_address((void *)addr))
-		return NULL;
-
-	/*
-	 * May be an invalid index if called with an address at the edge of
-	 * __kfence_pool, in which case we would report an "invalid access"
-	 * error.
-	 */
-	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
-		return NULL;
-
-	return &kfence_metadata[index];
-}
-
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
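The hugetlb hunk above changes demote_pool_huge_page() from taking the first free page to scanning past HWPoison pages, returning -EBUSY only when every candidate is poisoned. A self-contained sketch of that skip-and-fail-terminally scan (plain C, illustrative types only, not the kernel list API):

#include <stdbool.h>
#include <stddef.h>

#define EBUSY 16

struct fake_page {
	bool hwpoison;
	struct fake_page *next;
};

/* Returns 0 after handling the first healthy page, or -EBUSY when all
 * pages are poisoned so the caller knows a retry cannot succeed. */
static int demote_first_healthy(struct fake_page *freelist)
{
	for (struct fake_page *p = freelist; p; p = p->next) {
		if (p->hwpoison)
			continue;
		/* ... demote p here ... */
		return 0;
	}
	return -EBUSY;
}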
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 9a6c4b1b12a8..600f2e2431d6 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -96,6 +96,27 @@ struct kfence_metadata {
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+	long index;
+
+	/* The checks do not affect performance; only called from slow-paths. */
+
+	if (!is_kfence_address((void *)addr))
+		return NULL;
+
+	/*
+	 * May be an invalid index if called with an address at the edge of
+	 * __kfence_pool, in which case we would report an "invalid access"
+	 * error.
+	 */
+	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+		return NULL;
+
+	return &kfence_metadata[index];
+}
+
 /* KFENCE error types for report generation. */
 enum kfence_error_type {
 	KFENCE_ERROR_OOB,		/* Detected a out-of-bounds access. */
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index f93a7b2a338b..f5a6d8ba3e21 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
 	/* We encountered a memory safety error, taint the kernel! */
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
 }
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+	int i, j;
+
+	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+		kp_stack[j] = (void *)track->stack_entries[i];
+	if (j < KS_ADDRS_COUNT)
+		kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+	unsigned long flags;
+
+	if (!meta)
+		return false;
+
+	/*
+	 * If state is UNUSED at least show the pointer requested; the rest
+	 * would be garbage data.
+	 */
+	kpp->kp_ptr = object;
+
+	/* Requesting info an a never-used object is almost certainly a bug. */
+	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+		return true;
+
+	raw_spin_lock_irqsave(&meta->lock, flags);
+
+	kpp->kp_slab = slab;
+	kpp->kp_slab_cache = meta->cache;
+	kpp->kp_objp = (void *)meta->addr;
+	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+	if (meta->state == KFENCE_OBJECT_FREED)
+		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+	/* get_stack_skipnr() ensures the first entry is outside allocator. */
+	kpp->kp_ret = kpp->kp_stack[0];
+
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+	return true;
+}
+#endif
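addr_to_metadata(), now visible to report.c via kfence.h, maps an address into the metadata array by exploiting the pool layout: guard and object pages alternate, so each object owns a PAGE_SIZE * 2 stride after one leading guard page. A user-space model of just the index arithmetic (constants are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ		4096UL
#define NUM_OBJECTS	255L

static long addr_to_index(uintptr_t pool, uintptr_t addr)
{
	long index = (long)((addr - pool) / (PAGE_SZ * 2)) - 1;

	/* Edge-of-pool addresses fall out of range: "invalid access". */
	if (index < 0 || index >= NUM_OBJECTS)
		return -1;
	return index;
}

int main(void)
{
	uintptr_t pool = 0x10000000UL;

	printf("%ld\n", addr_to_index(pool, pool + 2 * PAGE_SZ)); /* 0 */
	printf("%ld\n", addr_to_index(pool, pool));               /* -1 */
	return 0;
}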
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index acd7cbb82e16..a182f5ddaf68 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_alloc(__va(phys), size, min_count, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_free_part(__va(phys), size);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_not_leak(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_ignore(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index c669d87001a6..ba76428ceece 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -395,12 +395,6 @@ static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
 	struct list_lru_one *src, *dst;
 
 	/*
-	 * If there is no lru entry in this nlru, we can skip it immediately.
-	 */
-	if (!READ_ONCE(nlru->nr_items))
-		return;
-
-	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
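The kmemleak hunks above replace the HIGHMEM-only guard with an explicit window check: a physical address is only translated with __va() when its PFN lies in [min_low_pfn, max_low_pfn), i.e. inside directly-mapped lowmem. A hedged sketch of the predicate (PAGE_SHIFT and the PFN bounds are placeholder values standing in for the kernel's globals):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PHYS_PFN(x) ((uint64_t)(x) >> PAGE_SHIFT)

/* Illustrative stand-ins for the kernel's min_low_pfn/max_low_pfn. */
static uint64_t min_low_pfn = 0x100;
static uint64_t max_low_pfn = 0x80000;

static bool phys_in_lowmem(uint64_t phys)
{
	return PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn;
}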
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a2516d31db6c..8c74107a2b15 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+	struct folio *dst, *src = page_folio(page);
 	struct vm_area_struct *vma;
 	unsigned long address;
+	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
 	vma = find_vma(current->mm, start);
 	while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
 		vma = vma->vm_next;
 	}
 
-	if (PageHuge(page)) {
-		return alloc_huge_page_vma(page_hstate(compound_head(page)),
+	if (folio_test_hugetlb(src))
+		return alloc_huge_page_vma(page_hstate(&src->page),
 				vma, address);
-	} else if (PageTransHuge(page)) {
-		struct page *thp;
-
-		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-					 HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
+
+	if (folio_test_large(src))
+		gfp = GFP_TRANSHUGE;
+
 	/*
-	 * if !vma, alloc_page_vma() will use task or system default policy
+	 * if !vma, vma_alloc_folio() will use task or system default policy
	 */
-	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-			vma, address);
+	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+			folio_test_large(src));
+	return &dst->page;
 }
 #else
 
@@ -2227,6 +2224,19 @@ out:
 }
 EXPORT_SYMBOL(alloc_pages_vma);
 
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr, bool hugepage)
+{
+	struct folio *folio;
+
+	folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
+			hugepage);
+	if (folio && order > 1)
+		prep_transhuge_page(&folio->page);
+
+	return folio;
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
@@ -2733,6 +2743,7 @@ alloc_new:
 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 	if (!mpol_new)
 		goto err_out;
+	atomic_set(&mpol_new->refcnt, 1);
 	goto restart;
 }
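The new vma_alloc_folio() above is a thin wrapper: allocate through alloc_pages_vma() and, for order > 1 results, run THP preparation, so callers such as new_page() no longer need their own PageTransHuge special case. A rough user-space model of that shape (hypothetical names, no kernel semantics):

#include <stdlib.h>

struct folio_model {
	int order;
	int prepped;
};

static struct folio_model *vma_alloc_folio_model(int order)
{
	struct folio_model *f = malloc(sizeof(*f));

	if (!f)
		return NULL;
	f->order = order;
	f->prepped = (order > 1);	/* stands in for prep_transhuge_page() */
	return f;
}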
diff --git a/mm/migrate.c b/mm/migrate.c
index de175e2fdba5..6c31ee1e1c9b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+	struct folio *folio = page_folio(page);
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	gfp_mask = mtc->gfp_mask;
 	nid = mtc->nid;
 	if (nid == NUMA_NO_NODE)
-		nid = page_to_nid(page);
+		nid = folio_nid(folio);
 
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(compound_head(page));
+	if (folio_test_hugetlb(folio)) {
+		struct hstate *h = page_hstate(&folio->page);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
 	}
 
-	if (PageTransHuge(page)) {
+	if (folio_test_large(folio)) {
 		/*
 		 * clear __GFP_RECLAIM to make the migration callback
 		 * consistent with regular THP allocations.
		 */
 		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
-		order = HPAGE_PMD_ORDER;
+		order = folio_order(folio);
 	}
-	zidx = zone_idx(page_zone(page));
+	zidx = zone_idx(folio_zone(folio));
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
+	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-	return new_page;
+	return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
@@ -1999,32 +1997,20 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data)
 {
 	int nid = (int) data;
-	struct page *newpage;
-
-	newpage = __alloc_pages_node(nid,
-					 (GFP_HIGHUSER_MOVABLE |
-					  __GFP_THISNODE | __GFP_NOMEMALLOC |
-					  __GFP_NORETRY | __GFP_NOWARN) &
-					 ~__GFP_RECLAIM, 0);
-
-	return newpage;
-}
-
-static struct page *alloc_misplaced_dst_page_thp(struct page *page,
-						 unsigned long data)
-{
-	int nid = (int) data;
-	struct page *newpage;
-
-	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
-				   HPAGE_PMD_ORDER);
-	if (!newpage)
-		goto out;
-
-	prep_transhuge_page(newpage);
+	int order = compound_order(page);
+	gfp_t gfp = __GFP_THISNODE;
+	struct folio *new;
+
+	if (order > 0)
+		gfp |= GFP_TRANSHUGE_LIGHT;
+	else {
+		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
+			__GFP_NOWARN;
+		gfp &= ~__GFP_RECLAIM;
+	}
+	new = __folio_alloc_node(gfp, order, nid);
 
-out:
-	return newpage;
+	return &new->page;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2082,23 +2068,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
-	new_page_t *new;
-	bool compound;
 	int nr_pages = thp_nr_pages(page);
 
 	/*
-	 * PTE mapped THP or HugeTLB page can't reach here so the page could
-	 * be either base page or THP. And it must be head page if it is
-	 * THP.
-	 */
-	compound = PageTransHuge(page);
-
-	if (compound)
-		new = alloc_misplaced_dst_page_thp;
-	else
-		new = alloc_misplaced_dst_page;
-
-	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
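After the migrate.c consolidation above, one allocation callback serves both base pages and THPs, choosing its flags from the page order instead of via two near-duplicate functions. A sketch of the flag selection (the bit values are made up; only the branching mirrors the patch):

#include <stdio.h>

#define FAKE_THISNODE		0x1u
#define FAKE_TRANSHUGE_LIGHT	0x2u
#define FAKE_HIGHUSER_MOVABLE	0x4u
#define FAKE_NORETRY		0x8u
#define FAKE_NOWARN		0x10u

static unsigned int misplaced_gfp(int order)
{
	unsigned int gfp = FAKE_THISNODE;

	if (order > 0)
		gfp |= FAKE_TRANSHUGE_LIGHT;	/* THP destination */
	else
		gfp |= FAKE_HIGHUSER_MOVABLE | FAKE_NORETRY | FAKE_NOWARN;
	return gfp;
}

int main(void)
{
	printf("order 0: %#x\norder 9: %#x\n",
	       misplaced_gfp(0), misplaced_gfp(9));
	return 0;
}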
@@ -2118,9 +2090,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 		goto out;
 
 	list_add(&page->lru, &migratepages);
-	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
-				     MIGRATE_ASYNC, MR_NUMA_MISPLACED,
-				     &nr_succeeded);
+	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+				     NULL, node, MIGRATE_ASYNC,
+				     MR_NUMA_MISPLACED, &nr_succeeded);
 	if (nr_remaining) {
 		if (!list_empty(&migratepages)) {
 			list_del(&page->lru);
diff --git a/mm/mremap.c b/mm/mremap.c
index 9d76da79594d..303d3290b938 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	pmd_t *old_pmd, *new_pmd;
 	pud_t *old_pud, *new_pud;
 
+	if (!len)
+		return 0;
+
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2db95780e003..33ca8cab21e6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -128,7 +128,7 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
 struct pagesets {
 	local_lock_t lock;
 };
-static DEFINE_PER_CPU(struct pagesets, pagesets) __maybe_unused = {
+static DEFINE_PER_CPU(struct pagesets, pagesets) = {
 	.lock = INIT_LOCAL_LOCK(lock),
 };
 
@@ -6131,7 +6131,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
 	do {
 		zone_type--;
 		zone = pgdat->node_zones + zone_type;
-		if (managed_zone(zone)) {
+		if (populated_zone(zone)) {
 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
 			check_highest_zone(zone_type);
 		}
diff --git a/mm/page_io.c b/mm/page_io.c
index b417f000b49e..89fbf3cae30f 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
 	bio_put(bio);
 }
 
-static void swap_slot_free_notify(struct page *page)
-{
-	struct swap_info_struct *sis;
-	struct gendisk *disk;
-	swp_entry_t entry;
-
-	/*
-	 * There is no guarantee that the page is in swap cache - the software
-	 * suspend code (at least) uses end_swap_bio_read() against a non-
-	 * swapcache page. So we must check PG_swapcache before proceeding with
-	 * this optimization.
-	 */
-	if (unlikely(!PageSwapCache(page)))
-		return;
-
-	sis = page_swap_info(page);
-	if (data_race(!(sis->flags & SWP_BLKDEV)))
-		return;
-
-	/*
-	 * The swap subsystem performs lazy swap slot freeing,
-	 * expecting that the page will be swapped out again.
-	 * So we can avoid an unnecessary write if the page
-	 * isn't redirtied.
-	 * This is good for real swap storage because we can
-	 * reduce unnecessary I/O and enhance wear-leveling
-	 * if an SSD is used as the as swap device.
-	 * But if in-memory swap device (eg zram) is used,
-	 * this causes a duplicated copy between uncompressed
-	 * data in VM-owned memory and compressed data in
-	 * zram-owned memory. So let's free zram-owned memory
-	 * and make the VM-owned decompressed page *dirty*,
-	 * so the page should be swapped out somewhere again if
-	 * we again wish to reclaim it.
-	 */
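The mremap.c hunk above adds a trivial but important guard: with len == 0, move_page_tables() now returns before computing old_end or flushing caches. The general shape, as a hedged sketch rather than the kernel function:

/* Bail out before any setup work when the requested range is empty. */
static unsigned long move_range(unsigned long old_addr, unsigned long len)
{
	if (!len)
		return 0;
	/* ... flush and walk [old_addr, old_addr + len) ... */
	return len;
}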
-	disk = sis->bdev->bd_disk;
-	entry.val = page_private(page);
-	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
-		unsigned long offset;
-
-		offset = swp_offset(entry);
-
-		SetPageDirty(page);
-		disk->fops->swap_slot_free_notify(sis->bdev,
-				offset);
-	}
-}
-
 static void end_swap_bio_read(struct bio *bio)
 {
 	struct page *page = bio_first_page_all(bio);
@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
 	}
 
 	SetPageUptodate(page);
-	swap_slot_free_notify(page);
 out:
 	unlock_page(page);
 	WRITE_ONCE(bio->bi_private, NULL);
@@ -394,11 +345,6 @@ int swap_readpage(struct page *page, bool synchronous)
 	if (sis->flags & SWP_SYNCHRONOUS_IO) {
 		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
 		if (!ret) {
-			if (trylock_page(page)) {
-				swap_slot_free_notify(page);
-				unlock_page(page);
-			}
-
 			count_vm_event(PSWPIN);
 			goto out;
 		}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 1187f9c1ec5b..14a5cda73dee 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -163,7 +163,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		return not_found(pvmw);
 
 	if (unlikely(is_vm_hugetlb_page(vma))) {
-		unsigned long size = pvmw->nr_pages * PAGE_SIZE;
+		struct hstate *hstate = hstate_vma(vma);
+		unsigned long size = huge_page_size(hstate);
 		/* The only possible mapping was handled on last iteration */
 		if (pvmw->pte)
 			return not_found(pvmw);
@@ -173,8 +174,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		if (!pvmw->pte)
 			return false;
 
-		pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
-					     pvmw->pte);
+		pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
 		spin_lock(pvmw->ptl);
 		if (!check_pte(pvmw))
 			return not_found(pvmw);
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 098638d3b8a4..3b3cf2892b6a 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
 	.isolate_page	= secretmem_isolate_page,
 };
 
+static int secretmem_setattr(struct user_namespace *mnt_userns,
+			     struct dentry *dentry, struct iattr *iattr)
+{
+	struct inode *inode = d_inode(dentry);
+	unsigned int ia_valid = iattr->ia_valid;
+
+	if ((ia_valid & ATTR_SIZE) && inode->i_size)
+		return -EINVAL;
+
+	return simple_setattr(mnt_userns, dentry, iattr);
+}
+
+static const struct inode_operations secretmem_iops = {
+	.setattr = secretmem_setattr,
+};
+
 static struct vfsmount *secretmem_mnt;
 
 static struct file *secretmem_file_create(unsigned long flags)
@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
 	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 	mapping_set_unevictable(inode->i_mapping);
 
+	inode->i_op = &secretmem_iops;
 	inode->i_mapping->a_ops = &secretmem_aops;
 
 	/* pretend we are a normal file with zero size */
diff --git a/mm/shmem.c b/mm/shmem.c
index 529c9ad3e926..4b2fea33158e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2513,7 +2513,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		pgoff_t end_index;
 		unsigned long nr, ret;
 		loff_t i_size = i_size_read(inode);
-		bool got_page;
 
 		end_index = i_size >> PAGE_SHIFT;
 		if (index > end_index)
@@ -2570,24 +2569,34 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			 */
 			if (!offset)
 				mark_page_accessed(page);
-			got_page = true;
+			/*
+			 * Ok, we have the page, and it's up-to-date, so
+			 * now we can copy it to user space...
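The secretmem change above wires up an inode_operations table whose only job is to reject size changes on a non-empty file; everything else falls through to simple_setattr(). A compact model of the policy check (the ATTR_SIZE bit, error value, and types are illustrative, not the kernel's):

#define MY_EINVAL	22
#define MY_ATTR_SIZE	0x8u

struct inode_model {
	long i_size;
};

static int secretmem_setattr_model(struct inode_model *inode,
				   unsigned int ia_valid)
{
	/* Truncating an already-populated secretmem file is refused. */
	if ((ia_valid & MY_ATTR_SIZE) && inode->i_size)
		return -MY_EINVAL;
	return 0;	/* would fall through to simple_setattr() */
}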
+			 */
+			ret = copy_page_to_iter(page, offset, nr, to);
+			put_page(page);
+
+		} else if (iter_is_iovec(to)) {
+			/*
+			 * Copy to user tends to be so well optimized, but
+			 * clear_user() not so much, that it is noticeably
+			 * faster to copy the zero page instead of clearing.
+			 */
+			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
 		} else {
-			page = ZERO_PAGE(0);
-			got_page = false;
+			/*
+			 * But submitting the same page twice in a row to
+			 * splice() - or others? - can result in confusion:
+			 * so don't attempt that optimization on pipes etc.
+			 */
+			ret = iov_iter_zero(nr, to);
 		}
 
-		/*
-		 * Ok, we have the page, and it's up-to-date, so
-		 * now we can copy it to user space...
-		 */
-		ret = copy_page_to_iter(page, offset, nr, to);
 		retval += ret;
 		offset += ret;
 		index += offset >> PAGE_SHIFT;
 		offset &= ~PAGE_MASK;
 
-		if (got_page)
-			put_page(page);
 		if (!iov_iter_count(to))
 			break;
 		if (ret < nr) {
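The shmem_file_read_iter() rewrite above splits hole reads into two cases: ordinary iovec readers copy the shared zero page (per the patch's comment, copying a prezeroed page tends to beat clear_user()), while pipe/splice readers get explicit zeroing so the same page is never queued twice. A user-space model of the three paths:

#include <string.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE 4096
static const char zero_page[PAGE];	/* stand-in for ZERO_PAGE(0) */

static size_t read_chunk(char *dst, const char *cached, size_t nr,
			 bool is_iovec)
{
	if (cached)
		memcpy(dst, cached, nr);	/* page cache hit */
	else if (is_iovec)
		memcpy(dst, zero_page, nr);	/* hole, plain read() */
	else
		memset(dst, 0, nr);		/* hole, pipe/splice */
	return nr;
}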
diff --git a/mm/slab.c b/mm/slab.c
index b04e40078bdf..0edb474edef1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
diff --git a/mm/slab.h b/mm/slab.h
index fd7ae2024897..95eb34174c1b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
 	void *kp_stack[KS_ADDRS_COUNT];
 	void *kp_free_stack[KS_ADDRS_COUNT];
 };
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6ee64d6208b3..2b3206a2c3b5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
 
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	if (__kfence_obj_info(kpp, object, slab))
+		return;
+	__kmem_obj_info(kpp, object, slab);
+}
+
 /**
  * kmem_dump_obj - Print available slab provenance information
  * @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
 	else
 		pr_cont(" slab%s", cp);
+	if (is_kfence_address(object))
+		pr_cont(" (kfence)");
 	if (kp.kp_objp)
 		pr_cont(" start %px", kp.kp_objp);
 	if (kp.kp_data_offset)
diff --git a/mm/slob.c b/mm/slob.c
index dfa6808dff36..40ea6e2d4ccd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -463,7 +463,7 @@ out:
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	kpp->kp_ptr = object;
 	kpp->kp_slab = slab;
diff --git a/mm/slub.c b/mm/slub.c
index 74d92aa4a3a2..ed5c2c03a47a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	void *base;
 	int __maybe_unused i;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e163372d3967..0b17498a34f1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1671,17 +1671,6 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
-#ifdef CONFIG_X86_64
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
-	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Purges all lazily-freed vmap areas.
  */
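The slab renames above exist so that slab_common.c can interpose a single kmem_obj_info() that asks KFENCE first and only then falls back to the allocator-specific __kmem_obj_info(). The dispatch shape, as a self-contained sketch with placeholder bodies (model names, not the kernel API):

#include <stdbool.h>
#include <stddef.h>

struct obj_info {
	void *ptr;
};

/* Placeholder: reports "not a KFENCE object" so the fallback runs. */
static bool kfence_obj_info_model(struct obj_info *kpp, void *object)
{
	(void)kpp;
	(void)object;
	return false;
}

static void slab_obj_info_model(struct obj_info *kpp, void *object)
{
	kpp->ptr = object;	/* the allocator's real fill-in goes here */
}

static void obj_info(struct obj_info *kpp, void *object)
{
	if (kfence_obj_info_model(kpp, object))
		return;
	slab_obj_info_model(kpp, object);
}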