Merge branch 'akpm' (patches from Andrew)

Merge updates from Andrew Morton:

 - a few hot fixes

 - ocfs2 updates

 - almost all of -mm (slab-generic, slab, slub, kmemleak, kasan,
   cleanups, debug, pagecache, memcg, gup, pagemap, memory-hotplug,
   sparsemem, vmalloc, initialization, z3fold, compaction, mempolicy,
   oom-kill, hugetlb, migration, thp, mmap, madvise, shmem, zswap,
   zsmalloc)

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (132 commits)
  mm/zsmalloc.c: fix a -Wunused-function warning
  zswap: do not map same object twice
  zswap: use movable memory if zpool support allocate movable memory
  zpool: add malloc_support_movable to zpool_driver
  shmem: fix obsolete comment in shmem_getpage_gfp()
  mm/madvise: reduce code duplication in error handling paths
  mm: mmap: increase sockets maximum memory size pgoff for 32bits
  mm/mmap.c: refine find_vma_prev() with rb_last()
  riscv: make mmap allocation top-down by default
  mips: use generic mmap top-down layout and brk randomization
  mips: replace arch specific way to determine 32bit task with generic version
  mips: adjust brk randomization offset to fit generic version
  mips: use STACK_TOP when computing mmap base address
  mips: properly account for stack randomization and stack guard gap
  arm: use generic mmap top-down layout and brk randomization
  arm: use STACK_TOP when computing mmap base address
  arm: properly account for stack randomization and stack guard gap
  arm64, mm: make randomization selected by generic topdown mmap layout
  arm64, mm: move generic mmap layout functions to mm
  arm64: consider stack randomization for mmap base only when necessary
  ...
Linus Torvalds 2019-09-24 16:10:23 -07:00
commit 9c9fa97a8e
204 changed files with 2275 additions and 2446 deletions

@@ -429,10 +429,15 @@ KernelVersion:	2.6.22
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The shrink file is written when memory should be reclaimed from
-		a cache. Empty partial slabs are freed and the partial list is
-		sorted so the slabs with the fewest available objects are used
-		first.
+		The shrink file is used to reclaim unused slab cache
+		memory from a cache. Empty per-cpu or partial slabs
+		are freed and the partial list is sorted so the slabs
+		with the fewest available objects are used first.
+		It only accepts a value of "1" on write for shrinking
+		the cache. Other input values are considered invalid.
+		Shrinking slab caches might be expensive and can
+		adversely impact other running applications. So it
+		should be used with care.

 What:		/sys/kernel/slab/cache/slab_size
 Date:		May 2007
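For illustration only (not part of the patch above), a minimal user-space sketch of the documented interface: write "1" to a cache's shrink file. The cache name "dentry" is just an example; any directory under /sys/kernel/slab/ would do, and the operation requires root.

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	/* Example cache chosen for illustration. */
  	int fd = open("/sys/kernel/slab/dentry/shrink", O_WRONLY);

  	if (fd < 0) {
  		perror("open");
  		return 1;
  	}
  	/* Only "1" is accepted; any other value is rejected as invalid. */
  	if (write(fd, "1", 1) != 1)
  		perror("write");
  	close(fd);
  	return 0;
  }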

@@ -85,8 +85,10 @@ Brief summary of control files.
 memory.oom_control		 set/show oom controls.
 memory.numa_stat		 show the number of memory usage per numa
				 node
 memory.kmem.limit_in_bytes	 set/show hard limit for kernel memory
+				 This knob is deprecated and shouldn't be
+				 used. It is planned that this be removed in
+				 the foreseeable future.
 memory.kmem.usage_in_bytes	 show current kernel memory allocation
 memory.kmem.failcnt		 show the number of kernel memory usage
				 hits limits

@@ -809,6 +809,8 @@
			enables the feature at boot time. By default, it is
			disabled and the system will work mostly the same as a
			kernel built without CONFIG_DEBUG_PAGEALLOC.
+			Note: to get most of debug_pagealloc error reports, it's
+			useful to also enable the page_owner functionality.
			on: enable the feature

	debugpat	[X86] Enable PAT debugging
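As a usage note (not part of the patch): on a kernel built with CONFIG_DEBUG_PAGEALLOC and CONFIG_PAGE_OWNER, booting with the parameters "debug_pagealloc=on page_owner=on" enables both, so that page_owner can report the allocation stack for the page that triggered a debug_pagealloc error.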

@@ -706,6 +706,17 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
	  and vice-versa 32-bit applications to call 64-bit mmap().
	  Required for applications doing different bitness syscalls.

+# This allows to use a set of generic functions to determine mmap base
+# address by giving priority to top-down scheme only if the process
+# is not in legacy mode (compat task, unlimited stack size or
+# sysctl_legacy_va_layout).
+# Architecture that selects this option can provide its own version of:
+# - STACK_RND_MASK
+config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+	bool
+	depends on MMU
+	select ARCH_HAS_ELF_RANDOMIZE
+
 config HAVE_COPY_THREAD_TLS
	bool
	help
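In practice an architecture opts in with a single "select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT" line in its Kconfig (as the arm, arm64, mips and riscv hunks below do), after which the generic mm code supplies arch_pick_mmap_layout(), arch_mmap_rnd() and arch_randomize_brk(), and the per-arch copies removed in this series become unnecessary.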

@@ -53,6 +53,4 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
	free_page((unsigned long)pmd);
 }

-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _ALPHA_PGALLOC_H */

@@ -359,11 +359,6 @@ extern void paging_init(void);

 #include <asm-generic/pgtable.h>

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
 #define HAVE_ARCH_UNMAPPED_AREA

@@ -129,7 +129,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)

 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)

-#define check_pgt_cache()   do { } while (0)
 #define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))

 #endif /* _ASM_ARC_PGALLOC_H */

@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 #endif /* __ASSEMBLY__ */

 #endif

@@ -34,6 +34,7 @@ config ARM
	select ARCH_SUPPORTS_ATOMIC_RMW
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
	select ARCH_WANT_IPC_PARSE_VERSION
	select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
	select BUILDTIME_EXTABLE_SORT if MMU

@@ -15,8 +15,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>

-#define check_pgt_cache()		do { } while (0)
-
 #ifdef CONFIG_MMU

 #define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))

@@ -70,11 +70,6 @@ typedef pte_t *pte_addr_t;
  */
 extern unsigned int kobjsize(const void *objp);

-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.

@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */

 #endif /* CONFIG_MMU */

@@ -140,8 +140,6 @@ static inline void prefetchw(const void *ptr)
 #endif
 #endif

-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 #endif

 #endif /* __ASM_ARM_PROCESSOR_H */

@@ -319,11 +319,6 @@ unsigned long get_wchan(struct task_struct *p)
	return 0;
 }

-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	return randomize_page(mm->brk, 0x02000000);
-}
-
 #ifdef CONFIG_MMU
 #ifdef CONFIG_KUSER_HELPERS
 /*

@@ -204,18 +204,17 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
	 * coherent with the kernels mapping.
	 */
	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
-			for (i = 0; i < (1 << compound_order(page)); i++) {
+			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
-			for (i = 0; i < (1 << compound_order(page)); i++) {
+			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);

@@ -17,33 +17,6 @@
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches. We need to ensure that
@@ -171,31 +144,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
	return addr;
 }

-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-	return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This
  * might go away in the future.

@@ -15,7 +15,6 @@ config ARM64
	select ARCH_HAS_DMA_COHERENT_TO_PFN
	select ARCH_HAS_DMA_PREP_COHERENT
	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
-	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FAST_MULTIPLIER
	select ARCH_HAS_FORTIFY_SOURCE
	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -71,6 +70,7 @@ config ARM64
	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
	select ARCH_SUPPORTS_NUMA_BALANCING
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
	select ARCH_WANT_FRAME_POINTERS
	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
	select ARCH_HAS_UBSAN_SANITIZE_ALL

@@ -15,8 +15,6 @@

 #include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

-#define check_pgt_cache()		do { } while (0)
-
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

 #if CONFIG_PGTABLE_LEVELS > 2

@@ -861,8 +861,6 @@ extern int kern_addr_valid(unsigned long addr);

 #include <asm-generic/pgtable.h>

-static inline void pgtable_cache_init(void) { }
-
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */

@@ -280,8 +280,6 @@ static inline void spin_lock_prefetch(const void *ptr)
		     "nop") : : "p" (ptr));
 }

-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
 extern void __init minsigstksz_setup(void);

@@ -557,14 +557,6 @@ unsigned long arch_align_stack(unsigned long sp)
	return sp & ~0xf;
 }

-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	if (is_compat_task())
-		return randomize_page(mm->brk, SZ_32M);
-	else
-		return randomize_page(mm->brk, SZ_1G);
-}
-
 /*
  * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
  */

@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);

@@ -20,78 +20,6 @@

 #include <asm/cputype.h>

-/*
- * Leave enough space between the mmap area and the stack to honour ulimit in
- * the face of randomisation.
- */
-#define MIN_GAP (SZ_128M)
-#define MAX_GAP	(STACK_TOP/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
-	if (test_thread_flag(TIF_32BIT))
-		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
-	else
-#endif
-		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-	return rnd << PAGE_SHIFT;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
-
-	/* Values close to RLIM_INFINITY can overflow. */
-	if (gap + pad > gap)
-		gap += pad;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(STACK_TOP - gap - rnd);
-}
-
-/*
- * This function, called very early during the creation of a new process VM
- * image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	/*
-	 * Fall back to the standard layout if the personality bit is set, or
-	 * if the expected stack growth is unlimited:
-	 */
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This might go
  * away in the future.

@@ -35,7 +35,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
		kmem_cache_free(pgd_cache, pgd);
 }

-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
	if (PGD_SIZE == PAGE_SIZE)
		return;

@@ -59,11 +59,6 @@ extern unsigned long empty_zero_page;

 #define swapper_pg_dir ((pgd_t *) 0)

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /*
  * c6x is !MMU, so define the simpliest implementation
  */

@@ -75,8 +75,6 @@ do { \
	tlb_remove_page(tlb, pte);	\
 } while (0)

-#define check_pgt_cache()	do {} while (0)
-
 extern void pagetable_init(void);
 extern void pre_mmu_init(void);
 extern void pre_trap_init(void);

@@ -296,11 +296,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)	(1)

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do {} while (0)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

@@ -4,7 +4,6 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
-#define pgtable_cache_init()   do { } while (0)
 extern void paging_init(void);
 #define PAGE_NONE	__pgprot(0)    /* these mean nothing to NO_MM */
 #define PAGE_SHARED	__pgprot(0)    /* these mean nothing to NO_MM */
@@ -34,11 +33,6 @@ static inline int pte_file(pte_t pte) { return 0; }
 extern unsigned int kobjsize(const void *objp);
 extern int is_in_rom(unsigned long);

-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.

@@ -13,8 +13,6 @@

 #include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

-#define check_pgt_cache() do {} while (0)
-
 extern unsigned long long kmap_generation;

 /*

@@ -431,9 +431,6 @@ static inline int pte_exec(pte_t pte)

 #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

-/*  I think this is in case we have page table caches; needed by init/main.c  */
-#define pgtable_cache_init()    do { } while (0)
-
 /*
  * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information. The remaining free bits are interpreted as

@@ -3,5 +3,5 @@
 # Makefile for Hexagon memory management subsystem
 #

-obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
 obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o

@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/init.h>
-
-void __init pgtable_cache_init(void)
-{
-}

@@ -72,10 +72,6 @@ config 64BIT
 config ZONE_DMA32
	def_bool y

-config QUICKLIST
-	bool
-	default y
-
 config MMU
	bool
	default y

@@ -19,18 +19,19 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/threads.h>
-#include <linux/quicklist.h>
+
+#include <asm-generic/pgalloc.h>

 #include <asm/mmu_context.h>

 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }

 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	quicklist_free(0, NULL, pgd);
+	free_page((unsigned long)pgd);
 }

 #if CONFIG_PGTABLE_LEVELS == 4
@@ -42,12 +43,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }

 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	quicklist_free(0, NULL, pud);
+	free_page((unsigned long)pud);
 }
 #define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
@@ -60,12 +61,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	quicklist_free(0, NULL, pmd);
+	free_page((unsigned long)pmd);
 }

 #define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)
@@ -83,43 +84,6 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
	pmd_val(*pmd_entry) = __pa(pte);
 }

-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
-	struct page *page;
-	void *pg;
-
-	pg = quicklist_alloc(0, GFP_KERNEL, NULL);
-	if (!pg)
-		return NULL;
-
-	page = virt_to_page(pg);
-	if (!pgtable_page_ctor(page)) {
-		quicklist_free(0, NULL, pg);
-		return NULL;
-	}
-
-	return page;
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-	pgtable_page_dtor(pte);
-	quicklist_free_page(0, NULL, pte);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	quicklist_free(0, NULL, pte);
-}
-
-static inline void check_pgt_cache(void)
-{
-	quicklist_trim(0, NULL, 25, 16);
-}
-
 #define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)

 #endif /* _ASM_IA64_PGALLOC_H */

@@ -566,11 +566,6 @@ extern struct page *zero_page_memmap_ptr;
 #define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
 #define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /* These tell get_user_pages() that the first gate page is accessible from user-level. */
 #define FIXADDR_USER_START	GATE_ADDR
 #ifdef HAVE_BUGGY_SEGREL

@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }

@@ -176,11 +176,4 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _M68K_PGTABLE_H */

@@ -44,11 +44,6 @@ extern void paging_init(void);
  */
 #define ZERO_PAGE(vaddr)	(virt_to_page(0))

-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -60,6 +55,4 @@ extern void paging_init(void);

 #include <asm-generic/pgtable.h>

-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _M68KNOMMU_PGTABLE_H */

@@ -21,83 +21,23 @@
 #include <asm/cache.h>
 #include <asm/pgtable.h>

-#define PGDIR_ORDER	0
-
-/*
- * This is handled very differently on MicroBlaze since out page tables
- * are all 0's and I want to be able to use these zero'd pages elsewhere
- * as well - it gives us quite a speedup.
- * -- Cort
- */
-extern struct pgtable_cache_struct {
-	unsigned long *pgd_cache;
-	unsigned long *pte_cache;
-	unsigned long pgtable_cache_sz;
-} quicklists;
-
-#define pgd_quicklist		(quicklists.pgd_cache)
-#define pmd_quicklist		((unsigned long *)0)
-#define pte_quicklist		(quicklists.pte_cache)
-#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
-
-extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
-extern atomic_t zero_sz; /* # currently pre-zero'd pages */
-extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
-extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
-extern atomic_t zerototal; /* # pages zero'd over time */
-
-#define zero_quicklist		(zero_cache)
-#define zero_cache_sz		(zero_sz)
-#define zero_cache_calls	(zeropage_calls)
-#define zero_cache_hits		(zeropage_hits)
-#define zero_cache_total	(zerototal)
-
-/*
- * return a pre-zero'd page from the list,
- * return NULL if none available -- Cort
- */
-extern unsigned long get_zero_page_fast(void);
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h>

 extern void __bad_pte(pmd_t *pmd);

-static inline pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd(void)
 {
-	pgd_t *ret;
-
-	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
-	if (ret != NULL)
-		clear_page(ret);
-	return ret;
+	return (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
 }

-static inline pgd_t *get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	ret = pgd_quicklist;
-	if (ret != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-	return (pgd_t *)ret;
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
-	*(unsigned long **)pgd = pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd(pgd_t *pgd)
 {
	free_page((unsigned long)pgd);
 }

-#define pgd_free(mm, pgd)	free_pgd_fast(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
+#define pgd_free(mm, pgd)	free_pgd(pgd)
+#define pgd_alloc(mm)		get_pgd()

 #define pmd_pgtable(pmd)	pmd_page(pmd)
@@ -110,50 +50,6 @@ static inline void free_pgd_slow(pgd_t *pgd)

 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);

-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
-	struct page *ptepage;
-
-#ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM;
-#else
-	int flags = GFP_KERNEL;
-#endif
-
-	ptepage = alloc_pages(flags, 0);
-	if (!ptepage)
-		return NULL;
-	clear_highpage(ptepage);
-	if (!pgtable_page_ctor(ptepage)) {
-		__free_page(ptepage);
-		return NULL;
-	}
-	return ptepage;
-}
-
-static inline void pte_free_fast(pte_t *pte)
-{
-	*(unsigned long **)pte = pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free_slow(struct page *ptepage)
-{
-	__free_page(ptepage);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
 #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, (pte))

 #define pmd_populate(mm, pmd, pte) \
@@ -171,10 +67,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
 #define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)
 #define pgd_populate(mm, pmd, pte)	BUG()

-extern int do_check_pgt_cache(int, int);
-
 #endif /* CONFIG_MMU */

-#define check_pgt_cache()		do { } while (0)
-
 #endif /* _ASM_MICROBLAZE_PGALLOC_H */

@@ -46,8 +46,6 @@ extern int mem_init_done;

 #define swapper_pg_dir ((pgd_t *) NULL)

-#define pgtable_cache_init()	do {} while (0)
-
 #define arch_enter_lazy_cpu_mode()	do {} while (0)

 #define pgprot_noncached_wc(prot)	prot
@@ -526,11 +524,6 @@ extern unsigned long iopa(unsigned long addr);
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)	(1)

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

@@ -44,10 +44,6 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);

-#ifndef CONFIG_SMP
-struct pgtable_cache_struct quicklists;
-#endif
-
 static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
 {

@@ -5,7 +5,6 @@ config MIPS
	select ARCH_32BIT_OFF_T if !64BIT
	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
	select ARCH_CLOCKSOURCE_DATA
-	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARCH_SUPPORTS_UPROBES
@@ -13,6 +12,7 @@ config MIPS
	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
	select ARCH_USE_QUEUED_RWLOCKS
	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
	select ARCH_WANT_IPC_PARSE_VERSION
	select BUILDTIME_EXTABLE_SORT
	select CLONE_BACKWARDS

@@ -105,8 +105,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)

 #endif /* __PAGETABLE_PUD_FOLDED */

-#define check_pgt_cache()	do { } while (0)
-
 extern void pagetable_init(void);

 #endif /* _ASM_PGALLOC_H */

@@ -661,9 +661,4 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 #endif /* _ASM_PGTABLE_H */

@@ -29,11 +29,6 @@

 extern unsigned int vced_count, vcei_count;

-/*
- * MIPS does have an arch_pick_mmap_layout()
- */
-#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-
 #ifdef CONFIG_32BIT
 #ifdef CONFIG_KVM_GUEST
 /* User space process size is limited to 1GB in KVM Guest Mode */

@@ -20,33 +20,6 @@
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);

-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
 #define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -144,63 +117,6 @@ arch_get_unmapped_area_topdown(struct file *filp,
			addr0, len, pgoff, flags, DOWN);
 }

-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
-	if (TASK_IS_32BIT_ADDR)
-		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
-	else
-#endif /* CONFIG_COMPAT */
-		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-	return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
-static inline unsigned long brk_rnd(void)
-{
-	unsigned long rnd = get_random_long();
-
-	rnd = rnd << PAGE_SHIFT;
-	/* 8MB for 32bit, 256MB for 64bit */
-	if (TASK_IS_32BIT_ADDR)
-		rnd = rnd & 0x7ffffful;
-	else
-		rnd = rnd & 0xffffffful;
-
-	return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long base = mm->brk;
-	unsigned long ret;
-
-	ret = PAGE_ALIGN(base + brk_rnd());
-
-	if (ret < mm->brk)
-		return mm->brk;
-
-	return ret;
-}
-
 bool __virt_addr_valid(const volatile void *kaddr)
 {
	unsigned long vaddr = (unsigned long)kaddr;

@@ -23,8 +23,6 @@
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);

-#define check_pgt_cache()		do { } while (0)
-
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
	pgtable_t pte;

@@ -403,8 +403,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * into virtual address `from'
  */

-#define pgtable_cache_init()		do { } while (0)
-
 #endif /* !__ASSEMBLY__ */

 #endif /* _ASMNDS32_PGTABLE_H */

@@ -45,6 +45,4 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
		tlb_remove_page((tlb), (pte));	\
	} while (0)

-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _ASM_NIOS2_PGALLOC_H */

@@ -291,8 +291,6 @@ static inline void pte_clear(struct mm_struct *mm,

 #include <asm-generic/pgtable.h>

-#define pgtable_cache_init()		do { } while (0)
-
 extern void __init paging_init(void);
 extern void __init mmu_init(void);

@@ -101,6 +101,4 @@ do { \

 #define pmd_pgtable(pmd) pmd_page(pmd)

-#define check_pgt_cache()          do { } while (0)
-
 #endif

@@ -443,11 +443,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,

 #include <asm-generic/pgtable.h>

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()		do { } while (0)
-
 typedef pte_t *pte_addr_t;

 #endif /* __ASSEMBLY__ */

@@ -124,6 +124,4 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
 #define pmd_pgtable(pmd) pmd_page(pmd)

-#define check_pgt_cache()	do { } while (0)
-
 #endif

@@ -132,8 +132,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PTRS_PER_PTE    (1UL << BITS_PER_PTE)

 /* Definitions for 2nd level */
-#define pgtable_cache_init()	do { } while (0)
-
 #define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))

@@ -64,8 +64,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 extern struct kmem_cache *pgtable_cache[];
 #define PGT_CACHE(shift) pgtable_cache[shift]

-static inline void check_pgt_cache(void) { }
-
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
 #else

@@ -87,7 +87,6 @@ extern unsigned long ioremap_bot;
 unsigned long vmalloc_to_phys(void *vmalloc_addr);

 void pgtable_cache_add(unsigned int shift);
-void pgtable_cache_init(void);

 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
 void mark_initmem_nx(void);

@@ -1748,7 +1748,7 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
	/*
	 * IF we try to do a HUGE PTE update after a withdraw is done.
	 * we will find the below NULL. This happens when we do
-	 * split_huge_page_pmd
+	 * split_huge_pmd
	 */
	if (!hpte_slot_array)
		return;

@@ -129,11 +129,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
		 * Allow to use larger than 64k IOMMU pages. Only do that
		 * if we are backed by hugetlb.
		 */
-		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
-			struct page *head = compound_head(page);
-
-			pageshift = compound_order(head) + PAGE_SHIFT;
-		}
+		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+			pageshift = page_shift(compound_head(page));
		mem->pageshift = min(mem->pageshift, pageshift);
		/*
		 * We don't need struct page reference any more, switch

@@ -667,7 +667,7 @@ void flush_dcache_icache_hugepage(struct page *page)

	BUG_ON(!PageCompound(page));

-	for (i = 0; i < (1UL << compound_order(page)); i++) {
+	for (i = 0; i < compound_nr(page); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
View File

@ -59,6 +59,18 @@ config RISCV
select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_GIGANTIC_PAGE
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT select SPARSEMEM_STATIC if 32BIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select HAVE_ARCH_MMAP_RND_BITS
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8
# max bits determined by the following formula:
# VA_BITS - PAGE_SHIFT - 3
config ARCH_MMAP_RND_BITS_MAX
default 24 if 64BIT # SV39 based
default 17
config MMU config MMU
def_bool y def_bool y

View File

@ -82,8 +82,4 @@ do { \
tlb_remove_page((tlb), pte); \ tlb_remove_page((tlb), pte); \
} while (0) } while (0)
static inline void check_pgt_cache(void)
{
}
#endif /* _ASM_RISCV_PGALLOC_H */ #endif /* _ASM_RISCV_PGALLOC_H */

@@ -424,11 +424,6 @@ extern void *dtb_early_va;
 extern void setup_bootmem(void);
 extern void paging_init(void);

-static inline void pgtable_cache_init(void)
-{
-	/* No page table caches to initialize */
-}
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)

@@ -1682,12 +1682,6 @@ extern void s390_reset_cmma(struct mm_struct *mm);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

-/*
- * No page table caches to initialise
- */
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
-
 #include <asm-generic/pgtable.h>

 #endif /* _S390_PAGE_H */

@@ -2,10 +2,8 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H

-#include <linux/quicklist.h>
 #include <asm/page.h>
-
-#define QUICK_PT 0	/* Other page table pages that are zero on free */
+#include <asm-generic/pgalloc.h>

 extern pgd_t *pgd_alloc(struct mm_struct *);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -29,41 +27,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)

-/*
- * Allocate and free page tables.
- */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-	return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
-	struct page *page;
-	void *pg;
-
-	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
-	if (!pg)
-		return NULL;
-
-	page = virt_to_page(pg);
-	if (!pgtable_page_ctor(page)) {
-		quicklist_free(QUICK_PT, NULL, pg);
-		return NULL;
-	}
-
-	return page;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	quicklist_free(QUICK_PT, NULL, pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-	pgtable_page_dtor(pte);
-	quicklist_free_page(QUICK_PT, NULL, pte);
-}
-
 #define __pte_free_tlb(tlb,pte,addr)		\
 do {						\
	pgtable_page_dtor(pte);			\
@@ -79,9 +42,4 @@ do { \
 } while (0);
 #endif

-static inline void check_pgt_cache(void)
-{
-	quicklist_trim(QUICK_PT, NULL, 25, 16);
-}
-
 #endif /* __ASM_SH_PGALLOC_H */

@@ -123,11 +123,6 @@ typedef pte_t *pte_addr_t;

 #define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

-/*
- * Initialise the page table caches
- */
-extern void pgtable_cache_init(void);
-
 struct vm_area_struct;
 struct mm_struct;
View File

@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
menu "Memory management options" menu "Memory management options"
config QUICKLIST
def_bool y
config MMU config MMU
bool "Support for memory management hardware" bool "Support for memory management hardware"
depends on !CPU_SH2 depends on !CPU_SH2

@@ -97,7 +97,3 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 }
-
-void pgtable_cache_init(void)
-{
-}

@@ -17,8 +17,6 @@ void srmmu_free_nocache(void *addr, int size);

 extern struct resource sparc_iomap;

-#define check_pgt_cache()	do { } while (0)
-
 pgd_t *get_pgd_fast(void);
 static inline void free_pgd_fast(pgd_t *pgd)
 {

@@ -69,8 +69,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 #define pmd_populate(MM, PMD, PTE)		pmd_set(MM, PMD, PTE)
 #define pmd_pgtable(PMD)			((pte_t *)__pmd_page(PMD))

-#define check_pgt_cache()	do { } while (0)
-
 void pgtable_free(void *table, bool is_page);

 #ifdef CONFIG_SMP

@@ -445,9 +445,4 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA

-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 #endif /* !(_SPARC_PGTABLE_H) */

@@ -1135,7 +1135,6 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
			  unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA

-void pgtable_cache_init(void);
 void sun4v_register_fault_status(void);
 void sun4v_ktsb_register(void);
 void __init cheetah_ecache_flush_init(void);

@@ -31,7 +31,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
-#include <asm/pgalloc.h>	/* bug in asm-generic/tlb.h: check_pgt_cache */
 #include <asm/setup.h>
 #include <asm/tlb.h>
 #include <asm/prom.h>

@@ -43,7 +43,5 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 #define __pmd_free_tlb(tlb,x, address)   tlb_remove_page((tlb),virt_to_page(x))
 #endif

-#define check_pgt_cache()	do { } while (0)
-
 #endif

@@ -32,8 +32,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* zero page used for uninitialized stuff */
 extern unsigned long *empty_zero_page;

-#define pgtable_cache_init() do ; while (0)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that

@@ -18,8 +18,6 @@
 #define __HAVE_ARCH_PTE_ALLOC_ONE
 #include <asm-generic/pgalloc.h>

-#define check_pgt_cache()		do { } while (0)
-
 #define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_PRESENT)
 #define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_PRESENT)

@@ -285,8 +285,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

 #include <asm-generic/pgtable.h>

-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */

 #endif /* __UNICORE_PGTABLE_H__ */

@@ -29,8 +29,6 @@ extern pgd_t swapper_pg_dir[1024];
 extern pgd_t initial_page_table[1024];
 extern pmd_t initial_pg_pmd[];

-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }

 void paging_init(void);
 void sync_initial_page_table(void);

@@ -241,9 +241,6 @@ extern void cleanup_highmap(void);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

-#define pgtable_cache_init()   do { } while (0)
-#define check_pgt_cache()      do { } while (0)
-
 #define PAGE_AGP    PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1

@@ -357,7 +357,7 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,

 static struct kmem_cache *pgd_cache;

-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
	/*
	 * When PAE kernel is running as a Xen domain, it does not use
@@ -402,10 +402,6 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else

-void __init pgd_cache_init(void)
-{
-}
-
 static inline pgd_t *_pgd_alloc(void)
 {
	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,

@@ -238,7 +238,6 @@ extern void paging_init(void);
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
 #endif
-static inline void pgtable_cache_init(void) { }

 /*
  * The pmd contains the kernel virtual address of the pte page.

@@ -160,9 +160,6 @@ static inline void invalidate_dtlb_mapping (unsigned address)
	invalidate_dtlb_entry(tlb_entry);
 }

-#define check_pgt_cache()	do { } while (0)
-
 /*
  * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
  * ISA and exist only for test purposes..

@@ -100,26 +100,9 @@ unsigned long __weak memory_block_size_bytes(void)
 }
 EXPORT_SYMBOL_GPL(memory_block_size_bytes);

-static unsigned long get_memory_block_size(void)
-{
-	unsigned long block_sz;
-
-	block_sz = memory_block_size_bytes();
-
-	/* Validate blk_sz is a power of 2 and not less than section size */
-	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
-		WARN_ON(1);
-		block_sz = MIN_MEMORY_BLOCK_SIZE;
-	}
-
-	return block_sz;
-}
-
 /*
- * use this as the physical section index that this memsection
- * uses.
+ * Show the first physical section index (number) of this memory block.
  */
 static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
 {
@@ -131,7 +114,10 @@ static ssize_t phys_index_show(struct device *dev,
 }

 /*
- * Show whether the section of memory is likely to be hot-removable
+ * Show whether the memory block is likely to be offlineable (or is already
+ * offline). Once offline, the memory block could be removed. The return
+ * value does, however, not indicate that there is a way to remove the
+ * memory block.
  */
 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
@@ -455,12 +441,12 @@ static DEVICE_ATTR_RO(phys_device);
 static DEVICE_ATTR_RO(removable);

 /*
- * Block size attribute stuff
+ * Show the memory block size (shared by all memory blocks).
  */
 static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%lx\n", get_memory_block_size());
+	return sprintf(buf, "%lx\n", memory_block_size_bytes());
 }

 static DEVICE_ATTR_RO(block_size_bytes);
@@ -670,10 +656,10 @@ static int init_memory_block(struct memory_block **memory,
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
-	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);
+	mem->nid = NUMA_NO_NODE;

	ret = register_memory(mem);
@@ -810,19 +796,22 @@ static const struct attribute_group *memory_root_attr_groups[] = {
 /*
  * Initialize the sysfs support for memory devices...
  */
-int __init memory_dev_init(void)
+void __init memory_dev_init(void)
 {
	int ret;
	int err;
	unsigned long block_sz, nr;

+	/* Validate the configured memory block size */
+	block_sz = memory_block_size_bytes();
+	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
+		panic("Memory block size not suitable: 0x%lx\n", block_sz);
+	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+
	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

-	block_sz = get_memory_block_size();
-	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
-
	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
@@ -838,8 +827,7 @@ int __init memory_dev_init(void)
 out:
	if (ret)
-		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
-	return ret;
+		panic("%s() failed: %d\n", __func__, ret);
 }

 /**


@@ -427,6 +427,8 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       "Node %d AnonHugePages: %8lu kB\n"
 		       "Node %d ShmemHugePages: %8lu kB\n"
 		       "Node %d ShmemPmdMapped: %8lu kB\n"
+		       "Node %d FileHugePages: %8lu kB\n"
+		       "Node %d FilePmdMapped: %8lu kB\n"
 #endif
 			,
 		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
@@ -452,6 +454,10 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
 				       HPAGE_PMD_NR),
 		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
+				       HPAGE_PMD_NR),
+		       nid, K(node_page_state(pgdat, NR_FILE_THPS) *
+				       HPAGE_PMD_NR),
+		       nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
 				       HPAGE_PMD_NR)
 #endif
 		       );
@@ -756,15 +762,13 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 static int register_mem_sect_under_node(struct memory_block *mem_blk,
 					 void *arg)
 {
+	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
 	int ret, nid = *(int *)arg;
-	unsigned long pfn, sect_start_pfn, sect_end_pfn;
+	unsigned long pfn;
 
-	mem_blk->nid = nid;
-
-	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
-	sect_end_pfn += PAGES_PER_SECTION - 1;
-	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
 		int page_nid;
 
 		/*
@@ -789,6 +793,13 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 			if (page_nid != nid)
 				continue;
 		}
+
+		/*
+		 * If this memory block spans multiple nodes, we only indicate
+		 * the last processed node.
+		 */
+		mem_blk->nid = nid;
+
 		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
 					&mem_blk->dev.kobj,
 					kobject_name(&mem_blk->dev.kobj));
@@ -804,32 +815,18 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 }
 
 /*
- * Unregister memory block device under all nodes that it spans.
- * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ * Unregister a memory block device under the node it spans. Memory blocks
+ * with multiple nodes cannot be offlined and therefore also never be removed.
  */
 void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 {
-	unsigned long pfn, sect_start_pfn, sect_end_pfn;
-	static nodemask_t unlinked_nodes;
-
-	nodes_clear(unlinked_nodes);
-	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
-	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
-		int nid;
-
-		nid = get_nid_for_pfn(pfn);
-		if (nid < 0)
-			continue;
-		if (!node_online(nid))
-			continue;
-		if (node_test_and_set(nid, unlinked_nodes))
-			continue;
-		sysfs_remove_link(&node_devices[nid]->dev.kobj,
-				  kobject_name(&mem_blk->dev.kobj));
-		sysfs_remove_link(&mem_blk->dev.kobj,
-				  kobject_name(&node_devices[nid]->dev.kobj));
-	}
+	if (mem_blk->nid == NUMA_NO_NODE)
+		return;
+
+	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
+			  kobject_name(&mem_blk->dev.kobj));
+	sysfs_remove_link(&mem_blk->dev.kobj,
+			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }
 
 int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)


@@ -1078,7 +1078,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 							   __GFP_NORETRY,
 							   order);
 				if (page)
-					pg_size <<=
-						compound_order(page);
+					pg_size <<= order;
 			}
 			if (!page) {
 				page = alloc_page(gfp);


@@ -174,7 +174,6 @@ via_map_blit_for_device(struct pci_dev *pdev,
 static void
 via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
 {
-	struct page *page;
 	int i;
 
 	switch (vsg->state) {
@@ -189,13 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
 		kfree(vsg->desc_pages);
 		/* fall through */
 	case dr_via_pages_locked:
-		for (i = 0; i < vsg->num_pages; ++i) {
-			if (NULL != (page = vsg->pages[i])) {
-				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
-					SetPageDirty(page);
-				put_page(page);
-			}
-		}
+		put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+					  (vsg->direction == DMA_FROM_DEVICE));
 		/* fall through */
 	case dr_via_pages_alloc:
 		vfree(vsg->pages);


@@ -54,10 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
 		page = sg_page_iter_page(&sg_iter);
-		if (umem->writable && dirty)
-			put_user_pages_dirty_lock(&page, 1);
-		else
-			put_user_page(page);
+		put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
 	}
 
 	sg_free_table(&umem->sg_head);


@@ -118,10 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 			     size_t npages, bool dirty)
 {
-	if (dirty)
-		put_user_pages_dirty_lock(p, npages);
-	else
-		put_user_pages(p, npages);
+	put_user_pages_dirty_lock(p, npages, dirty);
 
 	if (mm) { /* during close after signal, mm can be NULL */
 		atomic64_sub(npages, &mm->pinned_vm);


@@ -40,10 +40,7 @@
 static void __qib_release_user_pages(struct page **p, size_t num_pages,
 				     int dirty)
 {
-	if (dirty)
-		put_user_pages_dirty_lock(p, num_pages);
-	else
-		put_user_pages(p, num_pages);
+	put_user_pages_dirty_lock(p, num_pages, dirty);
 }
 
 /**


@@ -75,10 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
 		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
 			page = sg_page(sg);
 			pa = sg_phys(sg);
-			if (dirty)
-				put_user_pages_dirty_lock(&page, 1);
-			else
-				put_user_page(page);
+			put_user_pages_dirty_lock(&page, 1, dirty);
 			usnic_dbg("pa: %pa\n", &pa);
 		}
 		kfree(chunk);


@@ -63,15 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
 static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
 			   bool dirty)
 {
-	struct page **p = chunk->plist;
-
-	while (num_pages--) {
-		if (!PageDirty(*p) && dirty)
-			put_user_pages_dirty_lock(p, 1);
-		else
-			put_user_page(*p);
-		p++;
-	}
+	put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
 }
 
 void siw_umem_release(struct siw_umem *umem, bool dirty)


@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}


@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));


@@ -176,13 +176,13 @@ static long tce_iommu_register_pages(struct tce_container *container,
 }
 
 static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
-		unsigned int page_shift)
+		unsigned int it_page_shift)
 {
 	struct page *page;
 	unsigned long size = 0;
 
-	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
-		return size == (1UL << page_shift);
+	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
+		return size == (1UL << it_page_shift);
 
 	page = pfn_to_page(hpa >> PAGE_SHIFT);
 
 	/*
@@ -190,7 +190,7 @@ static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
 	 * a page we just found. Otherwise the hardware can get access to
 	 * a bigger memory chunk that it should.
	 */
-	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
+	return page_shift(compound_head(page)) >= it_page_shift;
 }
 
 static inline bool tce_groups_attached(struct tce_container *container)


@@ -670,26 +670,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
  * libraries. There is no binary dependent code anywhere else.
  */
 
-#ifndef STACK_RND_MASK
-#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
-#endif
-
-static unsigned long randomize_stack_top(unsigned long stack_top)
-{
-	unsigned long random_variable = 0;
-
-	if (current->flags & PF_RANDOMIZE) {
-		random_variable = get_random_long();
-		random_variable &= STACK_RND_MASK;
-		random_variable <<= PAGE_SHIFT;
-	}
-
-#ifdef CONFIG_STACK_GROWSUP
-	return PAGE_ALIGN(stack_top) + random_variable;
-#else
-	return PAGE_ALIGN(stack_top) - random_variable;
-#endif
-}
-
 static int load_elf_binary(struct linux_binprm *bprm)
 {
 	struct file *interpreter = NULL; /* to shut gcc up */


@@ -1100,8 +1100,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
 			err = -ENOMEM;
 			goto error;
 		}
+		/* Avoid race with userspace read via bdev */
+		lock_buffer(bhs[n]);
 		memset(bhs[n]->b_data, 0, sb->s_blocksize);
 		set_buffer_uptodate(bhs[n]);
+		unlock_buffer(bhs[n]);
 		mark_buffer_dirty_inode(bhs[n], dir);
 
 		n++;
@@ -1158,6 +1161,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
 	fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
 
 	de = (struct msdos_dir_entry *)bhs[0]->b_data;
+	/* Avoid race with userspace read via bdev */
+	lock_buffer(bhs[0]);
 	/* filling the new directory slots ("." and ".." entries) */
 	memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
 	memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1180,6 +1185,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
 	de[0].size = de[1].size = 0;
 	memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
 	set_buffer_uptodate(bhs[0]);
+	unlock_buffer(bhs[0]);
 	mark_buffer_dirty_inode(bhs[0], dir);
 
 	err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1237,11 +1243,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
 		/* fill the directory entry */
 		copy = min(size, sb->s_blocksize);
+		/* Avoid race with userspace read via bdev */
+		lock_buffer(bhs[n]);
 		memcpy(bhs[n]->b_data, slots, copy);
+		set_buffer_uptodate(bhs[n]);
+		unlock_buffer(bhs[n]);
+		mark_buffer_dirty_inode(bhs[n], dir);
 		slots += copy;
 		size -= copy;
-		set_buffer_uptodate(bhs[n]);
-		mark_buffer_dirty_inode(bhs[n], dir);
 		if (!size)
 			break;
 		n++;


@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
 			err = -ENOMEM;
 			goto error;
 		}
+		/* Avoid race with userspace read via bdev */
+		lock_buffer(c_bh);
 		memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
 		set_buffer_uptodate(c_bh);
+		unlock_buffer(c_bh);
 		mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
 		if (sb->s_flags & SB_SYNCHRONOUS)
 			err = sync_dirty_buffer(c_bh);


@@ -181,6 +181,9 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	mapping->flags = 0;
 	mapping->wb_err = 0;
 	atomic_set(&mapping->i_mmap_writable, 0);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_set(&mapping->nr_thps, 0);
+#endif
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->private_data = NULL;
 	mapping->writeback_index = 0;


@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;


@@ -89,8 +89,6 @@ EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
 EXPORT_SYMBOL(jbd2_journal_invalidatepage);
 EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
 EXPORT_SYMBOL(jbd2_journal_force_commit);
-EXPORT_SYMBOL(jbd2_journal_inode_add_write);
-EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
 EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
 EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);


@@ -2622,18 +2622,6 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
 	return 0;
 }
 
-int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
-{
-	return jbd2_journal_file_inode(handle, jinode,
-			JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
-}
-
-int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
-{
-	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
-			LLONG_MAX);
-}
-
 int jbd2_journal_inode_ranged_write(handle_t *handle,
 		struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
 {

Some files were not shown because too many files have changed in this diff.