Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
 "VM:
   - z3fold fixes and enhancements by Henry Burns and Vitaly Wool

   - more accurate reclaimed slab caches calculations by Yafang Shao

   - fix MAP_UNINITIALIZED UAPI symbol to not depend on config, by
     Christoph Hellwig

   - !CONFIG_MMU fixes by Christoph Hellwig

   - new novmcoredd parameter to omit device dumps from vmcore, by
     Kairui Song

   - new test_meminit module for testing heap and pagealloc
     initialization, by Alexander Potapenko

   - ioremap improvements for huge mappings, by Anshuman Khandual

   - generalize kprobe page fault handling, by Anshuman Khandual

   - device-dax hotplug fixes and improvements, by Pavel Tatashin

   - enable synchronous DAX fault on powerpc, by Aneesh Kumar K.V

   - add pte_devmap() support for arm64, by Robin Murphy

   - unify locked_vm accounting with a helper, by Daniel Jordan (a usage
     sketch follows this message)

   - several misc fixes

  core/lib:
   - new typeof_member() macro including some users, by Alexey Dobriyan

   - make BIT() and GENMASK() available in asm, by Masahiro Yamada

   - changed LIST_POISON2 on x86_64 to 0xdead000000000122 for better
     code generation, by Alexey Dobriyan

   - rbtree code size optimizations, by Michel Lespinasse

   - convert struct pid count to refcount_t, by Joel Fernandes

  get_maintainer.pl:
   - add --no-moderated switch to skip moderated mailing lists, by Joe
     Perches

  misc:
   - ptrace PTRACE_GET_SYSCALL_INFO interface

   - coda updates

   - gdb scripts, various"

[ Using merge message suggestion from Vlastimil Babka, with some editing - Linus ]
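
A note before the shortlog: many hunks below replace open-coded
RLIMIT_MEMLOCK bookkeeping with the locked_vm helper mentioned above. A
minimal sketch of the call pattern, taken from the callers in this diff
(error handling trimmed; npages stands for whatever page count a driver
pins):

	/* charge npages against mm->locked_vm, honoring RLIMIT_MEMLOCK */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)	/* -ENOMEM when over the limit without CAP_IPC_LOCK */
		return ret;

	/* ... pin pages, do the work ... */

	/* matching uncharge on teardown */
	account_locked_vm(current->mm, npages, false);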

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
  fs/select.c: use struct_size() in kmalloc()
  mm: add account_locked_vm utility function
  arm64: mm: implement pte_devmap support
  mm: introduce ARCH_HAS_PTE_DEVMAP
  mm: clean up is_device_*_page() definitions
  mm/mmap: move common defines to mman-common.h
  mm: move MAP_SYNC to asm-generic/mman-common.h
  device-dax: "Hotremove" persistent memory that is used like normal RAM
  mm/hotplug: make remove_memory() interface usable
  device-dax: fix memory and resource leak if hotplug fails
  include/linux/lz4.h: fix spelling and copy-paste errors in documentation
  ipc/mqueue.c: only perform resource calculation if user valid
  include/asm-generic/bug.h: fix "cut here" for WARN_ON for __WARN_TAINT architectures
  scripts/gdb: add helpers to find and list devices
  scripts/gdb: add lx-genpd-summary command
  drivers/pps/pps.c: clear offset flags in PPS_SETPARAMS ioctl
  kernel/pid.c: convert struct pid count to refcount_t
  drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
  select: shift restore_saved_sigmask_unless() into poll_select_copy_remaining()
  select: change do_poll() to return -ERESTARTNOHAND rather than -EINTR
  ...
commit 57a8ec387e (Linus Torvalds, 2019-07-17 08:58:04 -07:00)
151 changed files with 2672 additions and 1223 deletions


@ -2877,6 +2877,17 @@
/sys/module/printk/parameters/console_suspend) to
turn on/off it dynamically.
novmcoredd [KNL,KDUMP]
Disable device dump. Device dump allows drivers to
append dump data to vmcore so you can collect driver
specified debug info. Drivers can append the data
without any limit and this data is stored in memory,
so this may cause significant memory stress. Disabling
device dump can help save memory but the driver debug
data will be no longer available. This parameter
is only available when CONFIG_PROC_VMCORE_DEVICE_DUMP
is set.
noaliencache [MM, NUMA, SLAB] Disables the allocation of alien
caches in the slab allocator. Saves per-node memory,
but will impact performance.

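A quick usage note for the novmcoredd entry added above (a minimal example;
everything besides the flag itself is illustrative): booting the capture
kernel with

	console=ttyS0 novmcoredd

appended to its command line omits driver device dumps from /proc/vmcore.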

@ -4,7 +4,7 @@ OHCI
Required properties:
- compatible: should be "samsung,s3c2410-ohci" for USB host controller
- reg: address and lenght of the controller memory mapped region
- reg: address and length of the controller memory mapped region
- interrupts: interrupt number for the USB OHCI controller
- clocks: Should reference the bus and host clocks
- clock-names: Should contain two strings


@ -481,7 +481,10 @@ kernel support.
struct coda_timespec {
int64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
struct coda_vattr {
enum coda_vtype va_type; /* vnode type (for create) */
@ -493,9 +496,9 @@ kernel support.
long va_fileid; /* file id */
u_quad_t va_size; /* file size in bytes */
long va_blocksize; /* blocksize preferred for i/o */
struct timespec va_atime; /* time of last access */
struct timespec va_mtime; /* time of last modification */
struct timespec va_ctime; /* time file changed */
struct coda_timespec va_atime; /* time of last access */
struct coda_timespec va_mtime; /* time of last modification */
struct coda_timespec va_ctime; /* time file changed */
u_long va_gen; /* generation number of file */
u_long va_flags; /* flags defined for file */
dev_t va_rdev; /* device special file represents */


@ -93,11 +93,6 @@ static inline void * phys_to_virt(unsigned long address)
#define page_to_phys(page) page_to_pa(page)
static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
{
return page_to_phys(page);
}
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff


@ -32,7 +32,7 @@
#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H
#include <linux/const.h>
#include <linux/bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
@ -215,11 +215,11 @@
#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD (32 - PGDIR_SHIFT)
#define PGDIR_SIZE _BITUL(PGDIR_SHIFT) /* vaddr span, not PDG sz */
#define PGDIR_SIZE BIT(PGDIR_SHIFT) /* vaddr span, not PDG sz */
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PTE _BITUL(BITS_FOR_PTE)
#define PTRS_PER_PGD _BITUL(BITS_FOR_PGD)
#define PTRS_PER_PTE BIT(BITS_FOR_PTE)
#define PTRS_PER_PGD BIT(BITS_FOR_PGD)
/*
* Number of entries a user land program use.

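The _BITUL()-to-BIT() conversions in this and several later hunks rely on
the "make BIT() and GENMASK() available in asm" item from the merge
message. A sketch of the layering being assumed (simplified from
include/uapi/linux/const.h plus include/linux/const.h and
include/linux/bits.h; these definitions are not part of this diff):

	/* uapi const.h: the UL suffix is attached only in C, not in asm */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)
	#endif
	#define _UL(x)		(_AC(x, UL))

	/* linux/const.h and linux/bits.h */
	#define UL(x)		(_UL(x))
	#define BIT(nr)		(UL(1) << (nr))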

@ -10,6 +10,7 @@
#error "Incorrect ctop.h include"
#endif
#include <linux/bits.h>
#include <linux/types.h>
#include <soc/nps/common.h>
@ -51,19 +52,19 @@
#define CTOP_INST_AXOR_DI_R2_R2_R3 0x4A664C06
/* Do not use D$ for address in 2G-3G */
#define HW_COMPLY_KRN_NOT_D_CACHED _BITUL(28)
#define HW_COMPLY_KRN_NOT_D_CACHED BIT(28)
#define NPS_MSU_EN_CFG 0x80
#define NPS_CRG_BLKID 0x480
#define NPS_CRG_SYNC_BIT _BITUL(0)
#define NPS_CRG_SYNC_BIT BIT(0)
#define NPS_GIM_BLKID 0x5C0
/* GIM registers and fields*/
#define NPS_GIM_UART_LINE _BITUL(7)
#define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE _BITUL(10)
#define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE _BITUL(11)
#define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE _BITUL(25)
#define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE _BITUL(26)
#define NPS_GIM_UART_LINE BIT(7)
#define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE BIT(10)
#define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE BIT(11)
#define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE BIT(25)
#define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE BIT(26)
#ifndef __ASSEMBLY__
/* Functional registers definition */


@ -30,7 +30,6 @@
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*


@ -27,28 +27,6 @@
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
int ret = 0;
if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, fsr))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
return 0;
}
#endif
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
@ -265,7 +243,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (notify_page_fault(regs, fsr))
if (kprobe_page_fault(regs, fsr))
return 0;
tsk = current;

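This hunk removes one of several per-arch notify_page_fault() copies in
favor of the generalized kprobe_page_fault() from the merge message. A
sketch of the helper, reconstructed from the copies removed in this diff
(the preemptible() form mirrors the x86 variant removed further down;
treat it as an approximation rather than the exact patch):

	static inline bool kprobe_page_fault(struct pt_regs *regs,
					     unsigned int trap)
	{
		if (!kprobes_built_in() || user_mode(regs))
			return false;
		/*
		 * kprobe_running() needs smp_processor_id(), so the
		 * caller must not be preemptible here - the same rule
		 * the removed preempt_disable()/preempt_enable()
		 * pairs enforced.
		 */
		if (preemptible())
			return false;
		if (!kprobe_running())
			return false;
		return kprobe_fault_handler(regs, trap);
	}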

@ -24,6 +24,7 @@ config ARM64
select ARCH_HAS_KCOV
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_DIRECT_MAP


@ -16,6 +16,7 @@
#define PTE_WRITE (PTE_DBM) /* same as DBM (51) */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
#ifndef __ASSEMBLY__


@ -79,6 +79,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
#define pte_cont_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
@ -206,6 +207,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
static inline pte_t pte_mkdevmap(pte_t pte)
{
return set_pte_bit(pte, __pgprot(PTE_DEVMAP));
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
WRITE_ONCE(*ptep, pte);
@ -388,6 +394,11 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
#endif
#define pmd_mkdevmap(pmd) pte_pmd(pte_mkdevmap(pmd_pte(pmd)))
#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
@ -673,6 +684,16 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
{
return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
static inline int pud_devmap(pud_t pud)
{
return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
}
#endif
/*


@ -9,7 +9,7 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
#include <linux/const.h>
#include <linux/bits.h>
#include <linux/stringify.h>
/*
@ -478,31 +478,31 @@
#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_DSSBS (_BITUL(44))
#define SCTLR_ELx_ENIA (_BITUL(31))
#define SCTLR_ELx_ENIB (_BITUL(30))
#define SCTLR_ELx_ENDA (_BITUL(27))
#define SCTLR_ELx_EE (_BITUL(25))
#define SCTLR_ELx_IESB (_BITUL(21))
#define SCTLR_ELx_WXN (_BITUL(19))
#define SCTLR_ELx_ENDB (_BITUL(13))
#define SCTLR_ELx_I (_BITUL(12))
#define SCTLR_ELx_SA (_BITUL(3))
#define SCTLR_ELx_C (_BITUL(2))
#define SCTLR_ELx_A (_BITUL(1))
#define SCTLR_ELx_M (_BITUL(0))
#define SCTLR_ELx_DSSBS (BIT(44))
#define SCTLR_ELx_ENIA (BIT(31))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
#define SCTLR_ELx_EE (BIT(25))
#define SCTLR_ELx_IESB (BIT(21))
#define SCTLR_ELx_WXN (BIT(19))
#define SCTLR_ELx_ENDB (BIT(13))
#define SCTLR_ELx_I (BIT(12))
#define SCTLR_ELx_SA (BIT(3))
#define SCTLR_ELx_C (BIT(2))
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
/* SCTLR_EL2 specific flags. */
#define SCTLR_EL2_RES1 ((_BITUL(4)) | (_BITUL(5)) | (_BITUL(11)) | (_BITUL(16)) | \
(_BITUL(18)) | (_BITUL(22)) | (_BITUL(23)) | (_BITUL(28)) | \
(_BITUL(29)))
#define SCTLR_EL2_RES0 ((_BITUL(6)) | (_BITUL(7)) | (_BITUL(8)) | (_BITUL(9)) | \
(_BITUL(10)) | (_BITUL(13)) | (_BITUL(14)) | (_BITUL(15)) | \
(_BITUL(17)) | (_BITUL(20)) | (_BITUL(24)) | (_BITUL(26)) | \
(_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \
#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
(BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
(BIT(29)))
#define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \
(BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \
(BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \
(BIT(27)) | (BIT(30)) | (BIT(31)) | \
(0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
@ -524,23 +524,23 @@
#endif
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI (_BITUL(26))
#define SCTLR_EL1_E0E (_BITUL(24))
#define SCTLR_EL1_SPAN (_BITUL(23))
#define SCTLR_EL1_NTWE (_BITUL(18))
#define SCTLR_EL1_NTWI (_BITUL(16))
#define SCTLR_EL1_UCT (_BITUL(15))
#define SCTLR_EL1_DZE (_BITUL(14))
#define SCTLR_EL1_UMA (_BITUL(9))
#define SCTLR_EL1_SED (_BITUL(8))
#define SCTLR_EL1_ITD (_BITUL(7))
#define SCTLR_EL1_CP15BEN (_BITUL(5))
#define SCTLR_EL1_SA0 (_BITUL(4))
#define SCTLR_EL1_UCI (BIT(26))
#define SCTLR_EL1_E0E (BIT(24))
#define SCTLR_EL1_SPAN (BIT(23))
#define SCTLR_EL1_NTWE (BIT(18))
#define SCTLR_EL1_NTWI (BIT(16))
#define SCTLR_EL1_UCT (BIT(15))
#define SCTLR_EL1_DZE (BIT(14))
#define SCTLR_EL1_UMA (BIT(9))
#define SCTLR_EL1_SED (BIT(8))
#define SCTLR_EL1_ITD (BIT(7))
#define SCTLR_EL1_CP15BEN (BIT(5))
#define SCTLR_EL1_SA0 (BIT(4))
#define SCTLR_EL1_RES1 ((_BITUL(11)) | (_BITUL(20)) | (_BITUL(22)) | (_BITUL(28)) | \
(_BITUL(29)))
#define SCTLR_EL1_RES0 ((_BITUL(6)) | (_BITUL(10)) | (_BITUL(13)) | (_BITUL(17)) | \
(_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \
#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
(BIT(29)))
#define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \
(BIT(27)) | (BIT(30)) | (BIT(31)) | \
(0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
@ -756,13 +756,13 @@
#define ZCR_ELx_LEN_SIZE 9
#define ZCR_ELx_LEN_MASK 0x1ff
#define CPACR_EL1_ZEN_EL1EN (_BITUL(16)) /* enable EL1 access */
#define CPACR_EL1_ZEN_EL0EN (_BITUL(17)) /* enable EL0 access, if EL1EN set */
#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (_BITUL(31))
#define SYS_MPIDR_SAFE_VAL (BIT(31))
#ifdef __ASSEMBLY__


@ -59,28 +59,6 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
return debug_fault_info + DBG_ESR_EVT(esr);
}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, esr))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
return 0;
}
#endif
static void data_abort_decode(unsigned int esr)
{
pr_alert("Data abort info:\n");
@ -434,7 +412,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (notify_page_fault(regs, esr))
if (kprobe_page_fault(regs, esr))
return 0;
/*


@ -942,6 +942,11 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
return dt_virt;
}
int __init arch_ioremap_p4d_supported(void)
{
return 0;
}
int __init arch_ioremap_pud_supported(void)
{
/*


@ -9,6 +9,8 @@
#define _ASM_HEXAGON_SYSCALL_H
#include <uapi/linux/audit.h>
#include <linux/err.h>
#include <asm/ptrace.h>
typedef long (*syscall_fn)(unsigned long, unsigned long,
unsigned long, unsigned long,
@ -31,6 +33,18 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
return IS_ERR_VALUE(regs->r00) ? regs->r00 : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->r00;
}
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_HEXAGON;


@ -21,28 +21,6 @@
extern int die(char *, struct pt_regs *, long);
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
int ret = 0;
if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
return 0;
}
#endif
/*
* Return TRUE if ADDRESS points at a page in the kernel's mapped segment
* (inside region 5, on ia64) and that page is present.
@ -116,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* This is to handle the kprobes on user space access instructions
*/
if (notify_page_fault(regs, TRAP_BRKPT))
if (kprobe_page_fault(regs, TRAP_BRKPT))
return;
if (user_mode(regs))


@ -149,8 +149,6 @@ static inline void *isa_bus_to_virt(unsigned long address)
return phys_to_virt(address);
}
#define isa_page_to_bus page_to_phys
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.


@ -41,6 +41,7 @@ do { \
#define kretprobe_blacklist_size 0
void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {


@ -89,6 +89,12 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
unreachable();
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
return regs->regs[7] ? -regs->regs[2] : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{


@ -398,7 +398,7 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();


@ -26,7 +26,8 @@ struct pt_regs;
*
* It's only valid to call this when @task is known to be blocked.
*/
int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
static inline int
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
return regs->syscallno;
}
@ -47,7 +48,8 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
* system call instruction. This may not be the same as what the
* register state looked like at system call entry tracing.
*/
void syscall_rollback(struct task_struct *task, struct pt_regs *regs)
static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->uregs[0] = regs->orig_r0;
}
@ -62,7 +64,8 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs)
* It's only valid to call this when @task is stopped for tracing on exit
* from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
*/
long syscall_get_error(struct task_struct *task, struct pt_regs *regs)
static inline long
syscall_get_error(struct task_struct *task, struct pt_regs *regs)
{
unsigned long error = regs->uregs[0];
return IS_ERR_VALUE(error) ? error : 0;
@ -79,7 +82,8 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs)
* It's only valid to call this when @task is stopped for tracing on exit
* from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
*/
long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
static inline long
syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
{
return regs->uregs[0];
}
@ -99,8 +103,9 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
* It's only valid to call this when @task is stopped for tracing on exit
* from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
*/
void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
int error, long val)
static inline void
syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
int error, long val)
{
regs->uregs[0] = (long)error ? error : val;
}
@ -118,8 +123,9 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
*/
#define SYSCALL_MAX_ARGS 6
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args)
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args)
{
args[0] = regs->orig_r0;
args++;
@ -138,8 +144,9 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
*/
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
const unsigned long *args)
static inline void
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
const unsigned long *args)
{
regs->orig_r0 = args[0];
args++;


@ -29,6 +29,13 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
args[0] = regs->gr[26];
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
unsigned long error = regs->gr[28];
return IS_ERR_VALUE(error) ? error : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{


@ -129,6 +129,7 @@ config PPC
select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64
@ -136,7 +137,6 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_ZONE_DEVICE if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT


@ -90,7 +90,6 @@
#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
#define __HAVE_ARCH_PTE_DEVMAP
/*
* Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE


@ -35,6 +35,16 @@ static inline void syscall_rollback(struct task_struct *task,
regs->gpr[3] = regs->orig_gpr3;
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{


@ -21,15 +21,11 @@
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
#define MCL_FUTURE 0x4000 /* lock all additions to address space */
#define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
/* Override any generic PKEY permission defines */
#define PKEY_DISABLE_EXECUTE 0x4
#undef PKEY_ACCESS_MASK


@ -19,6 +19,7 @@
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@ -45,43 +46,6 @@ static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
long ret = 0;
if (!current || !current->mm)
return ret; /* process exited */
down_write(&current->mm->mmap_sem);
if (inc) {
unsigned long locked, lock_limit;
locked = current->mm->locked_vm + stt_pages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
current->mm->locked_vm += stt_pages;
} else {
if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
stt_pages = current->mm->locked_vm;
current->mm->locked_vm -= stt_pages;
}
pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
inc ? '+' : '-',
stt_pages << PAGE_SHIFT,
current->mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
up_write(&current->mm->mmap_sem);
return ret;
}
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
@ -291,7 +255,7 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
kvm_put_kvm(stt->kvm);
kvmppc_account_memlimit(
account_locked_vm(current->mm,
kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
call_rcu(&stt->rcu, release_spapr_tce_table);
@ -316,7 +280,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
return -EINVAL;
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
if (ret)
return ret;
@ -362,7 +326,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
kfree(stt);
fail_acct:
kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
return ret;
}


@ -14,6 +14,7 @@
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
#include <linux/mm_inline.h>
@ -46,40 +47,6 @@ struct mm_iommu_table_group_mem_t {
u64 dev_hpa; /* Device memory base address */
};
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
unsigned long npages, bool incr)
{
long ret = 0, locked, lock_limit;
if (!npages)
return 0;
down_write(&mm->mmap_sem);
if (incr) {
locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
mm->locked_vm += npages;
} else {
if (WARN_ON_ONCE(npages > mm->locked_vm))
npages = mm->locked_vm;
mm->locked_vm -= npages;
}
pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
current ? current->pid : 0,
incr ? '+' : '-',
npages << PAGE_SHIFT,
mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
up_write(&mm->mmap_sem);
return ret;
}
bool mm_iommu_preregistered(struct mm_struct *mm)
{
return !list_empty(&mm->context.iommu_group_mem_list);
@ -96,7 +63,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
unsigned long entry, chunk;
if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
ret = mm_iommu_adjust_locked_vm(mm, entries, true);
ret = account_locked_vm(mm, entries, true);
if (ret)
return ret;
@ -211,7 +178,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
kfree(mem);
unlock_exit:
mm_iommu_adjust_locked_vm(mm, locked_entries, false);
account_locked_vm(mm, locked_entries, false);
return ret;
}
@ -311,7 +278,7 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
unlock_exit:
mutex_unlock(&mem_list_mutex);
mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
account_locked_vm(mm, unlock_entries, false);
return ret;
}


@ -1237,3 +1237,8 @@ int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
return 0;
}
}
int __init arch_ioremap_p4d_supported(void)
{
return 0;
}


@ -42,26 +42,6 @@
#include <asm/debug.h>
#include <asm/kup.h>
static inline bool notify_page_fault(struct pt_regs *regs)
{
bool ret = false;
#ifdef CONFIG_KPROBES
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
ret = true;
preempt_enable();
}
#endif /* CONFIG_KPROBES */
if (unlikely(debugger_fault_handler(regs)))
ret = true;
return ret;
}
/*
* Check whether the instruction inst is a store using
* an update addressing form which will update r1.
@ -461,8 +441,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
int is_write = page_fault_is_write(error_code);
vm_fault_t fault, major = 0;
bool must_retry = false;
bool kprobe_fault = kprobe_page_fault(regs, 11);
if (notify_page_fault(regs))
if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
return 0;
if (unlikely(page_fault_is_bad(error_code))) {


@ -8,27 +8,27 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
#include <linux/const.h>
#include <linux/bits.h>
#define CR0_CLOCK_COMPARATOR_SIGN _BITUL(63 - 10)
#define CR0_EMERGENCY_SIGNAL_SUBMASK _BITUL(63 - 49)
#define CR0_EXTERNAL_CALL_SUBMASK _BITUL(63 - 50)
#define CR0_CLOCK_COMPARATOR_SUBMASK _BITUL(63 - 52)
#define CR0_CPU_TIMER_SUBMASK _BITUL(63 - 53)
#define CR0_SERVICE_SIGNAL_SUBMASK _BITUL(63 - 54)
#define CR0_UNUSED_56 _BITUL(63 - 56)
#define CR0_INTERRUPT_KEY_SUBMASK _BITUL(63 - 57)
#define CR0_MEASUREMENT_ALERT_SUBMASK _BITUL(63 - 58)
#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10)
#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49)
#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50)
#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52)
#define CR0_CPU_TIMER_SUBMASK BIT(63 - 53)
#define CR0_SERVICE_SIGNAL_SUBMASK BIT(63 - 54)
#define CR0_UNUSED_56 BIT(63 - 56)
#define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57)
#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58)
#define CR2_GUARDED_STORAGE _BITUL(63 - 59)
#define CR2_GUARDED_STORAGE BIT(63 - 59)
#define CR14_UNUSED_32 _BITUL(63 - 32)
#define CR14_UNUSED_33 _BITUL(63 - 33)
#define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35)
#define CR14_RECOVERY_SUBMASK _BITUL(63 - 36)
#define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37)
#define CR14_EXTERNAL_DAMAGE_SUBMASK _BITUL(63 - 38)
#define CR14_WARNING_SUBMASK _BITUL(63 - 39)
#define CR14_UNUSED_32 BIT(63 - 32)
#define CR14_UNUSED_33 BIT(63 - 33)
#define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35)
#define CR14_RECOVERY_SUBMASK BIT(63 - 36)
#define CR14_DEGRADATION_SUBMASK BIT(63 - 37)
#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(63 - 38)
#define CR14_WARNING_SUBMASK BIT(63 - 39)
#ifndef __ASSEMBLY__


@ -12,7 +12,7 @@
#ifndef _ASM_S390_NMI_H
#define _ASM_S390_NMI_H
#include <linux/const.h>
#include <linux/bits.h>
#include <linux/types.h>
#define MCIC_SUBCLASS_MASK (1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \
@ -20,15 +20,15 @@
1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \
1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \
1ULL<<45 | 1ULL<<44)
#define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63)
#define MCCK_CODE_EXT_DAMAGE _BITUL(63 - 5)
#define MCCK_CODE_CP _BITUL(63 - 9)
#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
#define MCCK_CODE_CR_VALID _BITUL(63 - 29)
#define MCCK_CODE_GS_VALID _BITUL(63 - 36)
#define MCCK_CODE_FC_VALID _BITUL(63 - 43)
#define MCCK_CODE_SYSTEM_DAMAGE BIT(63)
#define MCCK_CODE_EXT_DAMAGE BIT(63 - 5)
#define MCCK_CODE_CP BIT(63 - 9)
#define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46)
#define MCCK_CODE_PSW_MWP_VALID BIT(63 - 20)
#define MCCK_CODE_PSW_IA_VALID BIT(63 - 23)
#define MCCK_CODE_CR_VALID BIT(63 - 29)
#define MCCK_CODE_GS_VALID BIT(63 - 36)
#define MCCK_CODE_FC_VALID BIT(63 - 43)
#ifndef __ASSEMBLY__


@ -12,7 +12,7 @@
#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
#include <linux/const.h>
#include <linux/bits.h>
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
@ -24,15 +24,15 @@
#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
#define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
#define _CIF_FPU _BITUL(CIF_FPU)
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU)
#define _CIF_MCCK_PENDING BIT(CIF_MCCK_PENDING)
#define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
#define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
#define _CIF_FPU BIT(CIF_FPU)
#define _CIF_IGNORE_IRQ BIT(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
#ifndef __ASSEMBLY__


@ -7,7 +7,7 @@
#ifndef _S390_PTRACE_H
#define _S390_PTRACE_H
#include <linux/const.h>
#include <linux/bits.h>
#include <uapi/asm/ptrace.h>
#define PIF_SYSCALL 0 /* inside a system call */
@ -15,10 +15,10 @@
#define PIF_SYSCALL_RESTART 2 /* restart the current system call */
#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
#define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART)
#define _PIF_GUEST_FAULT _BITUL(PIF_GUEST_FAULT)
#define _PIF_SYSCALL BIT(PIF_SYSCALL)
#define _PIF_PER_TRAP BIT(PIF_PER_TRAP)
#define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART)
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#ifndef __ASSEMBLY__


@ -6,7 +6,7 @@
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H
#include <linux/const.h>
#include <linux/bits.h>
#include <uapi/asm/setup.h>
#define EP_OFFSET 0x10008
@ -21,25 +21,25 @@
* Machine features detected in early.c
*/
#define MACHINE_FLAG_VM _BITUL(0)
#define MACHINE_FLAG_KVM _BITUL(1)
#define MACHINE_FLAG_LPAR _BITUL(2)
#define MACHINE_FLAG_DIAG9C _BITUL(3)
#define MACHINE_FLAG_ESOP _BITUL(4)
#define MACHINE_FLAG_IDTE _BITUL(5)
#define MACHINE_FLAG_DIAG44 _BITUL(6)
#define MACHINE_FLAG_EDAT1 _BITUL(7)
#define MACHINE_FLAG_EDAT2 _BITUL(8)
#define MACHINE_FLAG_TOPOLOGY _BITUL(10)
#define MACHINE_FLAG_TE _BITUL(11)
#define MACHINE_FLAG_TLB_LC _BITUL(12)
#define MACHINE_FLAG_VX _BITUL(13)
#define MACHINE_FLAG_TLB_GUEST _BITUL(14)
#define MACHINE_FLAG_NX _BITUL(15)
#define MACHINE_FLAG_GS _BITUL(16)
#define MACHINE_FLAG_SCC _BITUL(17)
#define MACHINE_FLAG_VM BIT(0)
#define MACHINE_FLAG_KVM BIT(1)
#define MACHINE_FLAG_LPAR BIT(2)
#define MACHINE_FLAG_DIAG9C BIT(3)
#define MACHINE_FLAG_ESOP BIT(4)
#define MACHINE_FLAG_IDTE BIT(5)
#define MACHINE_FLAG_DIAG44 BIT(6)
#define MACHINE_FLAG_EDAT1 BIT(7)
#define MACHINE_FLAG_EDAT2 BIT(8)
#define MACHINE_FLAG_TOPOLOGY BIT(10)
#define MACHINE_FLAG_TE BIT(11)
#define MACHINE_FLAG_TLB_LC BIT(12)
#define MACHINE_FLAG_VX BIT(13)
#define MACHINE_FLAG_TLB_GUEST BIT(14)
#define MACHINE_FLAG_NX BIT(15)
#define MACHINE_FLAG_GS BIT(16)
#define MACHINE_FLAG_SCC BIT(17)
#define LPP_MAGIC _BITUL(31)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
/* Offsets to entry points in kernel/head.S */


@ -8,7 +8,7 @@
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#include <linux/const.h>
#include <linux/bits.h>
/*
* General size of kernel stacks
@ -82,21 +82,21 @@ void arch_setup_new_exec(void);
#define TIF_SECCOMP 26 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */
#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING)
#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED)
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING)
#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
#define _TIF_31BIT BIT(TIF_31BIT)
#define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP)
#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP BIT(TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT BIT(TIF_SYSCALL_TRACEPOINT)
#endif /* _ASM_THREAD_INFO_H */


@ -67,20 +67,6 @@ static int __init fault_init(void)
}
early_initcall(fault_init);
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
/*
* Find out which address space caused the exception.
*/
@ -412,7 +398,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
*/
clear_pt_regs_flag(regs, PIF_PER_TRAP);
if (notify_page_fault(regs))
if (kprobe_page_fault(regs, 14))
return 0;
mm = tsk->mm;


@ -24,20 +24,6 @@
#include <asm/tlbflush.h>
#include <asm/traps.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
int ret = 0;
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1;
preempt_enable();
}
return ret;
}
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
@ -412,14 +398,14 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (unlikely(fault_in_kernel_space(address))) {
if (vmalloc_fault(address) >= 0)
return;
if (notify_page_fault(regs, vec))
if (kprobe_page_fault(regs, vec))
return;
bad_area_nosemaphore(regs, error_code, address);
return;
}
if (unlikely(notify_page_fault(regs, vec)))
if (unlikely(kprobe_page_fault(regs, vec)))
return;
/* Only enable interrupts if they were on before the fault */


@ -22,10 +22,4 @@
#define MCL_FUTURE 0x4000 /* lock all additions to address space */
#define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
#endif /* _UAPI__SPARC_MMAN_H__ */


@ -38,20 +38,6 @@
int show_unhandled_signals = 1;
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
preempt_enable();
}
return ret;
}
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
struct pt_regs *regs)
@ -285,7 +271,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
fault_code = get_thread_fault_code();
if (notify_page_fault(regs))
if (kprobe_page_fault(regs, 0))
goto exit_exception;
si_code = SEGV_MAPERR;


@ -70,6 +70,7 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
@ -80,7 +81,6 @@ config X86
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_ZONE_DEVICE if X86_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT


@ -165,7 +165,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
{
return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
#define isa_bus_to_virt phys_to_virt
/*


@ -271,7 +271,7 @@ static inline int has_transparent_hugepage(void)
return boot_cpu_has(X86_FEATURE_PSE);
}
#ifdef __HAVE_ARCH_PTE_DEVMAP
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_DEVMAP);
@ -732,7 +732,7 @@ static inline int pte_present(pte_t a)
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef __HAVE_ARCH_PTE_DEVMAP
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;

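With __HAVE_ARCH_PTE_DEVMAP replaced by a Kconfig symbol, architectures
that do not select ARCH_HAS_PTE_DEVMAP are expected to fall back to a
generic stub instead of per-arch defines. A sketch of the assumed
fallback (shape inferred from the #ifdef changes above; not shown in this
diff):

	#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
	static inline int pte_devmap(pte_t pte)
	{
		return 0;	/* no devmap (ZONE_DEVICE) ptes without arch support */
	}
	#endif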

@ -103,7 +103,6 @@
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define __HAVE_ARCH_PTE_DEVMAP
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#define _PAGE_DEVMAP (_AT(pteval_t, 0))


@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
return 0;
}
static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
if (!kprobes_built_in())
return 0;
if (user_mode(regs))
return 0;
/*
* To be potentially processing a kprobe fault and to be allowed to call
* kprobe_running(), we have to be non-preemptible.
*/
if (preemptible())
return 0;
if (!kprobe_running())
return 0;
return kprobe_fault_handler(regs, X86_TRAP_PF);
}
/*
* Prefetch quirks:
*
@ -1282,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
return;
/* kprobes don't want to hook the spurious faults: */
if (kprobes_fault(regs))
if (kprobe_page_fault(regs, X86_TRAP_PF))
return;
/*
@ -1313,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs,
mm = tsk->mm;
/* kprobes don't want to hook the spurious faults: */
if (unlikely(kprobes_fault(regs)))
if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*


@ -459,6 +459,11 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
int __init arch_ioremap_p4d_supported(void)
{
return 0;
}
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64


@ -56,12 +56,8 @@
#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
* uninitialized */
#else
# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
#endif
/*
* Flags for msync


@ -43,6 +43,7 @@ struct dax_region {
* @target_node: effective numa node if dev_dax memory range is onlined
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @dax_mem_res: physical address range of hotadded DAX memory
*/
struct dev_dax {
struct dax_region *region;
@ -50,6 +51,7 @@ struct dev_dax {
int target_node;
struct device dev;
struct dev_pagemap pgmap;
struct resource *dax_kmem_res;
};
static inline struct dev_dax *to_dev_dax(struct device *dev)


@ -66,23 +66,59 @@ int dev_dax_kmem_probe(struct device *dev)
new_res->name = dev_name(dev);
rc = add_memory(numa_node, new_res->start, resource_size(new_res));
if (rc)
if (rc) {
release_resource(new_res);
kfree(new_res);
return rc;
}
dev_dax->dax_kmem_res = new_res;
return 0;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static int dev_dax_kmem_remove(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct resource *res = dev_dax->dax_kmem_res;
resource_size_t kmem_start = res->start;
resource_size_t kmem_size = resource_size(res);
int rc;
/*
* We have one shot for removing memory, if some memory blocks were not
* offline prior to calling this function remove_memory() will fail, and
* there is no way to hotremove this memory until reboot because device
* unbind will succeed even if we return failure.
*/
rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size);
if (rc) {
dev_err(dev,
"DAX region %pR cannot be hotremoved until the next reboot\n",
res);
return rc;
}
/* Release and free dax resources */
release_resource(res);
kfree(res);
dev_dax->dax_kmem_res = NULL;
return 0;
}
#else
static int dev_dax_kmem_remove(struct device *dev)
{
/*
* Purposely leak the request_mem_region() for the device-dax
* range and return '0' to ->remove() attempts. The removal of
* the device from the driver always succeeds, but the region
* is permanently pinned as reserved by the unreleased
* Without hotremove purposely leak the request_mem_region() for the
* device-dax range and return '0' to ->remove() attempts. The removal
* of the device from the driver always succeeds, but the region is
* permanently pinned as reserved by the unreleased
* request_mem_region().
*/
return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
static struct dax_device_driver device_dax_kmem_driver = {
.drv = {

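As the comment in the hunk above spells out, hot-removal only succeeds
when every memory block of the DAX region is offline before the device is
unbound. With the standard memory-hotplug sysfs ABI that is roughly (the
paths and block/device names are illustrative, not taken from this diff):

	# offline each memory block backing the dax region first
	echo offline > /sys/devices/system/memory/memory42/state
	# only then unbind the device from the kmem driver
	echo dax0.0 > /sys/bus/dax/drivers/kmem/unbind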

@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include "dfl-afu.h"
@ -31,52 +32,6 @@ void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
afu->dma_regions = RB_ROOT;
}
/**
* afu_dma_adjust_locked_vm - adjust locked memory
* @dev: port device
* @npages: number of pages
* @incr: increase or decrease locked memory
*
* Increase or decrease the locked memory size with npages input.
*
* Return 0 on success.
* Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
*/
static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
{
unsigned long locked, lock_limit;
int ret = 0;
/* the task is exiting. */
if (!current->mm)
return 0;
down_write(&current->mm->mmap_sem);
if (incr) {
locked = current->mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
current->mm->locked_vm += npages;
} else {
if (WARN_ON_ONCE(npages > current->mm->locked_vm))
npages = current->mm->locked_vm;
current->mm->locked_vm -= npages;
}
dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
incr ? '+' : '-', npages << PAGE_SHIFT,
current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
ret ? "- exceeded" : "");
up_write(&current->mm->mmap_sem);
return ret;
}
/**
* afu_dma_pin_pages - pin pages of given dma memory region
* @pdata: feature device platform data
@ -92,7 +47,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
struct device *dev = &pdata->dev->dev;
int ret, pinned;
ret = afu_dma_adjust_locked_vm(dev, npages, true);
ret = account_locked_vm(current->mm, npages, true);
if (ret)
return ret;
@ -121,7 +76,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
free_pages:
kfree(region->pages);
unlock_vm:
afu_dma_adjust_locked_vm(dev, npages, false);
account_locked_vm(current->mm, npages, false);
return ret;
}
@ -141,7 +96,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
put_all_pages(region->pages, npages);
kfree(region->pages);
afu_dma_adjust_locked_vm(dev, npages, false);
account_locked_vm(current->mm, npages, false);
dev_dbg(dev, "%ld pages unpinned\n", npages);
}


@ -53,7 +53,7 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
pad = round_up(skb->len, 4) + 4 - skb->len;
/* First packet of a A-MSDU burst keeps track of the whole burst
* length, need to update lenght of it and the last packet.
* length, need to update length of it and the last packet.
*/
skb_walk_frags(skb, iter) {
last = iter;


@ -152,6 +152,14 @@ static long pps_cdev_ioctl(struct file *file,
pps->params.mode |= PPS_CANWAIT;
pps->params.api_version = PPS_API_VERS;
/*
* Clear unused fields of pps_kparams to avoid leaking
* uninitialized data of the PPS_SETPARAMS caller via
* PPS_GETPARAMS
*/
pps->params.assert_off_tu.flags = 0;
pps->params.clear_off_tu.flags = 0;
spin_unlock_irq(&pps->lock);
break;


@ -1686,6 +1686,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT;
dev_info.name[sizeof(dev_info.name) - 1] = '\0';
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount);
@ -1817,6 +1818,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT;
dev_info.name[sizeof(dev_info.name) - 1] = '\0';
mport = priv->md->mport;


@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <asm/iommu.h>
#include <asm/tce.h>
@ -31,51 +32,6 @@
static void tce_iommu_detach_group(void *iommu_data,
struct iommu_group *iommu_group);
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
long ret = 0, locked, lock_limit;
if (WARN_ON_ONCE(!mm))
return -EPERM;
if (!npages)
return 0;
down_write(&mm->mmap_sem);
locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
mm->locked_vm += npages;
pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
npages << PAGE_SHIFT,
mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
up_write(&mm->mmap_sem);
return ret;
}
static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
if (!mm || !npages)
return;
down_write(&mm->mmap_sem);
if (WARN_ON_ONCE(npages > mm->locked_vm))
npages = mm->locked_vm;
mm->locked_vm -= npages;
pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
npages << PAGE_SHIFT,
mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
up_write(&mm->mmap_sem);
}
/*
* VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
*
@ -333,7 +289,7 @@ static int tce_iommu_enable(struct tce_container *container)
return ret;
locked = table_group->tce32_size >> PAGE_SHIFT;
ret = try_increment_locked_vm(container->mm, locked);
ret = account_locked_vm(container->mm, locked, true);
if (ret)
return ret;
@ -352,7 +308,7 @@ static void tce_iommu_disable(struct tce_container *container)
container->enabled = false;
BUG_ON(!container->mm);
decrement_locked_vm(container->mm, container->locked_pages);
account_locked_vm(container->mm, container->locked_pages, false);
}
static void *tce_iommu_open(unsigned long arg)
@ -656,7 +612,7 @@ static long tce_iommu_create_table(struct tce_container *container,
if (!table_size)
return -EINVAL;
ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
if (ret)
return ret;
@ -675,7 +631,7 @@ static void tce_iommu_free_table(struct tce_container *container,
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
iommu_tce_table_put(tbl);
decrement_locked_vm(container->mm, pages);
account_locked_vm(container->mm, pages, false);
}
static long tce_iommu_create_window(struct tce_container *container,


@ -272,21 +272,8 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
ret = down_write_killable(&mm->mmap_sem);
if (!ret) {
if (npage > 0) {
if (!dma->lock_cap) {
unsigned long limit;
limit = task_rlimit(dma->task,
RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (mm->locked_vm + npage > limit)
ret = -ENOMEM;
}
}
if (!ret)
mm->locked_vm += npage;
ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
dma->lock_cap);
up_write(&mm->mmap_sem);
}

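The __account_locked_vm() call above is the task-aware inner helper behind
account_locked_vm(). A sketch reconstructed from the open-coded copies
removed elsewhere in this diff (the real helper presumably also logs and
asserts locking; the caller holds mmap_sem for writing, as here):

	static int __account_locked_vm(struct mm_struct *mm, unsigned long pages,
				       bool inc, struct task_struct *task,
				       bool bypass_rlim)
	{
		unsigned long locked_vm, limit;
		int ret = 0;

		locked_vm = mm->locked_vm;
		if (inc) {
			if (!bypass_rlim) {
				limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
				if (locked_vm + pages > limit)
					ret = -ENOMEM;
			}
			if (!ret)
				mm->locked_vm = locked_vm + pages;
		} else {
			WARN_ON_ONCE(pages > locked_vm);
			mm->locked_vm = locked_vm - pages;
		}
		return ret;
	}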

@ -2094,7 +2094,6 @@ SYSCALL_DEFINE6(io_pgetevents,
const struct __aio_sigset __user *, usig)
{
struct __aio_sigset ksig = { NULL, };
sigset_t ksigmask, sigsaved;
struct timespec64 ts;
bool interrupted;
int ret;
@ -2105,14 +2104,14 @@ SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
if (ret)
return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
interrupted = signal_pending(current);
restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
restore_saved_sigmask_unless(interrupted);
if (interrupted && !ret)
ret = -ERESTARTNOHAND;
@ -2130,7 +2129,6 @@ SYSCALL_DEFINE6(io_pgetevents_time32,
const struct __aio_sigset __user *, usig)
{
struct __aio_sigset ksig = { NULL, };
sigset_t ksigmask, sigsaved;
struct timespec64 ts;
bool interrupted;
int ret;
@ -2142,14 +2140,14 @@ SYSCALL_DEFINE6(io_pgetevents_time32,
return -EFAULT;
ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
if (ret)
return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
interrupted = signal_pending(current);
restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
restore_saved_sigmask_unless(interrupted);
if (interrupted && !ret)
ret = -ERESTARTNOHAND;
@ -2198,7 +2196,6 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
const struct __compat_aio_sigset __user *, usig)
{
struct __compat_aio_sigset ksig = { NULL, };
sigset_t ksigmask, sigsaved;
struct timespec64 t;
bool interrupted;
int ret;
@ -2209,14 +2206,14 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
if (ret)
return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
interrupted = signal_pending(current);
restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
restore_saved_sigmask_unless(interrupted);
if (interrupted && !ret)
ret = -ERESTARTNOHAND;
@ -2234,7 +2231,6 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
const struct __compat_aio_sigset __user *, usig)
{
struct __compat_aio_sigset ksig = { NULL, };
sigset_t ksigmask, sigsaved;
struct timespec64 t;
bool interrupted;
int ret;
@ -2245,14 +2241,14 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
if (ret)
return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
interrupted = signal_pending(current);
restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
restore_saved_sigmask_unless(interrupted);
if (interrupted && !ret)
ret = -ERESTARTNOHAND;

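The aio conversions above replace the old three-argument sigmask dance
with set_user_sigmask() plus restore_saved_sigmask_unless(). A sketch of
the latter, consistent with how it is called here (assumed shape; the
point is that the saved mask now lives in the task instead of a local
sigsaved copy):

	static inline void restore_saved_sigmask_unless(bool interrupted)
	{
		if (interrupted)
			WARN_ON(!test_thread_flag(TIF_SIGPENDING));
		else
			restore_saved_sigmask();
	}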

@ -1127,7 +1127,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
load_addr, interp_load_addr);
if (retval < 0)
goto out;
/* N.B. passed_fileno might not be initialized? */
current->mm->end_code = end_code;
current->mm->start_code = start_code;
current->mm->start_data = start_data;


@ -431,7 +431,6 @@ static int load_flat_file(struct linux_binprm *bprm,
unsigned long len, memp, memp_size, extra, rlim;
__be32 __user *reloc;
u32 __user *rp;
struct inode *inode;
int i, rev, relocs;
loff_t fpos;
unsigned long start_code, end_code;
@ -439,7 +438,6 @@ static int load_flat_file(struct linux_binprm *bprm,
int ret;
hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */
inode = file_inode(bprm->file);
text_len = ntohl(hdr->data_start);
data_len = ntohl(hdr->data_end) - ntohl(hdr->data_start);


@ -6,7 +6,8 @@
obj-$(CONFIG_CODA_FS) += coda.o
coda-objs := psdev.o cache.o cnode.o inode.o dir.o file.o upcall.o \
coda_linux.o symlink.o pioctl.o sysctl.o
coda_linux.o symlink.o pioctl.o
coda-$(CONFIG_SYSCTL) += sysctl.o
# If you want debugging output, please uncomment the following line.


@ -21,7 +21,7 @@
#include <linux/spinlock.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_cache.h"


@ -8,8 +8,8 @@
#include <linux/time.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include <linux/pagemap.h>
#include "coda_psdev.h"
#include "coda_linux.h"
static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2)
@ -137,11 +137,6 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
struct inode *inode;
unsigned long hash = coda_f2i(fid);
if ( !sb ) {
pr_warn("%s: no sb!\n", __func__);
return NULL;
}
inode = ilookup5(sb, hash, coda_test_inode, fid);
if ( !inode )
return NULL;
@ -153,6 +148,16 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
return inode;
}
struct coda_file_info *coda_ftoc(struct file *file)
{
struct coda_file_info *cfi = file->private_data;
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
return cfi;
}
/* the CONTROL inode is made without asking attributes from Venus */
struct inode *coda_cnode_makectl(struct super_block *sb)
{


@ -40,10 +40,9 @@ struct coda_file_info {
int cfi_magic; /* magic number */
struct file *cfi_container; /* container file for this cnode */
unsigned int cfi_mapcount; /* nr of times this file is mapped */
bool cfi_access_intent; /* is access intent supported */
};
#define CODA_FTOC(file) ((struct coda_file_info *)((file)->private_data))
/* flags */
#define C_VATTR 0x1 /* Validity of vattr in inode */
#define C_FLUSH 0x2 /* used after a flush */
@ -54,6 +53,7 @@ struct inode *coda_cnode_make(struct CodaFid *, struct super_block *);
struct inode *coda_iget(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *attr);
struct inode *coda_cnode_makectl(struct super_block *sb);
struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb);
struct coda_file_info *coda_ftoc(struct file *file);
void coda_replace_fid(struct inode *, struct CodaFid *, struct CodaFid *);
#endif


@ -13,9 +13,19 @@ extern int coda_fake_statfs;
void coda_destroy_inodecache(void);
int __init coda_init_inodecache(void);
int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync);
#ifdef CONFIG_SYSCTL
void coda_sysctl_init(void);
void coda_sysctl_clean(void);
#else
static inline void coda_sysctl_init(void)
{
}
static inline void coda_sysctl_clean(void)
{
}
#endif
#endif /* _CODA_INT_ */


@ -18,7 +18,7 @@
#include <linux/string.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
/* initialize the debugging variables */
@ -66,6 +66,25 @@ unsigned short coda_flags_to_cflags(unsigned short flags)
return coda_flags;
}
static struct timespec64 coda_to_timespec64(struct coda_timespec ts)
{
struct timespec64 ts64 = {
.tv_sec = ts.tv_sec,
.tv_nsec = ts.tv_nsec,
};
return ts64;
}
static struct coda_timespec timespec64_to_coda(struct timespec64 ts64)
{
struct coda_timespec ts = {
.tv_sec = ts64.tv_sec,
.tv_nsec = ts64.tv_nsec,
};
return ts;
}
/* utility functions below */
void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
@ -105,11 +124,11 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
if (attr->va_size != -1)
inode->i_blocks = (attr->va_size + 511) >> 9;
if (attr->va_atime.tv_sec != -1)
inode->i_atime = timespec_to_timespec64(attr->va_atime);
inode->i_atime = coda_to_timespec64(attr->va_atime);
if (attr->va_mtime.tv_sec != -1)
inode->i_mtime = timespec_to_timespec64(attr->va_mtime);
inode->i_mtime = coda_to_timespec64(attr->va_mtime);
if (attr->va_ctime.tv_sec != -1)
inode->i_ctime = timespec_to_timespec64(attr->va_ctime);
inode->i_ctime = coda_to_timespec64(attr->va_ctime);
}
@ -130,12 +149,12 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr)
vattr->va_uid = (vuid_t) -1;
vattr->va_gid = (vgid_t) -1;
vattr->va_size = (off_t) -1;
vattr->va_atime.tv_sec = (time_t) -1;
vattr->va_atime.tv_nsec = (time_t) -1;
vattr->va_mtime.tv_sec = (time_t) -1;
vattr->va_mtime.tv_nsec = (time_t) -1;
vattr->va_ctime.tv_sec = (time_t) -1;
vattr->va_ctime.tv_nsec = (time_t) -1;
vattr->va_atime.tv_sec = (int64_t) -1;
vattr->va_atime.tv_nsec = (long) -1;
vattr->va_mtime.tv_sec = (int64_t) -1;
vattr->va_mtime.tv_nsec = (long) -1;
vattr->va_ctime.tv_sec = (int64_t) -1;
vattr->va_ctime.tv_nsec = (long) -1;
vattr->va_type = C_VNON;
vattr->va_fileid = -1;
vattr->va_gen = -1;
@ -175,13 +194,13 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr)
vattr->va_size = iattr->ia_size;
}
if ( valid & ATTR_ATIME ) {
vattr->va_atime = timespec64_to_timespec(iattr->ia_atime);
vattr->va_atime = timespec64_to_coda(iattr->ia_atime);
}
if ( valid & ATTR_MTIME ) {
vattr->va_mtime = timespec64_to_timespec(iattr->ia_mtime);
vattr->va_mtime = timespec64_to_coda(iattr->ia_mtime);
}
if ( valid & ATTR_CTIME ) {
vattr->va_ctime = timespec64_to_timespec(iattr->ia_ctime);
vattr->va_ctime = timespec64_to_coda(iattr->ia_ctime);
}
}
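
These converters replace the old timespec_to_timespec64() round-trips because the Coda wire format now carries its own fixed-width type; a sketch of the assumed uapi definition (the (int64_t)/(long) casts above match it):

	/* assumed <uapi/linux/coda.h> definition; int64_t tv_sec keeps
	 * the protocol y2038-clean and independent of kernel time_t */
	struct coda_timespec {
		int64_t	tv_sec;		/* seconds */
		long	tv_nsec;	/* nanoseconds */
	};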


@ -59,22 +59,6 @@ void coda_vattr_to_iattr(struct inode *, struct coda_vattr *);
void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *);
unsigned short coda_flags_to_cflags(unsigned short);
/* sysctl.h */
void coda_sysctl_init(void);
void coda_sysctl_clean(void);
#define CODA_ALLOC(ptr, cast, size) do { \
if (size < PAGE_SIZE) \
ptr = kzalloc((unsigned long) size, GFP_KERNEL); \
else \
ptr = (cast)vzalloc((unsigned long) size); \
if (!ptr) \
pr_warn("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \
} while (0)
#define CODA_FREE(ptr, size) kvfree((ptr))
/* inode to cnode access functions */
static inline struct coda_inode_info *ITOC(struct inode *inode)

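The removed CODA_ALLOC() open-coded the kmalloc-versus-vmalloc decision; the kvmalloc family used by the later hunks in this series does the same fallback generically. The replacement pattern, sketched (error style illustrative):

	union inputArgs *inp;

	/* kvzalloc() falls back from kmalloc to vmalloc for large
	 * sizes; kvfree() releases either kind. */
	inp = kvzalloc(size, GFP_KERNEL);
	if (!inp)
		return ERR_PTR(-ENOMEM);
	/* ... build and send the upcall ... */
	kvfree(inp);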

@ -3,11 +3,31 @@
#define __CODA_PSDEV_H
#include <linux/backing-dev.h>
#include <linux/magic.h>
#include <linux/mutex.h>
#include <uapi/linux/coda_psdev.h>
#define CODA_PSDEV_MAJOR 67
#define MAX_CODADEVS 5 /* how many do we allow */
struct kstatfs;
/* messages between coda filesystem in kernel and Venus */
struct upc_req {
struct list_head uc_chain;
caddr_t uc_data;
u_short uc_flags;
u_short uc_inSize; /* Size is at most 5000 bytes */
u_short uc_outSize;
u_short uc_opcode; /* copied from data to save lookup */
int uc_unique;
wait_queue_head_t uc_sleep; /* process' wait queue */
};
#define CODA_REQ_ASYNC 0x1
#define CODA_REQ_READ 0x2
#define CODA_REQ_WRITE 0x4
#define CODA_REQ_ABORT 0x8
/* communication pending/processing queues */
struct venus_comm {
u_long vc_seq;
@ -19,7 +39,6 @@ struct venus_comm {
struct mutex vc_mutex;
};
static inline struct venus_comm *coda_vcp(struct super_block *sb)
{
return (struct venus_comm *)((sb)->s_fs_info);
@ -30,39 +49,43 @@ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
int venus_getattr(struct super_block *sb, struct CodaFid *fid,
struct coda_vattr *attr);
int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
int venus_lookup(struct super_block *sb, struct CodaFid *fid,
const char *name, int length, int *type,
int venus_lookup(struct super_block *sb, struct CodaFid *fid,
const char *name, int length, int *type,
struct CodaFid *resfid);
int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
kuid_t uid);
int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
struct file **f);
int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length,
int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length,
struct CodaFid *newfid, struct coda_vattr *attrs);
int venus_create(struct super_block *sb, struct CodaFid *dirfid,
int venus_create(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length, int excl, int mode,
struct CodaFid *newfid, struct coda_vattr *attrs) ;
int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
struct CodaFid *newfid, struct coda_vattr *attrs);
int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length);
int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length);
int venus_readlink(struct super_block *sb, struct CodaFid *fid,
int venus_readlink(struct super_block *sb, struct CodaFid *fid,
char *buffer, int *length);
int venus_rename(struct super_block *, struct CodaFid *new_fid,
struct CodaFid *old_fid, size_t old_length,
size_t new_length, const char *old_name,
int venus_rename(struct super_block *sb, struct CodaFid *new_fid,
struct CodaFid *old_fid, size_t old_length,
size_t new_length, const char *old_name,
const char *new_name);
int venus_link(struct super_block *sb, struct CodaFid *fid,
int venus_link(struct super_block *sb, struct CodaFid *fid,
struct CodaFid *dirfid, const char *name, int len );
int venus_symlink(struct super_block *sb, struct CodaFid *fid,
const char *name, int len, const char *symname, int symlen);
int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
unsigned int cmd, struct PioctlData *data);
int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out,
size_t nbytes);
int venus_fsync(struct super_block *sb, struct CodaFid *fid);
int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
int venus_access_intent(struct super_block *sb, struct CodaFid *fid,
bool *access_intent_supported,
size_t count, loff_t ppos, int type);
/*
* Statistics


@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_cache.h"
@ -47,8 +47,8 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsig
int type = 0;
if (length > CODA_MAXNAMLEN) {
pr_err("name too long: lookup, %s (%*s)\n",
coda_i2s(dir), (int)length, name);
pr_err("name too long: lookup, %s %zu\n",
coda_i2s(dir), length);
return ERR_PTR(-ENAMETOOLONG);
}
@ -356,8 +356,7 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
ino_t ino;
int ret;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
cfi = coda_ftoc(coda_file);
host_file = cfi->cfi_container;
cii = ITOC(file_inode(coda_file));
@ -426,8 +425,7 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
struct file *host_file;
int ret;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
cfi = coda_ftoc(coda_file);
host_file = cfi->cfi_container;
if (host_file->f_op->iterate || host_file->f_op->iterate_shared) {


@ -20,22 +20,43 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_int.h"
struct coda_vm_ops {
atomic_t refcnt;
struct file *coda_file;
const struct vm_operations_struct *host_vm_ops;
struct vm_operations_struct vm_ops;
};
static ssize_t
coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *coda_file = iocb->ki_filp;
struct coda_file_info *cfi = CODA_FTOC(coda_file);
struct inode *coda_inode = file_inode(coda_file);
struct coda_file_info *cfi = coda_ftoc(coda_file);
loff_t ki_pos = iocb->ki_pos;
size_t count = iov_iter_count(to);
ssize_t ret;
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
&cfi->cfi_access_intent,
count, ki_pos, CODA_ACCESS_TYPE_READ);
if (ret)
goto finish_read;
return vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);
ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);
finish_read:
venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
&cfi->cfi_access_intent,
count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
return ret;
}
static ssize_t
@ -43,13 +64,18 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *coda_file = iocb->ki_filp;
struct inode *coda_inode = file_inode(coda_file);
struct coda_file_info *cfi = CODA_FTOC(coda_file);
struct file *host_file;
struct coda_file_info *cfi = coda_ftoc(coda_file);
struct file *host_file = cfi->cfi_container;
loff_t ki_pos = iocb->ki_pos;
size_t count = iov_iter_count(to);
ssize_t ret;
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
&cfi->cfi_access_intent,
count, ki_pos, CODA_ACCESS_TYPE_WRITE);
if (ret)
goto finish_write;
host_file = cfi->cfi_container;
file_start_write(host_file);
inode_lock(coda_inode);
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
@ -58,26 +84,73 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
inode_unlock(coda_inode);
file_end_write(host_file);
finish_write:
venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
&cfi->cfi_access_intent,
count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
return ret;
}
static void
coda_vm_open(struct vm_area_struct *vma)
{
struct coda_vm_ops *cvm_ops =
container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
atomic_inc(&cvm_ops->refcnt);
if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
cvm_ops->host_vm_ops->open(vma);
}
static void
coda_vm_close(struct vm_area_struct *vma)
{
struct coda_vm_ops *cvm_ops =
container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
cvm_ops->host_vm_ops->close(vma);
if (atomic_dec_and_test(&cvm_ops->refcnt)) {
vma->vm_ops = cvm_ops->host_vm_ops;
fput(cvm_ops->coda_file);
kfree(cvm_ops);
}
}
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
struct coda_file_info *cfi;
struct inode *coda_inode = file_inode(coda_file);
struct coda_file_info *cfi = coda_ftoc(coda_file);
struct file *host_file = cfi->cfi_container;
struct inode *host_inode = file_inode(host_file);
struct coda_inode_info *cii;
struct file *host_file;
struct inode *coda_inode, *host_inode;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;
struct coda_vm_ops *cvm_ops;
loff_t ppos;
size_t count;
int ret;
if (!host_file->f_op->mmap)
return -ENODEV;
coda_inode = file_inode(coda_file);
host_inode = file_inode(host_file);
if (WARN_ON(coda_file != vma->vm_file))
return -EIO;
count = vma->vm_end - vma->vm_start;
ppos = vma->vm_pgoff * PAGE_SIZE;
ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
&cfi->cfi_access_intent,
count, ppos, CODA_ACCESS_TYPE_MMAP);
if (ret)
return ret;
cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
if (!cvm_ops)
return -ENOMEM;
cii = ITOC(coda_inode);
spin_lock(&cii->c_lock);
@ -89,6 +162,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
* the container file on us! */
else if (coda_inode->i_mapping != host_inode->i_mapping) {
spin_unlock(&cii->c_lock);
kfree(cvm_ops);
return -EBUSY;
}
@ -97,7 +171,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
cfi->cfi_mapcount++;
spin_unlock(&cii->c_lock);
return call_mmap(host_file, vma);
vma->vm_file = get_file(host_file);
ret = call_mmap(vma->vm_file, vma);
if (ret) {
/* if call_mmap fails, our caller will put coda_file so we
* should drop the reference to the host_file that we got.
*/
fput(host_file);
kfree(cvm_ops);
} else {
/* here we add redirects for the open/close vm_operations */
cvm_ops->host_vm_ops = vma->vm_ops;
if (vma->vm_ops)
cvm_ops->vm_ops = *vma->vm_ops;
cvm_ops->vm_ops.open = coda_vm_open;
cvm_ops->vm_ops.close = coda_vm_close;
cvm_ops->coda_file = coda_file;
atomic_set(&cvm_ops->refcnt, 1);
vma->vm_ops = &cvm_ops->vm_ops;
}
return ret;
}
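
The redirect machinery above is a wrap-the-ops-table pattern: copy the host's vm_operations, override open/close, and refcount the wrapper so the saved file reference is dropped only with the last VMA clone. A minimal sketch of the same shape, with all names hypothetical:

	struct shim {
		atomic_t refcnt;	/* one count per live VMA copy */
		struct file *owner;	/* dropped when refcnt hits zero */
		const struct vm_operations_struct *host;
		struct vm_operations_struct ops; /* host copy + overrides */
	};

	static void shim_open(struct vm_area_struct *vma)
	{
		struct shim *s = container_of(vma->vm_ops, struct shim, ops);

		atomic_inc(&s->refcnt);		/* the VMA was duplicated */
		if (s->host && s->host->open)
			s->host->open(vma);
	}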
int coda_open(struct inode *coda_inode, struct file *coda_file)
@ -127,6 +223,8 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
cfi->cfi_magic = CODA_MAGIC;
cfi->cfi_mapcount = 0;
cfi->cfi_container = host_file;
/* assume access intents are supported unless we hear otherwise */
cfi->cfi_access_intent = true;
BUG_ON(coda_file->private_data != NULL);
coda_file->private_data = cfi;
@ -142,8 +240,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
struct inode *host_inode;
int err;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
cfi = coda_ftoc(coda_file);
err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
coda_flags, coda_file->f_cred->fsuid);
@ -185,8 +282,7 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
return err;
inode_lock(coda_inode);
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
cfi = coda_ftoc(coda_file);
host_file = cfi->cfi_container;
err = vfs_fsync(host_file, datasync);
@ -207,4 +303,3 @@ const struct file_operations coda_file_operations = {
.fsync = coda_fsync,
.splice_read = generic_file_splice_read,
};


@ -27,7 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_cache.h"
@ -236,6 +236,7 @@ static void coda_put_super(struct super_block *sb)
vcp->vc_sb = NULL;
sb->s_fs_info = NULL;
mutex_unlock(&vcp->vc_mutex);
mutex_destroy(&vcp->vc_mutex);
pr_info("Bye bye.\n");
}


@ -20,8 +20,7 @@
#include <linux/uaccess.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
/* pioctl ops */


@ -38,8 +38,7 @@
#include <linux/uaccess.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_int.h"
@ -100,8 +99,12 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
ssize_t retval = 0, count = 0;
int error;
/* make sure there is enough to copy out the (opcode, unique) values */
if (nbytes < (2 * sizeof(u_int32_t)))
return -EINVAL;
/* Peek at the opcode, uniquefier */
if (copy_from_user(&hdr, buf, 2 * sizeof(u_long)))
if (copy_from_user(&hdr, buf, 2 * sizeof(u_int32_t)))
return -EFAULT;
if (DOWNCALL(hdr.opcode)) {
@ -119,17 +122,21 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
hdr.opcode, hdr.unique);
nbytes = size;
}
CODA_ALLOC(dcbuf, union outputArgs *, nbytes);
dcbuf = kvmalloc(nbytes, GFP_KERNEL);
if (!dcbuf) {
retval = -ENOMEM;
goto out;
}
if (copy_from_user(dcbuf, buf, nbytes)) {
CODA_FREE(dcbuf, nbytes);
kvfree(dcbuf);
retval = -EFAULT;
goto out;
}
/* what downcall errors does Venus handle ? */
error = coda_downcall(vcp, hdr.opcode, dcbuf);
error = coda_downcall(vcp, hdr.opcode, dcbuf, nbytes);
CODA_FREE(dcbuf, nbytes);
kvfree(dcbuf);
if (error) {
pr_warn("%s: coda_downcall error: %d\n",
__func__, error);
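
The peek fix above matters because the header begins with two 32-bit fields: reading 2 * sizeof(u_long) grabbed 16 bytes on 64-bit, past the 8 bytes the new length check guarantees. A sketch of the assumed uapi layout (remaining members omitted):

	struct coda_in_hdr {
		u_int32_t opcode;
		u_int32_t unique;	/* matches replies to requests */
		/* ... remaining members omitted in this sketch ... */
	};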
@ -182,8 +189,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
if (req->uc_opcode == CODA_OPEN_BY_FD) {
struct coda_open_by_fd_out *outp =
(struct coda_open_by_fd_out *)req->uc_data;
if (!outp->oh.result)
if (!outp->oh.result) {
outp->fh = fget(outp->fd);
if (!outp->fh)
return -EBADF;
}
}
wake_up(&req->uc_sleep);
@ -252,7 +262,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
goto out;
}
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
kvfree(req->uc_data);
kfree(req);
out:
mutex_unlock(&vcp->vc_mutex);
@ -314,7 +324,7 @@ static int coda_psdev_release(struct inode * inode, struct file * file)
/* Async requests need to be freed here */
if (req->uc_flags & CODA_REQ_ASYNC) {
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
kvfree(req->uc_data);
kfree(req);
continue;
}
@ -347,13 +357,13 @@ static const struct file_operations coda_psdev_fops = {
.llseek = noop_llseek,
};
static int init_coda_psdev(void)
static int __init init_coda_psdev(void)
{
int i, err = 0;
if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) {
pr_err("%s: unable to get major %d\n",
__func__, CODA_PSDEV_MAJOR);
return -EIO;
return -EIO;
}
coda_psdev_class = class_create(THIS_MODULE, "coda");
if (IS_ERR(coda_psdev_class)) {
@ -378,7 +388,7 @@ MODULE_AUTHOR("Jan Harkes, Peter J. Braam");
MODULE_DESCRIPTION("Coda Distributed File System VFS interface");
MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR);
MODULE_LICENSE("GPL");
MODULE_VERSION("6.6");
MODULE_VERSION("7.0");
static int __init init_coda(void)
{


@ -17,8 +17,7 @@
#include <linux/pagemap.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
static int coda_symlink_filler(struct file *file, struct page *page)


@ -12,7 +12,6 @@
#include "coda_int.h"
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fs_table_header;
static struct ctl_table coda_table[] = {
@ -62,13 +61,3 @@ void coda_sysctl_clean(void)
fs_table_header = NULL;
}
}
#else
void coda_sysctl_init(void)
{
}
void coda_sysctl_clean(void)
{
}
#endif


@ -33,7 +33,7 @@
#include <linux/vfs.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_cache.h"
@ -46,7 +46,7 @@ static void *alloc_upcall(int opcode, int size)
{
union inputArgs *inp;
CODA_ALLOC(inp, union inputArgs *, size);
inp = kvzalloc(size, GFP_KERNEL);
if (!inp)
return ERR_PTR(-ENOMEM);
@ -85,7 +85,7 @@ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp)
if (!error)
*fidp = outp->coda_root.VFid;
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -104,7 +104,7 @@ int venus_getattr(struct super_block *sb, struct CodaFid *fid,
if (!error)
*attr = outp->coda_getattr.attr;
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -123,7 +123,7 @@ int venus_setattr(struct super_block *sb, struct CodaFid *fid,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -153,7 +153,7 @@ int venus_lookup(struct super_block *sb, struct CodaFid *fid,
*type = outp->coda_lookup.vtype;
}
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -173,7 +173,7 @@ int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -194,7 +194,7 @@ int venus_open(struct super_block *sb, struct CodaFid *fid,
if (!error)
*fh = outp->coda_open_by_fd.fh;
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -224,7 +224,7 @@ int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
*newfid = outp->coda_mkdir.VFid;
}
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -262,7 +262,7 @@ int venus_rename(struct super_block *sb, struct CodaFid *old_fid,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -295,7 +295,7 @@ int venus_create(struct super_block *sb, struct CodaFid *dirfid,
*newfid = outp->coda_create.VFid;
}
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -318,7 +318,7 @@ int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -340,7 +340,7 @@ int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -370,7 +370,7 @@ int venus_readlink(struct super_block *sb, struct CodaFid *fid,
*(buffer + retlen) = '\0';
}
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -398,7 +398,7 @@ int venus_link(struct super_block *sb, struct CodaFid *fid,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -433,7 +433,7 @@ int venus_symlink(struct super_block *sb, struct CodaFid *fid,
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -449,7 +449,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
inp->coda_fsync.VFid = *fid;
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -467,7 +467,7 @@ int venus_access(struct super_block *sb, struct CodaFid *fid, int mask)
error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -543,7 +543,7 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
}
exit:
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
@ -553,7 +553,7 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
union outputArgs *outp;
int insize, outsize, error;
insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
insize = SIZE(statfs);
UPARG(CODA_STATFS);
error = coda_upcall(coda_vcp(dentry->d_sb), insize, &outsize, inp);
@ -565,10 +565,51 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
sfs->f_ffree = outp->coda_statfs.stat.f_ffree;
}
CODA_FREE(inp, insize);
kvfree(inp);
return error;
}
int venus_access_intent(struct super_block *sb, struct CodaFid *fid,
bool *access_intent_supported,
size_t count, loff_t ppos, int type)
{
union inputArgs *inp;
union outputArgs *outp;
int insize, outsize, error;
bool finalizer =
type == CODA_ACCESS_TYPE_READ_FINISH ||
type == CODA_ACCESS_TYPE_WRITE_FINISH;
if (!*access_intent_supported && !finalizer)
return 0;
insize = SIZE(access_intent);
UPARG(CODA_ACCESS_INTENT);
inp->coda_access_intent.VFid = *fid;
inp->coda_access_intent.count = count;
inp->coda_access_intent.pos = ppos;
inp->coda_access_intent.type = type;
error = coda_upcall(coda_vcp(sb), insize,
finalizer ? NULL : &outsize, inp);
/*
* we have to free the request buffer for synchronous upcalls
* or when asynchronous upcalls fail, but not when asynchronous
* upcalls succeed
*/
if (!finalizer || error)
kvfree(inp);
/* Chunked access is not supported, or this is an old Coda client */
if (error == -EOPNOTSUPP) {
*access_intent_supported = false;
error = 0;
}
return error;
}
/*
* coda_upcall and coda_downcall routines.
*/
@ -598,10 +639,12 @@ static void coda_unblock_signals(sigset_t *old)
* has seen them,
* - CODA_CLOSE or CODA_RELEASE upcall (to avoid reference count problems)
* - CODA_STORE (to avoid data loss)
* - CODA_ACCESS_INTENT (to avoid reference count problems)
*/
#define CODA_INTERRUPTIBLE(r) (!coda_hard && \
(((r)->uc_opcode != CODA_CLOSE && \
(r)->uc_opcode != CODA_STORE && \
(r)->uc_opcode != CODA_ACCESS_INTENT && \
(r)->uc_opcode != CODA_RELEASE) || \
(r)->uc_flags & CODA_REQ_READ))
@ -687,21 +730,25 @@ static int coda_upcall(struct venus_comm *vcp,
goto exit;
}
req->uc_data = (void *)buffer;
req->uc_flags = 0;
req->uc_inSize = inSize;
req->uc_outSize = *outSize ? *outSize : inSize;
req->uc_opcode = ((union inputArgs *)buffer)->ih.opcode;
req->uc_unique = ++vcp->vc_seq;
init_waitqueue_head(&req->uc_sleep);
buffer->ih.unique = ++vcp->vc_seq;
/* Fill in the common input args. */
((union inputArgs *)buffer)->ih.unique = req->uc_unique;
req->uc_data = (void *)buffer;
req->uc_flags = outSize ? 0 : CODA_REQ_ASYNC;
req->uc_inSize = inSize;
req->uc_outSize = (outSize && *outSize) ? *outSize : inSize;
req->uc_opcode = buffer->ih.opcode;
req->uc_unique = buffer->ih.unique;
init_waitqueue_head(&req->uc_sleep);
/* Append msg to pending queue and poke Venus. */
list_add_tail(&req->uc_chain, &vcp->vc_pending);
wake_up_interruptible(&vcp->vc_waitq);
if (req->uc_flags & CODA_REQ_ASYNC) {
mutex_unlock(&vcp->vc_mutex);
return 0;
}
/* We can be interrupted while we wait for Venus to process
* our request. If the interrupt occurs before Venus has read
* the request, we dequeue and return. If it occurs after the
@ -743,20 +790,20 @@ static int coda_upcall(struct venus_comm *vcp,
sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
if (!sig_req) goto exit;
CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
if (!sig_req->uc_data) {
sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL);
if (!sig_inputArgs) {
kfree(sig_req);
goto exit;
}
error = -EINTR;
sig_inputArgs = (union inputArgs *)sig_req->uc_data;
sig_inputArgs->ih.opcode = CODA_SIGNAL;
sig_inputArgs->ih.unique = req->uc_unique;
sig_req->uc_flags = CODA_REQ_ASYNC;
sig_req->uc_opcode = sig_inputArgs->ih.opcode;
sig_req->uc_unique = sig_inputArgs->ih.unique;
sig_req->uc_data = (void *)sig_inputArgs;
sig_req->uc_inSize = sizeof(struct coda_in_hdr);
sig_req->uc_outSize = sizeof(struct coda_in_hdr);
@ -804,12 +851,44 @@ static int coda_upcall(struct venus_comm *vcp,
*
* CODA_REPLACE -- replace one CodaFid with another throughout the name cache */
int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out)
int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out,
size_t nbytes)
{
struct inode *inode = NULL;
struct CodaFid *fid = NULL, *newfid;
struct super_block *sb;
/*
* Make sure we have received enough data from the cache
* manager to populate the necessary fields in the buffer
*/
switch (opcode) {
case CODA_PURGEUSER:
if (nbytes < sizeof(struct coda_purgeuser_out))
return -EINVAL;
break;
case CODA_ZAPDIR:
if (nbytes < sizeof(struct coda_zapdir_out))
return -EINVAL;
break;
case CODA_ZAPFILE:
if (nbytes < sizeof(struct coda_zapfile_out))
return -EINVAL;
break;
case CODA_PURGEFID:
if (nbytes < sizeof(struct coda_purgefid_out))
return -EINVAL;
break;
case CODA_REPLACE:
if (nbytes < sizeof(struct coda_replace_out))
return -EINVAL;
break;
}
/* Handle invalidation requests. */
mutex_lock(&vcp->vc_mutex);
sb = vcp->vc_sb;
@ -879,4 +958,3 @@ int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out)
iput(inode);
return 0;
}


@ -2313,19 +2313,17 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
size_t, sigsetsize)
{
int error;
sigset_t ksigmask, sigsaved;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
error = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
error = set_user_sigmask(sigmask, sigsetsize);
if (error)
return error;
error = do_epoll_wait(epfd, events, maxevents, timeout);
restore_user_sigmask(sigmask, &sigsaved, error == -EINTR);
restore_saved_sigmask_unless(error == -EINTR);
return error;
}
@ -2338,19 +2336,17 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
compat_size_t, sigsetsize)
{
long err;
sigset_t ksigmask, sigsaved;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
err = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
err = set_compat_user_sigmask(sigmask, sigsetsize);
if (err)
return err;
err = do_epoll_wait(epfd, events, maxevents, timeout);
restore_user_sigmask(sigmask, &sigsaved, err == -EINTR);
restore_saved_sigmask_unless(err == -EINTR);
return err;
}


@ -407,7 +407,7 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len)
int offset = 0;
if (!is_known_namespace(xattr_name)) {
strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
offset += XATTR_MAC_OSX_PREFIX_LEN;
len += XATTR_MAC_OSX_PREFIX_LEN;
}
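
A note on the one-line change above: the prefix copy never wanted strncpy's NUL-padding semantics, so memcpy states the fixed-length intent directly (and sidesteps compilers that warn when strncpy cannot NUL-terminate):

	/* copy exactly the prefix bytes; termination is handled later */
	memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);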


@ -2400,7 +2400,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
const sigset_t __user *sig, size_t sigsz)
{
struct io_cq_ring *ring = ctx->cq_ring;
sigset_t ksigmask, sigsaved;
int ret;
if (io_cqring_events(ring) >= min_events)
@ -2410,21 +2409,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
&ksigmask, &sigsaved, sigsz);
sigsz);
else
#endif
ret = set_user_sigmask(sig, &ksigmask,
&sigsaved, sigsz);
ret = set_user_sigmask(sig, sigsz);
if (ret)
return ret;
}
ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events);
if (sig)
restore_user_sigmask(sig, &sigsaved, ret == -ERESTARTSYS);
restore_saved_sigmask_unless(ret == -ERESTARTSYS);
if (ret == -ERESTARTSYS)
ret = -EINTR;


@ -58,7 +58,8 @@ config PROC_VMCORE_DEVICE_DUMP
snapshot.
If you say Y here, the collected device dumps will be added
as ELF notes to /proc/vmcore.
as ELF notes to /proc/vmcore. You can still disable device
dump using the kernel command line option 'novmcoredd'.
config PROC_SYSCTL
bool "Sysctl support (/proc/sys)" if EXPERT


@ -200,7 +200,8 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
struct proc_dir_entry *pde = PDE(file_inode(file));
loff_t rv = -EINVAL;
if (use_pde(pde)) {
loff_t (*llseek)(struct file *, loff_t, int);
typeof_member(struct file_operations, llseek) llseek;
llseek = pde->proc_fops->llseek;
if (!llseek)
llseek = default_llseek;
@ -212,10 +213,11 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
if (use_pde(pde)) {
typeof_member(struct file_operations, read) read;
read = pde->proc_fops->read;
if (read)
rv = read(file, buf, count, ppos);
@ -226,10 +228,11 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
if (use_pde(pde)) {
typeof_member(struct file_operations, write) write;
write = pde->proc_fops->write;
if (write)
rv = write(file, buf, count, ppos);
@ -242,8 +245,9 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
__poll_t rv = DEFAULT_POLLMASK;
__poll_t (*poll)(struct file *, struct poll_table_struct *);
if (use_pde(pde)) {
typeof_member(struct file_operations, poll) poll;
poll = pde->proc_fops->poll;
if (poll)
rv = poll(file, pts);
@ -256,8 +260,9 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
{
struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
long (*ioctl)(struct file *, unsigned int, unsigned long);
if (use_pde(pde)) {
typeof_member(struct file_operations, unlocked_ioctl) ioctl;
ioctl = pde->proc_fops->unlocked_ioctl;
if (ioctl)
rv = ioctl(file, cmd, arg);
@ -271,8 +276,9 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
{
struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
if (use_pde(pde)) {
typeof_member(struct file_operations, compat_ioctl) compat_ioctl;
compat_ioctl = pde->proc_fops->compat_ioctl;
if (compat_ioctl)
rv = compat_ioctl(file, cmd, arg);
@ -286,8 +292,9 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
int rv = -EIO;
int (*mmap)(struct file *, struct vm_area_struct *);
if (use_pde(pde)) {
typeof_member(struct file_operations, mmap) mmap;
mmap = pde->proc_fops->mmap;
if (mmap)
rv = mmap(file, vma);
@ -305,7 +312,7 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
unsigned long rv = -EIO;
if (use_pde(pde)) {
typeof(proc_reg_get_unmapped_area) *get_area;
typeof_member(struct file_operations, get_unmapped_area) get_area;
get_area = pde->proc_fops->get_unmapped_area;
#ifdef CONFIG_MMU
@ -326,8 +333,8 @@ static int proc_reg_open(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
int (*open)(struct inode *, struct file *);
int (*release)(struct inode *, struct file *);
typeof_member(struct file_operations, open) open;
typeof_member(struct file_operations, release) release;
struct pde_opener *pdeo;
/*

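typeof_member(), used throughout the fs/proc/inode.c hunks above, resolves to the declared type of a struct member, so a local can never drift out of sync with the struct definition. A self-contained sketch around a hypothetical ops struct:

	#define typeof_member(T, m)	typeof(((T *)0)->m)

	struct ops {
		long (*ioctl)(void *obj, unsigned int cmd);
	};

	static long example_ioctl(void *obj, unsigned int cmd)
	{
		return 0;
	}

	/* 'fn' gets exactly the declared type of ops::ioctl */
	static typeof_member(struct ops, ioctl) fn = example_ioctl;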

@ -499,6 +499,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
if (root->set_ownership)
root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
else {
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
}
return inode;
}


@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
@ -54,6 +55,9 @@ static struct proc_dir_entry *proc_vmcore;
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Device Dump Size */
@ -1452,6 +1456,11 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
size_t data_size;
int ret;
if (vmcoredd_disabled) {
pr_err_once("Device dump is disabled\n");
return -EINVAL;
}
if (!data || !strlen(data->dump_name) ||
!data->vmcoredd_callback || !data->size)
return -EINVAL;
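
core_param() is what gives the parameter its bare, unprefixed spelling on the kernel command line; a minimal sketch with illustrative names:

	/* registers "noexample" (no module prefix) for built-in code;
	 * booting with noexample or noexample=1 sets the flag before
	 * initcalls that consult it */
	static bool example_disabled;
	core_param(noexample, example_disabled, bool, 0);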


@ -94,7 +94,7 @@ static int journal_join(struct reiserfs_transaction_handle *th,
struct super_block *sb);
static void release_journal_dev(struct super_block *super,
struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
static void dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
@ -1682,12 +1682,11 @@ static int write_one_transaction(struct super_block *s,
}
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
static void dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl)
{
struct reiserfs_journal_cnode *cn;
struct reiserfs_journal_list *pjl;
int ret = 0;
jl->j_state |= LIST_DIRTY;
cn = jl->j_realblock;
@ -1716,7 +1715,6 @@ static int dirty_one_transaction(struct super_block *s,
}
cn = cn->next;
}
return ret;
}
static int kupdate_transactions(struct super_block *s,


@ -294,12 +294,14 @@ enum poll_time_type {
PT_OLD_TIMESPEC = 3,
};
static int poll_select_copy_remaining(struct timespec64 *end_time,
void __user *p,
enum poll_time_type pt_type, int ret)
static int poll_select_finish(struct timespec64 *end_time,
void __user *p,
enum poll_time_type pt_type, int ret)
{
struct timespec64 rts;
restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);
if (!p)
return ret;
@ -714,9 +716,7 @@ static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
}
ret = core_sys_select(n, inp, outp, exp, to);
ret = poll_select_copy_remaining(&end_time, tvp, PT_TIMEVAL, ret);
return ret;
return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
}
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
@ -730,7 +730,6 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
const sigset_t __user *sigmask, size_t sigsetsize,
enum poll_time_type type)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
@ -753,15 +752,12 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
return -EINVAL;
}
ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
ret = set_user_sigmask(sigmask, sigsetsize);
if (ret)
return ret;
ret = core_sys_select(n, inp, outp, exp, to);
restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND);
ret = poll_select_copy_remaining(&end_time, tsp, type, ret);
return ret;
return poll_select_finish(&end_time, tsp, type, ret);
}
/*
@ -926,7 +922,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
if (!count) {
count = wait->error;
if (signal_pending(current))
count = -EINTR;
count = -ERESTARTNOHAND;
}
if (count || timed_out)
break;
@ -965,7 +961,7 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
struct timespec64 *end_time)
{
struct poll_wqueues table;
int err = -EFAULT, fdcount, len, size;
int err = -EFAULT, fdcount, len;
/* Allocate small arguments on the stack to save memory and be
faster - use long to make sure the buffer is aligned properly
on 64 bit archs to avoid unaligned access */
@ -993,8 +989,8 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
break;
len = min(todo, POLLFD_PER_PAGE);
size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
walk = walk->next = kmalloc(size, GFP_KERNEL);
walk = walk->next = kmalloc(struct_size(walk, entries, len),
GFP_KERNEL);
if (!walk) {
err = -ENOMEM;
goto out_fds;
@ -1041,7 +1037,7 @@ static long do_restart_poll(struct restart_block *restart_block)
ret = do_sys_poll(ufds, nfds, to);
if (ret == -EINTR) {
if (ret == -ERESTARTNOHAND) {
restart_block->fn = do_restart_poll;
ret = -ERESTART_RESTARTBLOCK;
}
@ -1062,7 +1058,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
ret = do_sys_poll(ufds, nfds, to);
if (ret == -EINTR) {
if (ret == -ERESTARTNOHAND) {
struct restart_block *restart_block;
restart_block = &current->restart_block;
@ -1086,7 +1082,6 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
@ -1099,20 +1094,12 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
return -EINVAL;
}
ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
ret = set_user_sigmask(sigmask, sigsetsize);
if (ret)
return ret;
ret = do_sys_poll(ufds, nfds, to);
restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
/* We can restart this syscall, usually */
if (ret == -EINTR)
ret = -ERESTARTNOHAND;
ret = poll_select_copy_remaining(&end_time, tsp, PT_TIMESPEC, ret);
return ret;
return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
@ -1121,7 +1108,6 @@ SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
@ -1134,20 +1120,12 @@ SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
return -EINVAL;
}
ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
ret = set_user_sigmask(sigmask, sigsetsize);
if (ret)
return ret;
ret = do_sys_poll(ufds, nfds, to);
restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
/* We can restart this syscall, usually */
if (ret == -EINTR)
ret = -ERESTARTNOHAND;
ret = poll_select_copy_remaining(&end_time, tsp, PT_OLD_TIMESPEC, ret);
return ret;
return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif
@ -1284,9 +1262,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp,
}
ret = compat_core_sys_select(n, inp, outp, exp, to);
ret = poll_select_copy_remaining(&end_time, tvp, PT_OLD_TIMEVAL, ret);
return ret;
return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
}
COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
@ -1319,7 +1295,6 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
void __user *tsp, compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, enum poll_time_type type)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
@ -1342,15 +1317,12 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
return -EINVAL;
}
ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
ret = set_compat_user_sigmask(sigmask, sigsetsize);
if (ret)
return ret;
ret = compat_core_sys_select(n, inp, outp, exp, to);
restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND);
ret = poll_select_copy_remaining(&end_time, tsp, type, ret);
return ret;
return poll_select_finish(&end_time, tsp, type, ret);
}
COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
@ -1402,7 +1374,6 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
unsigned int, nfds, struct old_timespec32 __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
@ -1415,20 +1386,12 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
return -EINVAL;
}
ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
ret = set_compat_user_sigmask(sigmask, sigsetsize);
if (ret)
return ret;
ret = do_sys_poll(ufds, nfds, to);
restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
/* We can restart this syscall, usually */
if (ret == -EINTR)
ret = -ERESTARTNOHAND;
ret = poll_select_copy_remaining(&end_time, tsp, PT_OLD_TIMESPEC, ret);
return ret;
return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif
@ -1437,7 +1400,6 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
unsigned int, nfds, struct __kernel_timespec __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
@ -1450,20 +1412,12 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
return -EINVAL;
}
ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
ret = set_compat_user_sigmask(sigmask, sigsetsize);
if (ret)
return ret;
ret = do_sys_poll(ufds, nfds, to);
restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
/* We can restart this syscall, usually */
if (ret == -EINTR)
ret = -ERESTARTNOHAND;
ret = poll_select_copy_remaining(&end_time, tsp, PT_TIMESPEC, ret);
return ret;
return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}
#endif
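
Net effect of the fs/select.c refactor above: do_poll() fails with -ERESTARTNOHAND directly, do_sys_poll() sizes its chunks with struct_size(), and every pselect/ppoll variant ends in one shared epilogue. That shared tail, sketched with names from this series:

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	/* poll_select_finish() restores the saved sigmask (skipped for
	 * -ERESTARTNOHAND, where signal delivery handles it) and copies
	 * the remaining timeout back to userspace */
	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);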


@ -1407,11 +1407,9 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct super_block *sb = dentry->d_sb;
struct ufs_sb_private_info *uspi= UFS_SB(sb)->s_uspi;
unsigned flags = UFS_SB(sb)->s_flags;
struct ufs_super_block_third *usb3;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
mutex_lock(&UFS_SB(sb)->s_lock);
usb3 = ubh_get_usb_third(uspi);
if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
buf->f_type = UFS2_MAGIC;


@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_TAINT(TAINT_WARN)
#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
#define __WARN() do { \
printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
} while (0)
#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
#define __WARN_printf_taint(taint, arg...) \
do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
#endif


@ -5,24 +5,70 @@
/* Keep includes the same across arches. */
#include <linux/mm.h>
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
/*
* The cache doesn't need to be flushed when TLB entries change because
* the cache is mapped to physical memory, not virtual memory
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
static inline void flush_cache_all(void)
{
}
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
static inline void flush_dcache_page(struct page *page)
{
}
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
}
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \

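Converting the no-op macros to empty static inlines is a type-safety change, not a behavioral one; a minimal illustration (name suffixed to avoid clashing with the real helper):

	struct mm_struct;

	/* an empty static inline still type-checks its argument, so
	 * passing, say, an int now warns even on !MMU builds; the old
	 * "do { } while (0)" macros accepted anything */
	static inline void flush_cache_mm_example(struct mm_struct *mm)
	{
	}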

@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
#include <linux/const.h>
#include <asm/bitsperlong.h>
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT(nr) (UL(1) << (nr))
#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
@ -17,10 +19,11 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) \
(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
(((~UL(0)) - (UL(1) << (l)) + 1) & \
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
(((~0ULL) - (1ULL << (l)) + 1) & \
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif /* __LINUX_BITS_H */
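
The switch to UL()/ULL() is what makes this header usable from assembly files, where integer suffixes like UL are not valid tokens; a worked expansion of the documented example:

	/* GENMASK(39, 21) with 64-bit long:
	 *   ~0UL - (1UL << 21) + 1  ==  0xffffffffffe00000  (clears bits < 21)
	 *   ~0UL >> (64 - 1 - 39)   ==  0x000000ffffffffff  (clears bits > 39)
	 *   ANDed together          ==  0x000000ffffe00000
	 * UL()/ULL() from <linux/const.h> expand without the suffix
	 * under __ASSEMBLY__ and with it in C, so both sides agree. */
	#define EXAMPLE_MASK	GENMASK(39, 21)	/* 0x000000ffffe00000 */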


@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
#if defined(__linux__)
typedef unsigned long long u_quad_t;
#endif
#include <uapi/linux/coda.h>
#endif


@ -138,8 +138,7 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
sigset_t *set, sigset_t *oldset,
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
struct compat_sigaction {


@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
void __init ioremap_huge_init(void);
int arch_ioremap_p4d_supported(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);
#else


@ -88,6 +88,8 @@
*/
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define typeof_member(T, m) typeof(((T*)0)->m)
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \


@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
}
#endif
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
if (!kprobes_built_in())
return false;
if (user_mode(regs))
return false;
/*
* To be potentially processing a kprobe fault and to be allowed
* to call kprobe_running(), we have to be non-preemptible.
*/
if (preemptible())
return false;
if (!kprobe_running())
return false;
return kprobe_fault_handler(regs, trap);
}
#endif /* _LINUX_KPROBES_H */
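
A hedged sketch of how an arch fault handler consumes the new helper; the handler name is illustrative, and X86_TRAP_PF stands in for whatever trap number the architecture passes:

	static void do_page_fault_example(struct pt_regs *regs)
	{
		/* bail out early if a registered kprobe owns this fault */
		if (kprobe_page_fault(regs, X86_TRAP_PF))
			return;
		/* ... normal fault handling continues ... */
	}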


@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
* Decompresses data fom 'source' into 'dest'.
* Decompresses data from 'source' into 'dest'.
* If the source stream is detected malformed, the function will
* stop decoding and return a negative result.
* This function is protected against buffer overflow exploits,
@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize);
/**
* LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
* LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
* These decoding function allows decompression of multiple blocks
* This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
*
* These decoding function allows decompression of multiple blocks
* This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
* These decoding function works the same as
* This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
* It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
* It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
* These decoding function works the same as
* This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
* It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
* LZ4_decompress_fast_continue()
* It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)


@ -324,7 +324,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
#else
@ -341,7 +341,11 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
return -EINVAL;
}
static inline void remove_memory(int nid, u64 start, u64 size) {}
static inline int remove_memory(int nid, u64 start, u64 size)
{
return -EBUSY;
}
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
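
With the int return, hot-remove callers can finally act on failure, while the void __remove_memory() visibly keeps the must-succeed form. A hedged usage sketch with illustrative variables:

	int rc = remove_memory(nid, start, size);	/* may fail */
	if (rc)
		pr_warn("memory hot-remove failed: %d\n", rc);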


@ -547,7 +547,7 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
struct mmu_gather;
struct inode;
#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
return 0;
@ -956,41 +956,28 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
}
static inline bool is_device_private_page(const struct page *page)
{
return is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
#ifdef CONFIG_PCI_P2PDMA
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
#else /* CONFIG_PCI_P2PDMA */
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return false;
}
#endif /* CONFIG_PCI_P2PDMA */
#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool put_devmap_managed_page(struct page *page)
{
return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page)
{
return false;
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return false;
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
IS_ENABLED(CONFIG_PCI_P2PDMA) &&
is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
@ -1556,6 +1543,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
@ -1763,7 +1754,7 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
#ifndef __HAVE_ARCH_PTE_DEVMAP
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
return 0;
@ -2767,7 +2758,13 @@ extern int randomize_va_space;
#endif
const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif
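
With the !CONFIG_MMU stub in place, callers need no ifdef of their own; for instance, an x86-flavoured caller (regs->ip is arch-specific, shown purely for illustration):

	/* On !MMU kernels the empty inline above compiles to nothing. */
	print_vma_addr(KERN_CONT " in ", regs->ip);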
void *sparse_buffer_alloc(unsigned long size);
struct page *sparse_mem_map_populate(unsigned long pnum, int nid,


@ -97,7 +97,7 @@ static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
#endif
#endif
#ifdef __HAVE_ARCH_PTE_DEVMAP
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
const u64 flags = PFN_DEV|PFN_MAP;
@ -115,7 +115,7 @@ pmd_t pmd_mkdevmap(pmd_t pmd);
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pud_mkdevmap(pud_t pud);
#endif
#endif /* __HAVE_ARCH_PTE_DEVMAP */
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
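
An architecture opts in by selecting CONFIG_ARCH_HAS_PTE_DEVMAP and supplying the real helpers; generic code can then test unconditionally. A hedged sketch of such a caller, where ptep comes from an assumed page-table walk:

	pte_t pte = READ_ONCE(*ptep);

	if (pte_devmap(pte)) {
		/* ZONE_DEVICE mapping: this branch is constant-false
		 * (and dead code) on architectures that don't select
		 * ARCH_HAS_PTE_DEVMAP. */
	}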
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline bool pfn_t_special(pfn_t pfn)


@ -4,6 +4,7 @@
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/refcount.h>
enum pid_type
{
@ -57,7 +58,7 @@ struct upid {
struct pid
{
atomic_t count;
refcount_t count;
unsigned int level;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
@ -74,7 +75,7 @@ extern const struct file_operations pidfd_fops;
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
atomic_inc(&pid->count);
refcount_inc(&pid->count);
return pid;
}
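
refcount_t traps increment-from-zero and over/underflow where atomic_t would silently wrap. The matching release side uses refcount_dec_and_test(); a simplified sketch, with free_pid_struct() standing in for the real cache-free and namespace-put:

	static void put_pid_sketch(struct pid *pid)
	{
		if (pid && refcount_dec_and_test(&pid->count))
			free_pid_struct(pid);	/* hypothetical helper */
	}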


@ -21,7 +21,7 @@
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)
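
For context, these constants are written into a node as it is unlinked, so a use-after-delete faults on a recognizable address; roughly as list_del() does it:

	static inline void list_del(struct list_head *entry)
	{
		__list_del(entry->prev, entry->next);	/* splice out */
		entry->next = LIST_POISON1;	/* 0xdead...0100-based */
		entry->prev = LIST_POISON2;	/* now 0xdead...0122-based */
	}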
/********** include/linux/timer.h **********/
/*


@ -32,25 +32,9 @@ struct rb_root {
struct rb_node *rb_node;
};
/*
* Leftmost-cached rbtrees.
*
* We do not cache the rightmost node based on footprint
* size vs number of potential users that could benefit
* from O(1) rb_last(). Just not worth it, users that want
* this feature can always implement the logic explicitly.
* Furthermore, users that want to cache both pointers may
* find it a bit asymmetric, but that's ok.
*/
struct rb_root_cached {
struct rb_root rb_root;
struct rb_node *rb_leftmost;
};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
extern void rb_insert_color_cached(struct rb_node *,
struct rb_root_cached *, bool);
extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
typeof(*pos), field); 1; }); \
pos = n)
/*
* Leftmost-cached rbtrees.
*
* We do not cache the rightmost node based on footprint
* size vs number of potential users that could benefit
* from O(1) rb_last(). Just not worth it, users that want
* this feature can always implement the logic explicitly.
* Furthermore, users that want to cache both pointers may
* find it a bit asymmetric, but that's ok.
*/
struct rb_root_cached {
struct rb_root rb_root;
struct rb_node *rb_leftmost;
};
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
static inline void rb_insert_color_cached(struct rb_node *node,
struct rb_root_cached *root,
bool leftmost)
{
if (leftmost)
root->rb_leftmost = node;
rb_insert_color(node, &root->rb_root);
}
static inline void rb_erase_cached(struct rb_node *node,
struct rb_root_cached *root)
{
if (root->rb_leftmost == node)
root->rb_leftmost = rb_next(node);
rb_erase(node, &root->rb_root);
}
static inline void rb_replace_node_cached(struct rb_node *victim,
struct rb_node *new,
struct rb_root_cached *root)
{
if (root->rb_leftmost == victim)
root->rb_leftmost = new;
rb_replace_node(victim, new, &root->rb_root);
}
#endif /* _LINUX_RBTREE_H */
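
Moving the cached variants inline does not change how they are used; a minimal sketch of a leftmost-cached tree (struct demo_node and its key ordering are illustrative):

	struct demo_node {
		struct rb_node rb;
		unsigned long key;
	};

	static void demo_insert(struct rb_root_cached *root,
				struct demo_node *new)
	{
		struct rb_node **link = &root->rb_root.rb_node;
		struct rb_node *parent = NULL;
		bool leftmost = true;

		while (*link) {
			parent = *link;
			if (new->key < rb_entry(parent, struct demo_node,
						rb)->key) {
				link = &parent->rb_left;
			} else {
				link = &parent->rb_right;
				leftmost = false;	/* went right once */
			}
		}
		rb_link_node(&new->rb, parent, link);
		rb_insert_color_cached(&new->rb, root, leftmost);
	}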


@ -30,10 +30,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
extern void __rb_insert_augmented(struct rb_node *node,
struct rb_root *root,
bool newleft, struct rb_node **leftmost,
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
@ -48,7 +47,7 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
__rb_insert_augmented(node, root, false, NULL, augment->rotate);
__rb_insert_augmented(node, root, augment->rotate);
}
static inline void
@ -56,8 +55,9 @@ rb_insert_augmented_cached(struct rb_node *node,
struct rb_root_cached *root, bool newleft,
const struct rb_augment_callbacks *augment)
{
__rb_insert_augmented(node, &root->rb_root,
newleft, &root->rb_leftmost, augment->rotate);
if (newleft)
root->rb_leftmost = node;
rb_insert_augmented(node, &root->rb_root, augment);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@ -150,7 +150,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right;
@ -158,9 +157,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node *parent, *rebalance;
unsigned long pc;
if (leftmost && node == *leftmost)
*leftmost = rb_next(node);
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@ -260,8 +256,7 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *rebalance = __rb_erase_augmented(node, root,
NULL, augment);
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
@ -270,11 +265,9 @@ static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
&root->rb_leftmost,
augment);
if (rebalance)
__rb_erase_color(rebalance, &root->rb_root, augment->rotate);
if (root->rb_leftmost == node)
root->rb_leftmost = rb_next(node);
rb_erase_augmented(node, &root->rb_root, augment);
}
#endif /* _LINUX_RBTREE_AUGMENTED_H */
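
The leftmost bookkeeping thus moves out of the out-of-line core and into the inline wrappers, which is where the code-size win comes from. Roughly, under placeholder names (node, cached_tree, leftmost, cbs):

	rb_insert_augmented_cached(&node->rb, &cached_tree, leftmost, &cbs);

	/* ...now expands to approximately: */
	if (leftmost)
		cached_tree.rb_leftmost = &node->rb;
	rb_insert_augmented(&node->rb, &cached_tree.rb_root, &cbs);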
