Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6:
  deal with races in /proc/*/{syscall,stack,personality}
  proc: enable writing to /proc/pid/mem
  proc: make check_mem_permission() return an mm_struct on success
  proc: hold cred_guard_mutex in check_mem_permission()
  proc: disable mem_write after exec
  mm: implement access_remote_vm
  mm: factor out main logic of access_process_vm
  mm: use mm_struct to resolve gate vma's in __get_user_pages
  mm: arch: rename in_gate_area_no_task to in_gate_area_no_mm
  mm: arch: make in_gate_area take an mm_struct instead of a task_struct
  mm: arch: make get_gate_vma take an mm_struct instead of a task_struct
  x86: mark associated mm when running a task in 32 bit compatibility mode
  x86: add context tag to mark mm when running a task in 32-bit compatibility mode
  auxv: require the target to be tracable (or yourself)
  close race in /proc/*/environ
  report errors in /proc/*/*map* sanely
  pagemap: close races with suid execve
  make sessionid permissions in /proc/*/task/* match those in /proc/*
  fix leaks in path_lookupat()

Fix up trivial conflicts in fs/proc/base.c
 mm/memory.c | 73
@@ -1486,9 +1486,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 struct vm_area_struct *vma;
 
                 vma = find_extend_vma(mm, start);
-                if (!vma && in_gate_area(tsk, start)) {
+                if (!vma && in_gate_area(mm, start)) {
                         unsigned long pg = start & PAGE_MASK;
-                        struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+                        struct vm_area_struct *gate_vma = get_gate_vma(mm);
                         pgd_t *pgd;
                         pud_t *pud;
                         pmd_t *pmd;
@@ -1591,10 +1591,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 return i ? i : -EFAULT;
                         BUG();
                 }
-                if (ret & VM_FAULT_MAJOR)
-                        tsk->maj_flt++;
-                else
-                        tsk->min_flt++;
+
+                if (tsk) {
+                        if (ret & VM_FAULT_MAJOR)
+                                tsk->maj_flt++;
+                        else
+                                tsk->min_flt++;
+                }
 
                 if (ret & VM_FAULT_RETRY) {
                         if (nonblocking)
@@ -1641,7 +1644,8 @@ EXPORT_SYMBOL(__get_user_pages);
 
 /**
  * get_user_pages() - pin user pages in memory
- * @tsk:       task_struct of target task
+ * @tsk:       the task_struct to use for page fault accounting, or
+ *             NULL if faults are not to be recorded.
  * @mm:        mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
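The kernel-doc change above reflects that __get_user_pages()/get_user_pages() now tolerate a NULL task: a caller that only has an mm_struct can still pin pages, and major/minor fault accounting is simply skipped. A minimal sketch of such a call, assuming this era's get_user_pages() signature and an mm/addr the caller already holds valid references to (illustrative, not part of this patch):

        /* Illustrative only: pin one page of a foreign mm without a task_struct.
         * With tsk == NULL, maj_flt/min_flt accounting is skipped.
         */
        struct page *page;
        struct vm_area_struct *vma;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(NULL, mm, addr, 1, 0 /* write */, 0 /* force */,
                             &page, &vma);
        up_read(&mm->mmap_sem);
        if (ret == 1) {
                /* ... use the page ... */
                put_page(page);
        }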
@@ -3499,7 +3503,7 @@ static int __init gate_vma_init(void)
 __initcall(gate_vma_init);
 #endif
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef AT_SYSINFO_EHDR
         return &gate_vma;
@@ -3508,7 +3512,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 #endif
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 #ifdef AT_SYSINFO_EHDR
         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
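Together with the in_gate_area(mm, start) change in the first hunk, the gate-area helpers are now keyed off an mm_struct rather than a task_struct, and the no-context variant is renamed to match. A hedged before/after sketch of the call forms, assuming a caller with an mm and a user address in hand (illustrative, not from the patch):

        /* Illustrative only: gate-area (e.g. x86 vsyscall page) lookups after
         * this series, with the old call forms shown in comments.
         */
        struct vm_area_struct *gate_vma;

        gate_vma = get_gate_vma(mm);            /* was: get_gate_vma(tsk) */
        if (in_gate_area(mm, addr))             /* was: in_gate_area(tsk, addr) */
                ;       /* addr lies in this mm's gate area */
        if (in_gate_area_no_mm(addr))           /* was: in_gate_area_no_task(addr) */
                ;       /* arch-level check when no mm is available */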
@@ -3649,20 +3653,15 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 #endif
 
 /*
- * Access another process' address space.
- * Source/target buffer must be kernel space,
- * Do not walk the page table directly, use get_user_pages
+ * Access another process' address space as given in mm. If non-NULL, use the
+ * given task for page fault accounting.
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+                unsigned long addr, void *buf, int len, int write)
 {
-        struct mm_struct *mm;
         struct vm_area_struct *vma;
         void *old_buf = buf;
 
-        mm = get_task_mm(tsk);
-        if (!mm)
-                return 0;
-
         down_read(&mm->mmap_sem);
         /* ignore errors, just check how much was successfully transferred */
         while (len) {
@@ -3711,11 +3710,47 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
                 addr += bytes;
         }
         up_read(&mm->mmap_sem);
-        mmput(mm);
 
         return buf - old_buf;
 }
 
+/**
+ * @access_remote_vm - access another process' address space
+ * @mm:         the mm_struct of the target address space
+ * @addr:       start address to access
+ * @buf:        source or destination buffer
+ * @len:        number of bytes to transfer
+ * @write:      whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+                void *buf, int len, int write)
+{
+        return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * Source/target buffer must be kernel space,
+ * Do not walk the page table directly, use get_user_pages
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr,
+                void *buf, int len, int write)
+{
+        struct mm_struct *mm;
+        int ret;
+
+        mm = get_task_mm(tsk);
+        if (!mm)
+                return 0;
+
+        ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+        mmput(mm);
+
+        return ret;
+}
+
 /*
  * Print the name of a VMA.
  */
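For reference, a minimal sketch of how a caller might use the new access_remote_vm() interface, assuming the mm was obtained earlier (for instance via get_task_mm()) so that the reference required by the kernel-doc above is already held; the buffer size and read direction here are purely illustrative:

        /* Illustrative only: read a few bytes from a foreign address space
         * through access_remote_vm().  "mm" is assumed to carry a reference
         * taken earlier, e.g. by get_task_mm().
         */
        char buf[64];
        int copied;

        copied = access_remote_vm(mm, addr, buf, sizeof(buf), 0 /* read */);
        if (copied < (int)sizeof(buf))
                ;       /* partial transfer: only "copied" bytes arrived */

        mmput(mm);      /* drop the reference taken by get_task_mm() */

This mirrors what the access_process_vm() wrapper in the last hunk does internally, minus the get_task_mm()/mmput() pairing it performs on the caller's behalf.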