Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
This commit is contained in:
16
mm/hugetlb.c
16
mm/hugetlb.c
@@ -146,7 +146,7 @@ static long region_chg(struct list_head *head, long f, long t)
 		if (rg->from > t)
 			return chg;
 
-		/* We overlap with this area, if it extends futher than
+		/* We overlap with this area, if it extends further than
 		 * us then we must extend ourselves.  Account for its
 		 * existing reservation. */
 		if (rg->to > t) {
@@ -842,7 +842,7 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
- * Increase the hugetlb pool such that it can accomodate a reservation
+ * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
@@ -890,7 +890,7 @@ retry:
 
 	/*
 	 * The surplus_list now contains _at_least_ the number of extra pages
-	 * needed to accomodate the reservation.  Add the appropriate number
+	 * needed to accommodate the reservation.  Add the appropriate number
 	 * of pages to the hugetlb pool and free the extras back to the buddy
 	 * allocator.  Commit the entire reservation here to prevent another
 	 * process from stealing the pages as they are added to the pool but
@@ -1872,8 +1872,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	unsigned long tmp;
 	int ret;
 
-	if (!write)
-		tmp = h->max_huge_pages;
+	tmp = h->max_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
 		return -EINVAL;
@@ -1938,8 +1937,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 	unsigned long tmp;
 	int ret;
 
-	if (!write)
-		tmp = h->nr_overcommit_huge_pages;
+	tmp = h->nr_overcommit_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
 		return -EINVAL;
@@ -2045,7 +2043,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	 * This new VMA should share its siblings reservation map if present.
 	 * The VMA will only ever have a valid reservation map pointer where
 	 * it is being copied for another still existing VMA.  As that VMA
-	 * has a reference to the reservation map it cannot dissappear until
+	 * has a reference to the reservation map it cannot disappear until
 	 * after this open call completes.  It is therefore safe to take a
 	 * new reference here without additional locking.
 	 */
@@ -2492,7 +2490,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/*
 	 * Currently, we are forced to kill the process in the event the
 	 * original mapper has unmapped pages from the child due to a failed
-	 * COW. Warn that such a situation has occured as it may not be obvious
+	 * COW. Warn that such a situation has occurred as it may not be obvious
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
 		printk(KERN_WARNING
Reference in New Issue
Block a user