percpu: kill lpage first chunk allocator
With x86 converted to the embedding allocator, lpage doesn't have any users left. Kill it along with the cpa handling code.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jan Beulich <JBeulich@novell.com>
arch/x86/mm/pageattr.c

@@ -687,7 +687,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
-	unsigned long vaddr, remapped;
+	unsigned long vaddr;
 	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
@@ -745,24 +745,6 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	}
 #endif
 
-	/*
-	 * If the PMD page was partially used for per-cpu remapping,
-	 * the recycled area needs to be split and modified.  Because
-	 * the area is always proper subset of a PMD page
-	 * cpa->numpages is guaranteed to be 1 for these areas, so
-	 * there's no need to loop over and check for further remaps.
-	 */
-	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
-	if (remapped) {
-		WARN_ON(cpa->numpages > 1);
-		alias_cpa = *cpa;
-		alias_cpa.vaddr = &remapped;
-		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
-		ret = __change_page_attr_set_clr(&alias_cpa, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 