Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in the core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many
num_possible_cpus() calls with nr_cpu_ids.  As the for-next branch has
moved all the first chunk allocators into mm/percpu.c, those changes
are moved from arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
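The substitution mentioned above matters because num_possible_cpus() counts the set bits in the possible-CPU mask, while nr_cpu_ids is the highest possible CPU id plus one; when the possible id space is sparse, per-CPU arrays sized by nr_cpu_ids cover every valid id, which is why the first-chunk code in mm/percpu.c prefers it. The following is a minimal stand-alone sketch of that distinction, not kernel code: the *_model functions and the toy cpu_possible_mask are illustrative assumptions, not the kernel API.

/*
 * Sketch: why num_possible_cpus() and nr_cpu_ids can differ.
 * Build with: cc -o cpuid-sketch cpuid-sketch.c
 */
#include <stdio.h>

#define NR_CPUS 8

/* Toy possible mask with holes: ids 0, 2, 4 and 7 are possible. */
static const int cpu_possible_mask[NR_CPUS] = { 1, 0, 1, 0, 1, 0, 0, 1 };

/* Models num_possible_cpus(): number of set bits in the mask. */
static int num_possible_cpus_model(void)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		n += cpu_possible_mask[cpu];
	return n;
}

/* Models nr_cpu_ids: highest possible CPU id plus one. */
static int nr_cpu_ids_model(void)
{
	int cpu, last = -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_possible_mask[cpu])
			last = cpu;
	return last + 1;
}

int main(void)
{
	/* Prints 4 vs 8: per-CPU data indexed by cpu id needs 8 slots. */
	printf("num_possible_cpus = %d, nr_cpu_ids = %d\n",
	       num_possible_cpus_model(), nr_cpu_ids_model());
	return 0;
}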
@@ -268,7 +268,12 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #undef __CSG_LOOP
-#endif
+
+#else /* __s390x__ */
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __s390x__ */
 
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
@@ -6,3 +6,5 @@
 
 static inline void set_perf_counter_pending(void) {}
 static inline void clear_perf_counter_pending(void) {}
+
+#define PERF_COUNTER_INDEX_OFFSET 0
@@ -61,7 +61,7 @@ struct thread_info {
 	.exec_domain = &default_exec_domain, \
 	.flags = 0, \
 	.cpu = 0, \
-	.preempt_count = 1, \
+	.preempt_count = INIT_PREEMPT_COUNT, \
 	.restart_block = { \
 		.fn = do_no_restart_syscall, \
 	}, \
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+				unsigned long address)
 {
 	if (!tlb->fullmm) {
 		tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
  * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
  * to avoid the double free of the pmd in this case.
  */
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				unsigned long address)
 {
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
  * as the pgd. pud_free_tlb checks the asce_limit against 4TB
  * to avoid the double free of the pud in this case.
  */
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				unsigned long address)
 {
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))