[IA64] Optional method to purge the TLB on SN systems
This patch adds an optional method for purging the TLB on SN IA64 systems. The change should not affect any non-SN system.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -39,6 +39,7 @@
 #include <asm/machvec.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>

 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
@@ -127,8 +128,10 @@ void destroy_irq(unsigned int irq)

 #ifdef CONFIG_SMP
 # define IS_RESCHEDULE(vec)		(vec == IA64_IPI_RESCHEDULE)
+# define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
 #else
 # define IS_RESCHEDULE(vec)		(0)
+# define IS_LOCAL_TLB_FLUSH(vec)	(0)
 #endif
 /*
  * That's where the IVT branches when we get an external
@@ -180,8 +183,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+			smp_local_flush_tlb();
+			kstat_this_cpu.irqs[vector]++;
+		} else if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
 		else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -227,8 +233,11 @@ void ia64_process_pending_intr(void)
 	  * Perform normal interrupt style processing
 	  */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+			smp_local_flush_tlb();
+			kstat_this_cpu.irqs[vector]++;
+		} else if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);

@@ -260,12 +269,12 @@ void ia64_process_pending_intr(void)


 #ifdef CONFIG_SMP
-extern irqreturn_t handle_IPI (int irq, void *dev_id);

 static irqreturn_t dummy_handler (int irq, void *dev_id)
 {
 	BUG();
 }
+extern irqreturn_t handle_IPI (int irq, void *dev_id);

 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
@@ -278,6 +287,13 @@ static struct irqaction resched_irqaction = {
 	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
+
+static struct irqaction tlb_irqaction = {
+	.handler =	dummy_handler,
+	.flags =	IRQF_DISABLED,
+	.name =		"tlb_flush"
+};
+
 #endif

 void
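A note on the wiring above: tlb_irqaction points at dummy_handler(), which BUG()s if it is ever invoked. That is intentional: the vector is consumed inline by the IS_LOCAL_TLB_FLUSH branches added to ia64_handle_irq() and ia64_process_pending_intr() before any irqaction would be dispatched, so registering the irqaction in init_IRQ() below only reserves the vector and gives it a name.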
@@ -303,6 +319,7 @@ init_IRQ (void)
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
+	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -49,6 +49,18 @@
 #include <asm/unistd.h>
 #include <asm/mca.h>

+/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+	unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
+
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }

+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+	/*
+	 * Use atomic ops. Otherwise, the load/increment/store sequence from
+	 * a "++" operation can have the line stolen between the load & store.
+	 * The overhead of the atomic op is negligible in this case & offers
+	 * significant benefit for the brief periods where lots of cpus
+	 * are simultaneously flushing TLBs.
+	 */
+	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+	local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY	5	/* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+	unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+	cpumask_t cpumask = xcpumask;
+	int mycpu, cpu, flush_mycpu = 0;
+
+	preempt_disable();
+	mycpu = smp_processor_id();
+
+	for_each_cpu_mask(cpu, cpumask)
+		counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+	mb();
+	for_each_cpu_mask(cpu, cpumask) {
+		if (cpu == mycpu)
+			flush_mycpu = 1;
+		else
+			smp_send_local_flush_tlb(cpu);
+	}
+
+	if (flush_mycpu)
+		smp_local_flush_tlb();
+
+	for_each_cpu_mask(cpu, cpumask)
+		while (counts[cpu] == local_tlb_flush_counts[cpu].count)
+			udelay(FLUSH_DELAY);
+
+	preempt_enable();
+}
+
 void
 smp_flush_tlb_all (void)
 {
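The handshake above is the heart of the patch: the requester snapshots each target cpu's flush counter into a per-cpu shadow array, sends the new IPI, then spins with a 5 usec backoff until every counter advances, which proves each target ran smp_local_flush_tlb(). Below is a minimal userspace model of that protocol, assuming nothing beyond POSIX threads and C11 atomics; every name in it (cpu_thread, flush_cpumask, NCPUS, ...) is invented for illustration, atomic_fetch_add stands in for ia64_fetchadd, and the default seq_cst ordering stands in for the mb() that separates snapshot from send.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4				/* model cpus; invented for this sketch */

static atomic_uint flush_count[NCPUS];		/* plays local_tlb_flush_counts[] */
static atomic_int flush_requested[NCPUS];	/* plays the TLB-flush IPI */
static atomic_int stop;

/* Target side: what the IS_LOCAL_TLB_FLUSH branch + smp_local_flush_tlb() do. */
static void *cpu_thread(void *arg)
{
	int me = (int)(long)arg;

	while (!atomic_load(&stop)) {
		if (atomic_exchange(&flush_requested[me], 0)) {
			/* local_flush_tlb_all() would run here */
			atomic_fetch_add(&flush_count[me], 1);	/* plays ia64_fetchadd */
		}
	}
	return NULL;
}

/* Requester side: the smp_flush_tlb_cpumask() protocol. */
static void flush_cpumask(unsigned long mask)
{
	unsigned int counts[NCPUS];	/* plays the shadow_flush_counts snapshot */
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* snapshot before sending */
		if (mask & (1UL << cpu))
			counts[cpu] = atomic_load(&flush_count[cpu]);

	for (cpu = 0; cpu < NCPUS; cpu++)	/* "send the IPIs" */
		if (mask & (1UL << cpu))
			atomic_store(&flush_requested[cpu], 1);

	for (cpu = 0; cpu < NCPUS; cpu++)	/* counter advance == flush done */
		while ((mask & (1UL << cpu)) &&
		       counts[cpu] == atomic_load(&flush_count[cpu]))
			usleep(5);	/* plays the FLUSH_DELAY backoff */
}

int main(void)
{
	pthread_t t[NCPUS];
	int i;

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)(long)i);
	flush_cpumask(0xf);
	printf("all %d model cpus flushed\n", NCPUS);
	atomic_store(&stop, 1);
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The shadow snapshot is the key design choice: only a counter's owner ever writes it, so waiters merely poll for any change instead of decrementing a shared completion count, which matches the patch's own comments about avoiding stolen cachelines.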
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -46,6 +46,9 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);

 static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);

+/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
+static int sn2_flush_opt = 0;
+
 extern unsigned long
 sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
 	volatile unsigned long *, unsigned long,
@@ -76,6 +79,8 @@ struct ptc_stats {
 	unsigned long shub_itc_clocks;
 	unsigned long shub_itc_clocks_max;
 	unsigned long shub_ptc_flushes_not_my_mm;
+	unsigned long shub_ipi_flushes;
+	unsigned long shub_ipi_flushes_itc_clocks;
 };

 #define sn2_ptctest	0
@@ -121,6 +126,18 @@ void sn_tlb_migrate_finish(struct mm_struct *mm)
 	flush_tlb_mm(mm);
 }

+static void
+sn2_ipi_flush_all_tlb(struct mm_struct *mm)
+{
+	unsigned long itc;
+
+	itc = ia64_get_itc();
+	smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+	itc = ia64_get_itc() - itc;
+	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
+	__get_cpu_var(ptcstats).shub_ipi_flushes++;
+}
+
 /**
  * sn2_global_tlb_purge - globally purge translation cache of virtual address range
  * @mm: mm_struct containing virtual address range
@@ -154,7 +171,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
 	short nasids[MAX_NUMNODES], nix;
 	nodemask_t nodes_flushed;
-	int active, max_active, deadlock;
+	int active, max_active, deadlock, flush_opt = sn2_flush_opt;
+
+	if (flush_opt > 2) {
+		sn2_ipi_flush_all_tlb(mm);
+		return;
+	}

 	nodes_clear(nodes_flushed);
 	i = 0;
@@ -189,6 +211,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		return;
 	}

+	if (flush_opt == 2) {
+		sn2_ipi_flush_all_tlb(mm);
+		preempt_enable();
+		return;
+	}
+
 	itc = ia64_get_itc();
 	nix = 0;
 	for_each_node_mask(cnode, nodes_flushed)
@@ -256,6 +284,8 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		}
 		if (active >= max_active || i == (nix - 1)) {
 			if ((deadlock = wait_piowc())) {
+				if (flush_opt == 1)
+					goto done;
 				sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
 				if (reset_max_active_on_deadlock())
 					max_active = 1;
@@ -267,6 +297,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		start += (1UL << nbits);
 	} while (start < end);

+done:
 	itc2 = ia64_get_itc() - itc2;
 	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
 	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
@@ -279,6 +310,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,

 	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);

+	if (flush_opt == 1 && deadlock) {
+		__get_cpu_var(ptcstats).deadlocks++;
+		sn2_ipi_flush_all_tlb(mm);
+	}
+
 	preempt_enable();
 }

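The sn2_flush_opt checks are spread over four hunks of sn2_global_tlb_purge(); taken together they implement the following dispatch. This is a comment-only C sketch paraphrasing the control flow, not code from the patch, and the helper name is invented:

/* Sketch: where each sn2_flush_opt value diverts sn2_global_tlb_purge(). */
static void sn2_flush_opt_summary(int flush_opt)
{
	if (flush_opt > 2) {
		/* always IPI: sn2_ipi_flush_all_tlb() at function entry,
		 * SHUB PTC never touched */
	} else if (flush_opt == 2) {
		/* keep the early local-flush fast path, but IPI-flush
		 * instead of broadcasting PTC writes to the SHUBs
		 * (must preempt_enable() before returning) */
	} else if (flush_opt == 1) {
		/* try SHUB PTC; if wait_piowc() reports a PIO deadlock,
		 * skip sn2_ptc_deadlock_recovery(), "goto done", and redo
		 * the whole flush with IPIs (counted in ptcstats.deadlocks) */
	} else {
		/* 0 (default): the old algorithm, unchanged */
	}
}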
@@ -425,24 +461,42 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)

 	if (!cpu) {
 		seq_printf(file,
-			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
-		seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_flushes ipi_nsec\n");
+		seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
 	}

 	if (cpu < NR_CPUS && cpu_online(cpu)) {
 		stat = &per_cpu(ptcstats, cpu);
-		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 			stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 			stat->deadlocks,
 			1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
 			1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
 			1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
 			stat->shub_ptc_flushes_not_my_mm,
-			stat->deadlocks2);
+			stat->deadlocks2,
+			stat->shub_ipi_flushes,
+			1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }

+static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
+{
+	int cpu;
+	char optstr[64];
+
+	if (copy_from_user(optstr, user, count))
+		return -EFAULT;
+	optstr[count - 1] = '\0';
+	sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
+
+	for_each_online_cpu(cpu)
+		memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
+
+	return count;
+}
+
 static struct seq_operations sn2_ptc_seq_ops = {
 	.start = sn2_ptc_seq_start,
 	.next = sn2_ptc_seq_next,
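For reading the new columns: every *_nsec value is ITC clocks scaled by 1000 / cyc_per_usec. For example, on a CPU whose ITC runs at 1400 cycles per microsecond, an accumulated shub_ipi_flushes_itc_clocks of 2,800,000 prints as 1000 * 2800000 / 1400 = 2,000,000 nsec, i.e. 2 ms spent in IPI flushes.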
@@ -458,6 +512,7 @@ static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
 static const struct file_operations proc_sn2_ptc_operations = {
 	.open = sn2_ptc_proc_open,
 	.read = seq_read,
+	.write = sn2_ptc_proc_write,
 	.llseek = seq_lseek,
 	.release = seq_release,
 };
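For completeness, a hedged usage sketch: assuming the seq_file hooked up here is the one sn2_ptc_init() registers as /proc/sgi_sn/ptc_statistics (that registration is not part of these hunks), selecting the deadlock-fallback mode (option 1) from userspace looks like this:

#include <stdio.h>

int main(void)
{
	/* Path assumed from sn2_ptc_init(); not shown in this patch. */
	FILE *f = fopen("/proc/sgi_sn/ptc_statistics", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1\n");	/* 1 = SHUB PTC with IPI fallback on deadlock */
	fclose(f);
	return 0;
}

Note that sn2_ptc_proc_write also zeroes every cpu's ptc_stats, so each written option starts a fresh measurement window.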
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -66,6 +66,7 @@ extern int ia64_last_device_vector;
 #define IA64_PERFMON_VECTOR		0xee	/* performance monitor interrupt vector */
 #define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
 #define IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_LOCAL_TLB_FLUSH	0xfc	/* SMP flush local TLB */
 #define IA64_IPI_RESCHEDULE		0xfd	/* SMP reschedule */
 #define IA64_IPI_VECTOR			0xfe	/* inter-processor interrupt vector */

diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -27,9 +27,11 @@ extern void local_flush_tlb_all (void);
 #ifdef CONFIG_SMP
   extern void smp_flush_tlb_all (void);
   extern void smp_flush_tlb_mm (struct mm_struct *mm);
+  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
 # define flush_tlb_all()	smp_flush_tlb_all()
 #else
 # define flush_tlb_all()	local_flush_tlb_all()
+# define smp_flush_tlb_cpumask(m)	local_flush_tlb_all()
 #endif

 static inline void
@@ -94,6 +96,15 @@ flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end
 	 */
 }

+/*
+ * Flush the local TLB.  Invoked from another cpu using an IPI.
+ */
+#ifdef CONFIG_SMP
+void smp_local_flush_tlb(void);
+#else
+#define smp_local_flush_tlb()
+#endif
+
 #define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */

 #endif /* _ASM_IA64_TLBFLUSH_H */