[IA64] - Avoid slow TLB purges on SGI Altix systems
flush_tlb_all() can be a scaling issue on large SGI Altix systems, since it uses the global call_lock and always executes on all cpus. When a process enters flush_tlb_range() to purge TLBs for another process, it is possible to avoid flush_tlb_all() and instead allow sn2_global_tlb_purge() to purge TLBs only where necessary.

This patch modifies flush_tlb_range() so that this case can be handled by platform TLB purge functions and updates ia64_global_tlb_purge() accordingly. sn2_global_tlb_purge() now calculates the region register value from the mm argument introduced with this patch.

Signed-off-by: Dean Roe <roe@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
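The core trick is visible in the sn2 hunks below: the RID that sn2_global_tlb_purge() used to read out of the live region register can be rebuilt from the mm itself, so a foreign address space can be purged without making it current. A minimal sketch of that equivalence (illustrative only, not part of the patch; it assumes the usual IA64 layout where a region register holds the RID at bit 8 and the kernel forms the RID as context << 3 plus the 3-bit region number):

/* Illustrative sketch. REGION_NUMBER() is the existing IA64 macro that
 * extracts the top 3 bits of a virtual address. */

/* Old approach: read the live region register. Only correct when
 * mm == current->active_mm, since region registers are per address space. */
static unsigned long rid_from_live_rr(unsigned long start)
{
	return ia64_get_rr(start) >> 8;		/* strip ve/ps bits, keep the RID */
}

/* New approach: rebuild the identical RID from any mm_struct. */
static unsigned long rid_from_mm(struct mm_struct *mm, unsigned long start)
{
	return (mm->context << 3) | REGION_NUMBER(start);
}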
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -86,10 +86,15 @@ wrap_mmu_context (struct mm_struct *mm)
 }
 
 void
-ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
+ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits)
 {
 	static DEFINE_SPINLOCK(ptcg_lock);
 
+	if (mm != current->active_mm) {
+		flush_tlb_all();
+		return;
+	}
+
 	/* HW requires global serialization of ptc.ga.  */
 	spin_lock(&ptcg_lock);
 	{
@@ -135,15 +140,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	unsigned long size = end - start;
 	unsigned long nbits;
 
+#ifndef CONFIG_SMP
 	if (mm != current->active_mm) {
-		/* this does happen, but perhaps it's not worth optimizing for? */
-#ifdef CONFIG_SMP
-		flush_tlb_all();
-#else
 		mm->context = 0;
-#endif
 		return;
 	}
+#endif
 
 	nbits = ia64_fls(size + 0xfff);
 	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
@@ -153,7 +155,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	start &= ~((1UL << nbits) - 1);
 
 # ifdef CONFIG_SMP
-	platform_global_tlb_purge(start, end, nbits);
+	platform_global_tlb_purge(mm, start, end, nbits);
 # else
 	do {
 		ia64_ptcl(start, (nbits<<2));
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -177,6 +177,7 @@ void sn_tlb_migrate_finish(struct mm_struct *mm)
 
 /**
  * sn2_global_tlb_purge - globally purge translation cache of virtual address range
+ * @mm: mm_struct containing virtual address range
  * @start: start of virtual address range
  * @end: end of virtual address range
  * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
@@ -188,21 +189,22 @@ void sn_tlb_migrate_finish(struct mm_struct *mm)
  *	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
  *	- cpu_vm_mask is converted into a nodemask of the nodes containing the
  *	  cpus in cpu_vm_mask.
- *	- if only one bit is set in cpu_vm_mask & it is the current cpu,
- *	  then only the local TLB needs to be flushed. This flushing can be done
- *	  using ptc.l. This is the common case & avoids the global spinlock.
+ *	- if only one bit is set in cpu_vm_mask & it is the current cpu & the
+ *	  process is purging its own virtual address range, then only the
+ *	  local TLB needs to be flushed. This flushing can be done using
+ *	  ptc.l. This is the common case & avoids the global spinlock.
  *	- if multiple cpus have loaded the context, then flushing has to be
  *	  done with ptc.g/MMRs under protection of the global ptc_lock.
  */
 
 void
-sn2_global_tlb_purge(unsigned long start, unsigned long end,
-	unsigned long nbits)
+sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
+	unsigned long end, unsigned long nbits)
 {
 	int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
+	int mymm = (mm == current->active_mm);
 	volatile unsigned long *ptc0, *ptc1;
-	unsigned long itc, itc2, flags, data0 = 0, data1 = 0;
-	struct mm_struct *mm = current->active_mm;
+	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
 	short nasids[MAX_NUMNODES], nix;
 	nodemask_t nodes_flushed;
 
@@ -216,9 +218,12 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
 		i++;
 	}
 
+	if (i == 0)
+		return;
+
 	preempt_disable();
 
-	if (likely(i == 1 && lcpu == smp_processor_id())) {
+	if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
 		do {
 			ia64_ptcl(start, nbits << 2);
 			start += (1UL << nbits);
@@ -229,7 +234,7 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
 		return;
 	}
 
-	if (atomic_read(&mm->mm_users) == 1) {
+	if (atomic_read(&mm->mm_users) == 1 && mymm) {
 		flush_tlb_mm(mm);
 		__get_cpu_var(ptcstats).change_rid++;
 		preempt_enable();
@@ -241,11 +246,13 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
 	for_each_node_mask(cnode, nodes_flushed)
 		nasids[nix++] = cnodeid_to_nasid(cnode);
 
+	rr_value = (mm->context << 3) | REGION_NUMBER(start);
+
 	shub1 = is_shub1();
 	if (shub1) {
 		data0 = (1UL << SH1_PTC_0_A_SHFT) |
 			(nbits << SH1_PTC_0_PS_SHFT) |
-			((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
+			(rr_value << SH1_PTC_0_RID_SHFT) |
 			(1UL << SH1_PTC_0_START_SHFT);
 		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
 		ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
@@ -254,7 +261,7 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
 			(nbits << SH2_PTC_PS_SHFT) |
 			(1UL << SH2_PTC_START_SHFT);
 		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
-			((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT) );
+			(rr_value << SH2_PTC_RID_SHFT));
 		ptc1 = NULL;
 	}
 
@@ -275,7 +282,7 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
 		data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
 		for (i = 0; i < nix; i++) {
 			nasid = nasids[i];
-			if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid)) {
+			if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid && mymm)) {
 				ia64_ptcga(start, nbits << 2);
 				ia64_srlz_i();
 			} else {
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -26,7 +26,7 @@ typedef void ia64_mv_cpu_init_t (void);
 typedef void ia64_mv_irq_init_t (void);
 typedef void ia64_mv_send_ipi_t (int, int, int, int);
 typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
-typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
 typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
 typedef unsigned int ia64_mv_local_vector_to_irq (u8);
 typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
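Taken together, the mymm checks guard exactly the paths that consult the live region registers (the ptc.l loop and the local ptc.ga shortcut), while the MMR-based purge carries the RID explicitly in rr_value and therefore works for a foreign mm. A condensed sketch of the resulting control flow; the helper names here are hypothetical stand-ins for the logic in the diff above, not kernel functions:

/* Hypothetical helpers summarizing blocks of sn2_global_tlb_purge(). */
extern int  no_cpu_has_loaded_context(struct mm_struct *mm);
extern int  only_this_cpu_loaded_context(struct mm_struct *mm);
extern void purge_with_ptcl(unsigned long start, unsigned long end,
			    unsigned long nbits);
extern void purge_with_mmrs(struct mm_struct *mm, unsigned long start,
			    unsigned long end, unsigned long nbits);

void sn2_purge_sketch(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long nbits)
{
	int mymm = (mm == current->active_mm);

	if (no_cpu_has_loaded_context(mm))
		return;			/* new early exit: nothing to purge */

	if (only_this_cpu_loaded_context(mm) && mymm) {
		/* ptc.l resolves the RID through the live region registers,
		 * so it is valid only for our own address space. */
		purge_with_ptcl(start, end, nbits);
		return;
	}

	if (atomic_read(&mm->mm_users) == 1 && mymm) {
		flush_tlb_mm(mm);	/* sole user: cheaper to take a new RID */
		return;
	}

	/* MMR path: the RID travels in rr_value, so a foreign mm can be
	 * purged without flushing every cpu via flush_tlb_all(). */
	purge_with_mmrs(mm, start, end, nbits);
}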