sparc64: Fix cpu strand yielding.

For atomic backoff, we just loop over an exponentially backed-off
counter.  This is extremely ineffective as it doesn't actually yield
the cpu strand, so other competing strands cannot make use of the cpu
core.
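As a rough sketch of the old behaviour (illustrative C only, not the
kernel's actual code; backoff_spin is a made-up name):

	/* Hypothetical sketch: burn "count" iterations doing nothing.
	 * Nothing in this loop tells the hardware the strand is idle,
	 * so sibling strands on the same core gain nothing while we
	 * wait. */
	static void backoff_spin(unsigned long count)
	{
		while (count--)
			__asm__ __volatile__("" : : : "memory");
	}

Each failed atomic attempt then doubles the counter up to a cap,
spreading retries out in time while still monopolizing the strand.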

On cpus prior to SPARC-T4 we have to do this in a slightly hackish
way, by performing an operation with no side effects that also
happens to mark the strand as unavailable.

The mechanism we choose for this is three reads of the %ccr
(condition-code) register into %g0 (the zero register).
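Expressed as a C helper (a minimal sketch; strand_yield is an
illustrative name, not an actual kernel symbol):

	/* Three reads of %ccr into %g0.  Architecturally these are
	 * no-ops, but on Niagara-class cpus they hint that the strand
	 * is busy-waiting and may be de-prioritized in favor of its
	 * siblings. */
	static inline void strand_yield(void)
	{
		__asm__ __volatile__("rd	%%ccr, %%g0\n\t"
				     "rd	%%ccr, %%g0\n\t"
				     "rd	%%ccr, %%g0"
				     : : : "memory");
	}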

SPARC-T4 has an explicit "pause" instruction, and we'll make use of
that in a subsequent commit.
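For reference, that later path amounts to something like the
following sketch (an assumption based on the follow-up work, where
the pause is expressed as a write of a cycle count to %asr27;
cpu_pause is a made-up name and not part of this commit):

	/* Assumed SPARC-T4 form: pause the strand for roughly "cycles"
	 * cycles by writing to %asr27.  Not part of the present commit. */
	static inline void cpu_pause(unsigned long cycles)
	{
		__asm__ __volatile__("wr	%0, 0, %%asr27"
				     : : "r" (cycles) : "memory");
	}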

Also yield strands in cpu_relax().  We really should have done this a
very long time ago.
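For illustration, the usual caller pattern (spin_until_set is a
made-up example, but this is how cpu_relax() is used in spin-wait
loops throughout the kernel):

	/* With the old barrier() definition this loop hogged the strand
	 * at full rate; with the new definition each iteration yields it. */
	static void spin_until_set(volatile int *flag)
	{
		while (!*flag)
			cpu_relax();
	}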

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 270c10e00a (parent 517ffce4e1)
Author: David S. Miller <davem@davemloft.net>
Date:   2012-10-27 18:05:20 -07:00

2 changed files with 8 additions and 2 deletions

arch/sparc/include/asm/backoff.h

@@ -13,7 +13,10 @@
 
 #define BACKOFF_SPIN(reg, tmp, label)	\
 	mov	reg, tmp; \
-88:	brnz,pt	tmp, 88b; \
+88:	rd	%ccr, %g0; \
+	rd	%ccr, %g0; \
+	rd	%ccr, %g0; \
+	brnz,pt	tmp, 88b; \
 	 sub	tmp, 1, tmp; \
 	set	BACKOFF_LIMIT, tmp; \
 	cmp	reg, tmp; \
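For readability, here is the new inner loop again with illustrative
comments added (note that the sub sits in the branch delay slot, so
it executes on every iteration):

	88:	rd	%ccr, %g0	! yield the strand; no side effects
		rd	%ccr, %g0	! second of the three reads
		rd	%ccr, %g0	! third read
		brnz,pt	tmp, 88b	! loop while the counter is non-zero
		 sub	tmp, 1, tmp	! decrement, in the branch delay slot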

arch/sparc/include/asm/processor_64.h

@@ -196,7 +196,10 @@ extern unsigned long get_wchan(struct task_struct *task);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
-#define cpu_relax()	barrier()
+#define cpu_relax()	asm volatile("rd	%%ccr, %%g0\n\t" \
+				     "rd	%%ccr, %%g0\n\t" \
+				     "rd	%%ccr, %%g0" \
+				     ::: "memory")
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has