x86: Support for this_cpu_add, sub, dec, inc_return
Supply an implementation for x86 in order to generate more efficient code.

V2->V3:
- Cleanup
- Remove strange type checking from percpu_add_return_op.

tj:
- Dropped unused typedef from percpu_add_return_op().
- Renamed ret__ to paro_ret__ in percpu_add_return_op().
- Minor indentation adjustments.

Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 8f1d97c79e (parent a663ffff1d), committed by Tejun Heo
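For context, here is a minimal sketch of how the new add-return family is typically used on a fast path; the per-cpu variable, threshold, and calling function are hypothetical and not part of this patch:

#include <linux/percpu.h>
#include <linux/printk.h>

/* Hypothetical per-CPU event counter bumped on a fast path. */
static DEFINE_PER_CPU(int, nr_events);

static void record_event(void)
{
	int count;

	/*
	 * this_cpu_add_return() adds to this CPU's instance of the
	 * variable and returns the new value.  With this patch the
	 * whole operation compiles down to a single xadd on a
	 * segment-prefixed address instead of the generic
	 * disable-preemption/add/read/enable-preemption sequence.
	 */
	count = this_cpu_add_return(nr_events, 1);

	if (count > 1000)
		pr_info("this CPU has seen %d events\n", count);
}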
@@ -177,6 +177,39 @@ do { \
 	} \
 } while (0)
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val) \
+({ \
+	typeof(var) paro_ret__ = val; \
+	switch (sizeof(var)) { \
+	case 1: \
+		asm("xaddb %0, "__percpu_arg(1) \
+			    : "+q" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	case 2: \
+		asm("xaddw %0, "__percpu_arg(1) \
+			    : "+r" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	case 4: \
+		asm("xaddl %0, "__percpu_arg(1) \
+			    : "+r" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	case 8: \
+		asm("xaddq %0, "__percpu_arg(1) \
+			    : "+re" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	default: __bad_percpu_size(); \
+	} \
+	paro_ret__ += val; \
+	paro_ret__; \
+})
+
 #define percpu_from_op(op, var, constraint) \
 ({ \
 	typeof(var) pfo_ret__; \
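The macro returns the post-addition value even though xadd itself leaves the value the memory operand held before the addition in the register; that is why it ends with paro_ret__ += val. The following standalone userspace sketch (not part of the patch; plain C with GCC inline assembly on an ordinary variable rather than a per-cpu one) illustrates the semantics:

#include <stdio.h>

/*
 * "xaddl %0, %1" adds the register into memory and leaves the *old*
 * memory value in the register, so the post-add value is old + val --
 * exactly the adjustment percpu_add_return_op() makes at its end.
 */
static int xadd_return(int *mem, int val)
{
	int old = val;

	asm("xaddl %0, %1"
	    : "+r" (old), "+m" (*mem)
	    : : "memory");

	return old + val;
}

int main(void)
{
	int counter = 40;

	printf("returned: %d\n", xadd_return(&counter, 2));	/* 42 */
	printf("counter:  %d\n", counter);			/* 42 */
	return 0;
}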
@@ -300,6 +333,14 @@ do { \
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#endif
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
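The #ifndef CONFIG_M386 guard is needed because xadd first appeared on the i486, so 386-only builds keep using the generic implementation introduced in the parent commit a663ffff1d. From memory, that fallback looks roughly like the sketch below (treat the exact spelling as an assumption); the x86 version above folds the preempt toggles, the add, and the read into one xadd:

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	preempt_enable();						\
	ret__;								\
})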
@@ -324,6 +365,8 @@ do { \
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */