[XFS] Use atomic counters for ktrace buffer indexes
ktrace_enter() is consuming vast amounts of CPU time due to the use of a
single global lock for protecting buffer index increments. Change it to use
per-buffer atomic counters - this reduces ktrace_enter() overhead during a
trace-intensive test on a 4p machine from 58% of all CPU time to 12% and
halves test runtime.

SGI-PV: 977546
SGI-Modid: xfs-linux-melb:xfs-kern:30537a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
commit 6ee4752ffe
parent 44d814ced4
committed by Lachlan McIlroy
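To make the locking change concrete before the diff, here is a minimal
user-space sketch of the per-buffer atomic index claim the patch adopts. It
assumes C11 <stdatomic.h>; trace_buf, trace_claim_slot() and NENTRIES are
hypothetical names for illustration, not the kernel code.

#include <stdatomic.h>
#include <stdio.h>

#define NENTRIES 16

struct trace_buf {
	atomic_int index;		/* per-buffer claim counter */
	int entries[NENTRIES];		/* the circular trace buffer */
};

/* Claim the next slot with a single atomic RMW -- no global lock. */
static int trace_claim_slot(struct trace_buf *tb)
{
	/* atomic_fetch_add() returns the pre-increment value, i.e. the
	 * same slot as the kernel's atomic_add_return(1, ...) - 1. */
	int raw = atomic_fetch_add(&tb->index, 1);
	return raw % NENTRIES;		/* wrap into the ring */
}

int main(void)
{
	struct trace_buf tb = { .index = 0 };
	int i;

	for (i = 0; i < 20; i++)
		tb.entries[trace_claim_slot(&tb)] = i;

	printf("raw index after 20 claims: %d\n", atomic_load(&tb.index));
	return 0;
}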
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -92,7 +92,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 
 	ktp->kt_entries = ktep;
 	ktp->kt_nentries = nentries;
-	ktp->kt_index = 0;
+	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
 }
@@ -151,8 +151,6 @@ ktrace_enter(
 	void		*val14,
 	void		*val15)
 {
-	static DEFINE_SPINLOCK(wrap_lock);
-	unsigned long	flags;
 	int		index;
 	ktrace_entry_t	*ktep;
 
@@ -161,12 +159,8 @@ ktrace_enter(
 	/*
 	 * Grab an entry by pushing the index up to the next one.
 	 */
-	spin_lock_irqsave(&wrap_lock, flags);
-	index = ktp->kt_index;
-	if (++ktp->kt_index == ktp->kt_nentries)
-		ktp->kt_index = 0;
-	spin_unlock_irqrestore(&wrap_lock, flags);
+	index = atomic_add_return(1, &ktp->kt_index);
+	index = (index - 1) % ktp->kt_nentries;
 
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
 
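A note on the replacement arithmetic (our gloss, not part of the commit):
atomic_add_return() returns the post-increment value, so subtracting one
recovers the slot this caller just claimed, and the modulo wraps it into the
buffer. With kt_nentries = 4, for instance, successive callers see raw values
1, 2, 3, 4, 5 and land in slots 0, 1, 2, 3, 0; the rollover flag still flips
the first time a caller claims the last slot.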
@@ -199,11 +193,12 @@ int
 ktrace_nentries(
 	ktrace_t	*ktp)
 {
-	if (ktp == NULL) {
+	int	index;
+	if (ktp == NULL)
 		return 0;
-	}
 
-	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+	index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
 /*
@@ -228,7 +223,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
 	int		nentries;
 
 	if (ktp->kt_rollover)
-		index = ktp->kt_index;
+		index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
 	else
 		index = 0;
 
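Because kt_index now counts up without ever wrapping, readers must reduce it
modulo the buffer size themselves - which is why ktrace_nentries() and
ktrace_first() above both apply % ktp->kt_nentries to the value obtained via
atomic_read().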
--- a/fs/xfs/support/ktrace.h
+++ b/fs/xfs/support/ktrace.h
@@ -30,7 +30,7 @@ typedef struct ktrace_entry {
  */
 typedef struct ktrace {
 	int		kt_nentries;	/* number of entries in trace buf */
-	int		kt_index;	/* current index in entries */
+	atomic_t	kt_index;	/* current index in entries */
 	int		kt_rollover;
 	ktrace_entry_t	*kt_entries;	/* buffer of entries */
 } ktrace_t;