Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Prevent people from directly including <asm/rwsem.h>.
  [IA64] remove time interpolator
  [IA64] Convert to generic timekeeping/clocksource
  [IA64] refresh some config files for 64K pagesize
  [IA64] Delete iosapic_free_rte()
  [IA64] fallocate system call
  [IA64] Enable percpu vector domain for IA64_DIG
  [IA64] Enable percpu vector domain for IA64_GENERIC
  [IA64] Support irq migration across domain
  [IA64] Add support for vector domain
  [IA64] Add mapping table between irq and vector
  [IA64] Check if irq is sharable
  [IA64] Fix invalid irq vector assumption for iosapic
  [IA64] Use dynamic irq for iosapic interrupts
  [IA64] Use per iosapic lock for indirect iosapic register access
  [IA64] Cleanup lock order in iosapic_register_intr
  [IA64] Remove duplicated members in iosapic_rte_info
  [IA64] Remove block structure for locking in iosapic.c
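The central change in this merge is the switch from the ia64-private time
interpolator to the kernel's generic clocksource framework. A minimal sketch
of the registration pattern the patches below adopt (modeled on the cyclone
and ITC registrations in this diff; example_freq, example_hw_counter and
read_example() are hypothetical stand-ins, not names from this merge):

	#include <linux/clocksource.h>
	#include <linux/init.h>

	/* Hypothetical hardware counter frequency and state. */
	static u32 example_freq = 100000000;	/* 100 MHz, made up */
	static u64 example_hw_counter;		/* stand-in for real hardware */

	static cycle_t read_example(void)
	{
		return (cycle_t)example_hw_counter;	/* real code reads hw */
	}

	static struct clocksource clocksource_example = {
		.name   = "example",
		.rating = 200,			/* below itc (350), cyclone (300) */
		.read   = read_example,
		.mask   = CLOCKSOURCE_MASK(64),
		.shift  = 16,
		.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static int __init example_clocksource_init(void)
	{
		/* mult is derived from the counter frequency, exactly as
		 * cyclone.c and time.c do below */
		clocksource_example.mult =
			clocksource_hz2mult(example_freq,
					    clocksource_example.shift);
		return clocksource_register(&clocksource_example);
	}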
@@ -7,6 +7,7 @@
#define ASM_OFFSETS_C 1

#include <linux/sched.h>
#include <linux/clocksource.h>

#include <asm-ia64/processor.h>
#include <asm-ia64/ptrace.h>
@@ -15,6 +16,7 @@
#include <asm-ia64/mca.h>

#include "../kernel/sigframe.h"
#include "../kernel/fsyscall_gtod_data.h"

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -256,17 +258,24 @@ void foo(void)
	BLANK();

	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
	DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
	DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
	DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
	DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
	DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
	DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
	DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
	DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
	DEFINE(IA64_GTOD_LOCK_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, lock));
	DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, wall_time));
	DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, monotonic_time));
	DEFINE(IA64_CLKSRC_MASK_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_mask));
	DEFINE(IA64_CLKSRC_MULT_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_mult));
	DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_shift));
	DEFINE(IA64_CLKSRC_MMIO_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
	DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
	DEFINE(IA64_ITC_JITTER_OFFSET,
		offsetof (struct itc_jitter_data_t, itc_jitter));
	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
		offsetof (struct itc_jitter_data_t, itc_lastcycle));
}
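A note on the mechanism, since fsys.S depends on it: this file is never
linked into the kernel. It is compiled to assembly only, and each DEFINE()
plants a marker line that the build scripts grep into asm-offsets.h. Roughly
(the offset value 8 here is illustrative, not taken from this merge):

	DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, wall_time));

compiles to an assembly line of the form

	->IA64_GTOD_WALL_TIME_OFFSET 8 offsetof (struct fsyscall_gtod_data_t, wall_time)

which becomes

	#define IA64_GTOD_WALL_TIME_OFFSET 8

This is how the fsys.S assembly, which cannot include C headers, tracks the
layout of the structures added in this merge.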
@@ -3,6 +3,7 @@
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <asm/io.h>

/* IBM Summit (EXA) Cyclone counter code */
@@ -18,13 +19,21 @@ void __init cyclone_setup(void)
	use_cyclone = 1;
}

static void __iomem *cyclone_mc;

struct time_interpolator cyclone_interpolator = {
	.source = TIME_SOURCE_MMIO64,
	.shift = 16,
	.frequency = CYCLONE_TIMER_FREQ,
	.drift = -100,
	.mask = (1LL << 40) - 1
static cycle_t read_cyclone(void)
{
	return (cycle_t)readq((void __iomem *)cyclone_mc);
}

static struct clocksource clocksource_cyclone = {
	.name = "cyclone",
	.rating = 300,
	.read = read_cyclone,
	.mask = (1LL << 40) - 1,
	.mult = 0, /* to be calculated */
	.shift = 16,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

int __init init_cyclone_clock(void)
@@ -44,13 +53,15 @@ int __init init_cyclone_clock(void)
	offset = (CYCLONE_CBAR_ADDR);
	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
	if(!reg){
		printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
		       " register.\n");
		use_cyclone = 0;
		return -ENODEV;
	}
	base = readq(reg);
	if(!base){
		printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
		       " value.\n");
		use_cyclone = 0;
		return -ENODEV;
	}
@@ -60,7 +71,8 @@ int __init init_cyclone_clock(void)
	offset = (base + CYCLONE_PMCC_OFFSET);
	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
	if(!reg){
		printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
		printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
		       " register.\n");
		use_cyclone = 0;
		return -ENODEV;
	}
@@ -71,7 +83,8 @@ int __init init_cyclone_clock(void)
	offset = (base + CYCLONE_MPCS_OFFSET);
	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
	if(!reg){
		printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
		printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
		       " register.\n");
		use_cyclone = 0;
		return -ENODEV;
	}
@@ -82,7 +95,8 @@ int __init init_cyclone_clock(void)
	offset = (base + CYCLONE_MPMC_OFFSET);
	cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
	if(!cyclone_timer){
		printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
		printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
		       " register.\n");
		use_cyclone = 0;
		return -ENODEV;
	}
@@ -93,7 +107,8 @@ int __init init_cyclone_clock(void)
		int stall = 100;
		while(stall--) barrier();
		if(readl(cyclone_timer) == old){
			printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
			printk(KERN_ERR "Summit chipset: Counter not counting!"
			       " DISABLED\n");
			iounmap(cyclone_timer);
			cyclone_timer = 0;
			use_cyclone = 0;
@@ -101,8 +116,11 @@ int __init init_cyclone_clock(void)
		}
	}
	/* initialize last tick */
	cyclone_interpolator.addr = cyclone_timer;
	register_time_interpolator(&cyclone_interpolator);
	cyclone_mc = cyclone_timer;
	clocksource_cyclone.fsys_mmio = cyclone_timer;
	clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
						       clocksource_cyclone.shift);
	clocksource_register(&clocksource_cyclone);

	return 0;
}
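For reference, clocksource_hz2mult() chooses mult so that nanoseconds fall
out of one integer multiply and shift: ns = (cycles * mult) >> shift.
A worked check, assuming a 100 MHz cyclone counter (the actual value of
CYCLONE_TIMER_FREQ is not shown in this hunk, so treat 100 MHz as an
illustration only):

	mult = (10^9 << 16) / 10^8 = 655360
	1 cycle -> (1 * 655360) >> 16 = 10 ns, i.e. exactly one period at 100 MHz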
@@ -1581,7 +1581,7 @@ sys_call_table:
	data8 sys_sync_file_range		// 1300
	data8 sys_tee
	data8 sys_vmsplice
	data8 sys_ni_syscall			// reserved for move_pages
	data8 sys_fallocate
	data8 sys_getcpu
	data8 sys_epoll_pwait			// 1305
	data8 sys_utimensat
@@ -147,12 +147,11 @@ ENTRY(fsys_set_tid_address)
	FSYS_RETURN
END(fsys_set_tid_address)

/*
 * Ensure that the time interpolator structure is compatible with the asm code
 */
#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \
	|| IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4
#error fsys_gettimeofday incompatible with changes to struct time_interpolator
#if IA64_GTOD_LOCK_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
#endif
#if IA64_ITC_JITTER_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t
#endif
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
@@ -179,126 +178,124 @@ ENTRY(fsys_gettimeofday)
	// r11 = preserved: saved ar.pfs
	// r12 = preserved: memory stack
	// r13 = preserved: thread pointer
	// r14 = address of mask / mask
	// r14 = address of mask / mask value
	// r15 = preserved: system call number
	// r16 = preserved: current task pointer
	// r17 = wall to monotonic use
	// r18 = time_interpolator->offset
	// r19 = address of wall_to_monotonic
	// r20 = pointer to struct time_interpolator / pointer to time_interpolator->address
	// r21 = shift factor
	// r22 = address of time interpolator->last_counter
	// r23 = address of time_interpolator->last_cycle
	// r24 = address of time_interpolator->offset
	// r25 = last_cycle value
	// r26 = last_counter value
	// r27 = pointer to xtime
	// r17 = (not used)
	// r18 = (not used)
	// r19 = address of itc_lastcycle
	// r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence)
	// r21 = address of mmio_ptr
	// r22 = address of wall_time or monotonic_time
	// r23 = address of shift / value
	// r24 = address mult factor / cycle_last value
	// r25 = itc_lastcycle value
	// r26 = address clocksource cycle_last
	// r27 = (not used)
	// r28 = sequence number at the beginning of critical section
	// r29 = address of seqlock
	// r29 = address of itc_jitter
	// r30 = time processing flags / memory address
	// r31 = pointer to result
	// Predicates
	// p6,p7 short term use
	// p8 = timesource ar.itc
	// p9 = timesource mmio64
	// p10 = timesource mmio32
	// p10 = timesource mmio32 - not used
	// p11 = timesource not to be handled by asm code
	// p12 = memory time source ( = p9 | p10)
	// p13 = do cmpxchg with time_interpolator_last_cycle
	// p12 = memory time source ( = p9 | p10) - not used
	// p13 = do cmpxchg with itc_lastcycle
	// p14 = Divide by 1000
	// p15 = Add monotonic
	//
	// Note that instructions are optimized for McKinley. McKinley can process two
	// bundles simultaneously and therefore we continuously try to feed the CPU
	// two bundles and then a stop.
	tnat.nz p6,p0 = r31	// branch deferred since it does not fit into bundle structure
	// Note that instructions are optimized for McKinley. McKinley can
	// process two bundles simultaneously and therefore we continuously
	// try to feed the CPU two bundles and then a stop.
	//
	// Additional note: the code has changed a lot. Optimization is TBD.
	// Comments beginning with "?" may be outdated.
	tnat.nz p6,p0 = r31	// ? branch deferred to fit later bundle
	mov pr = r30,0xc000	// Set predicates according to function
	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
	movl r20 = time_interpolator
	movl r20 = fsyscall_gtod_data	// load fsyscall gettimeofday data address
	;;
	ld8 r20 = [r20]		// get pointer to time_interpolator structure
	movl r29 = xtime_lock
	movl r29 = itc_jitter_data	// itc_jitter
	add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20	// wall_time
	ld4 r2 = [r2]		// process work pending flags
	movl r27 = xtime
	;;	// only one bundle here
	ld8 r21 = [r20]		// first quad with control information
	and r2 = TIF_ALLWORK_MASK,r2
(p6)	br.cond.spnt.few .fail_einval	// deferred branch
	;;
	add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20
	extr r3 = r21,32,32	// time_interpolator->nsec_per_cyc
	extr r8 = r21,0,16	// time_interpolator->source
(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
	add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
	and r2 = TIF_ALLWORK_MASK,r2
(p6)	br.cond.spnt.few .fail_einval	// ? deferred branch
	;;
	add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20	// clksrc_cycle_last
	cmp.ne p6, p0 = 0, r2	// Fallback if work is scheduled
(p6)	br.cond.spnt.many fsys_fallback_syscall
	;;
	cmp.eq p8,p12 = 0,r8	// Check for cpu timer
	cmp.eq p9,p0 = 1,r8	// MMIO64 ?
	extr r2 = r21,24,8	// time_interpolator->jitter
	cmp.eq p10,p0 = 2,r8	// MMIO32 ?
	cmp.ltu p11,p0 = 2,r8	// function or other clock
(p11)	br.cond.spnt.many fsys_fallback_syscall
	;;
	setf.sig f7 = r3	// Setup for scaling of counter
(p15)	movl r19 = wall_to_monotonic
(p12)	ld8 r30 = [r10]
	cmp.ne p13,p0 = r2,r0	// need jitter compensation?
	extr r21 = r21,16,8	// shift factor
	;;
	// Begin critical section
.time_redo:
	.pred.rel.mutex p8,p9,p10
	ld4.acq r28 = [r29]	// xtime_lock.sequence. Must come first for locking purposes
	ld4.acq r28 = [r20]	// gtod_lock.sequence, Must take first
	;;
	and r28 = ~1,r28	// Make sequence even to force retry if odd
	and r28 = ~1,r28	// And make sequence even to force retry if odd
	;;
	ld8 r30 = [r21]		// clocksource->mmio_ptr
	add r24 = IA64_CLKSRC_MULT_OFFSET,r20
	ld4 r2 = [r29]		// itc_jitter value
	add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
	add r14 = IA64_CLKSRC_MASK_OFFSET,r20
	;;
	ld4 r3 = [r24]		// clocksource mult value
	ld8 r14 = [r14]		// clocksource mask value
	cmp.eq p8,p9 = 0,r30	// use cpu timer if no mmio_ptr
	;;
	setf.sig f7 = r3	// Setup for mult scaling of counter
(p8)	cmp.ne p13,p0 = r2,r0	// need itc_jitter compensation, set p13
	ld4 r23 = [r23]		// clocksource shift value
	ld8 r24 = [r26]		// get clksrc_cycle_last value
(p9)	cmp.eq p13,p0 = 0,r30	// if mmio_ptr, clear p13 jitter control
	;;
	.pred.rel.mutex p8,p9
(p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
	add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
(p9)	ld8 r2 = [r30]		// readq(ti->address). Could also have latency issues..
(p10)	ld4 r2 = [r30]		// readw(ti->address)
(p13)	add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20
	;;	// could be removed by moving the last add upward
	ld8 r26 = [r22]		// time_interpolator->last_counter
(p13)	ld8 r25 = [r23]		// time interpolator->last_cycle
	add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20
(p15)	ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET
	ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET
	add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20
(p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
(p13)	ld8 r25 = [r19]		// get itc_lastcycle value
	;;	// ? could be removed by moving the last add upward
	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
	;;
	ld8 r18 = [r24]		// time_interpolator->offset
	ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET	// xtime.tv_nsec
(p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
(p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
	;;
	ld8 r14 = [r14]		// time_interpolator->mask
(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
	sub r10 = r2,r26	// current_counter - last_counter
(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
	sub r10 = r2,r24	// current_cycle - last_cycle
	;;
(p6)	sub r10 = r25,r26	// time we got was less than last_cycle
(p6)	sub r10 = r25,r24	// time we got was less than last_cycle
(p7)	mov ar.ccv = r25	// more than last_cycle. Prep for cmpxchg
	;;
(p7)	cmpxchg8.rel r3 = [r19],r2,ar.ccv
	;;
(p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful
	;;
(p7)	sub r10 = r3,r24	// then use new last_cycle instead
	;;
	and r10 = r10,r14	// Apply mask
	;;
	setf.sig f8 = r10
	nop.i 123
	;;
(p7)	cmpxchg8.rel r3 = [r23],r2,ar.ccv
EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have spare time
	// fault check takes 5 cycles and we have spare time
EX(.fail_efault, probe.w.fault r31, 3)
	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
(p15)	add r9 = r9,r17		// Add wall to monotonic.secs to result secs
	;;
(p15)	ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
(p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful redo
	// simulate tbit.nz.or p7,p0 = r28,0
	// ? simulate tbit.nz.or p7,p0 = r28,0
	getf.sig r2 = f8
	mf
	add r8 = r8,r18		// Add time interpolator offset
	;;
	ld4 r10 = [r29]		// xtime_lock.sequence
(p15)	add r8 = r8, r17	// Add monotonic.nsecs to nsecs
	shr.u r2 = r2,r21
	;;	// overloaded 3 bundles!
	// End critical section.
	ld4 r10 = [r20]		// gtod_lock.sequence
	shr.u r2 = r2,r23	// shift by factor
	;;	// ? overloaded 3 bundles!
	add r8 = r8,r2		// Add xtime.nsecs
	cmp4.ne.or p7,p0 = r28,r10
(p7)	br.cond.dpnt.few .time_redo	// sequence number changed ?
	cmp4.ne p7,p0 = r28,r10
(p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
	// End critical section.
	// Now r8=tv->tv_nsec and r9=tv->tv_sec
	mov r10 = r0
	movl r2 = 1000000000
@@ -308,19 +305,19 @@ EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have spare
.time_normalize:
	mov r21 = r8
	cmp.ge p6,p0 = r8,r2
(p14)	shr.u r20 = r8, 3	// We can repeat this if necessary just wasting some time
(p14)	shr.u r20 = r8, 3	// We can repeat this if necessary just wasting time
	;;
(p14)	setf.sig f8 = r20
(p6)	sub r8 = r8,r2
(p6)	add r9 = 1,r9		// two nops before the branch.
(p14)	setf.sig f7 = r3	// Chances for repeats are 1 in 10000 for gettod
(p6)	add r9 = 1,r9		// two nops before the branch.
(p14)	setf.sig f7 = r3	// Chances for repeats are 1 in 10000 for gettod
(p6)	br.cond.dpnt.few .time_normalize
	;;
	// Divided by 8 through shift. Now divide by 125
	// The compiler was able to do that with a multiply
	// and a shift and we do the same
EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
(p14)	xmpy.hu f8 = f8, f7	// xmpy has 5 cycles latency so use it...
EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
(p14)	xmpy.hu f8 = f8, f7	// xmpy has 5 cycles latency so use it
	;;
	mov r8 = r0
(p14)	getf.sig r2 = f8
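The "divide by 8, then by 125" tail above is how gettimeofday converts
nanoseconds to microseconds without a divide instruction: xmpy.hu keeps the
high 64 bits of a product, so dividing by 125 becomes a reciprocal multiply.
A worked check of the identity (the magic constant below is computed from
the identity itself, not copied out of fsys.S, which loads it into r3
outside this hunk):

	n / 1000 = (n >> 3) / 125          (exact: floor(floor(n/8)/125) = floor(n/1000))
	m = ceil(2^64 / 125) = 147573952589676413, and 125 * m = 2^64 + 9

so ((n >> 3) * m) >> 64 = (n >> 3) / 125 for every n in range here, since the
error term 9*x/2^64 stays far below 1/125 for x < 2^64/9. For example,
n = 999,999,999 ns gives n >> 3 = 124,999,999, and 124,999,999 / 125 =
999,999 us, which matches n / 1000 exactly.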
arch/ia64/kernel/fsyscall_gtod_data.h (new file, 23 lines)
@@ -0,0 +1,23 @@
/*
 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
 *     Contributed by Peter Keilty <peter.keilty@hp.com>
 *
 * fsyscall gettimeofday data
 */

struct fsyscall_gtod_data_t {
	seqlock_t	lock;
	struct timespec	wall_time;
	struct timespec	monotonic_time;
	cycle_t		clk_mask;
	u32		clk_mult;
	u32		clk_shift;
	void		*clk_fsys_mmio;
	cycle_t		clk_cycle_last;
} __attribute__ ((aligned (L1_CACHE_BYTES)));

struct itc_jitter_data_t {
	int		itc_jitter;
	cycle_t		itc_lastcycle;
} __attribute__ ((aligned (L1_CACHE_BYTES)));
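The fsys.S assembly above open-codes a seqlock read-side against this
structure; it runs in user mode, so it cannot call kernel helpers. A C
sketch of the equivalent reader protocol, assuming the kernel's
read_seqbegin()/read_seqretry() semantics and ignoring the MMIO and
ITC-jitter paths the assembly also handles:

	#include <linux/seqlock.h>
	#include <linux/time.h>
	#include <asm/timex.h>		/* get_cycles() */

	/* simplified reader; 'g' would be &fsyscall_gtod_data */
	static void example_gettime(struct fsyscall_gtod_data_t *g,
				    struct timespec *ts)
	{
		unsigned long seq;
		cycle_t now, delta;
		u64 ns;

		do {
			seq   = read_seqbegin(&g->lock);
			now   = get_cycles();	/* ar.itc on ia64 */
			delta = (now - g->clk_cycle_last) & g->clk_mask;
			ns    = ((u64)delta * g->clk_mult) >> g->clk_shift;
			*ts   = g->wall_time;
		} while (read_seqretry(&g->lock, seq));

		timespec_add_ns(ts, ns);
	}

The matching writer is update_vsyscall() at the end of this merge, which
takes the same seqlock around every field it copies.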
[diff suppressed: file too large to display]
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
#ifdef CONFIG_IA64_GENERIC
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
	return __get_cpu_var(vector_irq)[vec];
}
#endif
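The one-line change above replaces the old identity mapping between vectors
and irqs with a per-CPU table lookup, which is what makes vector domains
possible: the same vector number can mean different irqs on different CPUs.
A simplified, runnable user-space model of the two tables introduced by the
irq_ia64.c hunk below (table names come from that hunk; sizes and values
here are illustrative only):

	#include <stdio.h>

	#define NR_CPUS    4
	#define NR_VECTORS 256
	#define NR_IRQS    256

	struct irq_cfg { int vector; unsigned long domain; }; /* irq -> vector+domain */
	static struct irq_cfg irq_cfg[NR_IRQS];
	static int vector_irq[NR_CPUS][NR_VECTORS];           /* per-CPU vector -> irq */

	int main(void)
	{
		/* bind irq 40 to vector 0x62 with a domain of CPUs 0 and 1 */
		irq_cfg[40].vector = 0x62;
		irq_cfg[40].domain = 0x3;	/* bitmask: cpu0 | cpu1 */
		vector_irq[0][0x62] = 40;
		vector_irq[1][0x62] = 40;
		/* the same vector can be reused for another irq on CPU 2 */
		vector_irq[2][0x62] = 41;
		printf("vector 0x62: cpu0 -> irq %d, cpu2 -> irq %d\n",
		       vector_irq[0][0x62], vector_irq[2][0x62]);
		return 0;
	}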
@@ -46,6 +46,12 @@

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
};

static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS -1] = IRQ_UNUSED
};

int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

static void reserve_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		cpus_and(mask, domain, vector_table[pos]);
		if (!cpus_empty(mask))
			continue;
		return IA64_FIRST_DEVICE_VECTOR + pos;
	}
	return -ENOSPC;
}

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu, pos;
	struct irq_cfg *cfg = &irq_cfg[irq];

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	cpus_or(vector_table[pos], vector_table[pos], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu, pos;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	cpus_andnot(vector_table[pos], vector_table[pos], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
assign_irq_vector (int irq)
{
	int pos, vector;
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		return -ENOSPC;
	if (test_and_set_bit(pos, ia64_vector_mask))
		goto again;
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	if (irq < 0) {
		goto out;
	}
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	return test_and_set_bit(pos, ia64_vector_mask);
/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 1;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif
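parse_vector_domain() above wires the domain choice to an early boot
parameter: on CONFIG_SMP kernels built for DIG or GENERIC, passing the
option below gives each CPU its own vector allocation space and also sets
no_int_routing. A hypothetical elilo stanza (any bootloader that passes a
kernel command line works the same way):

	append = "... vector=percpu"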

void destroy_and_reserve_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);

	clear_irq_vector(irq);
	reserve_irq(irq);
}

static int __reassign_irq_vector(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	__clear_irq_vector(irq);
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int reassign_irq_vector(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __reassign_irq_vector(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

/*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
 */
int create_irq(void)
{
	int vector = assign_irq_vector(AUTO_ASSIGN);
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain;

	if (vector >= 0)
		dynamic_irq_init(vector);

	return vector;
	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	free_irq_vector(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
	irq_desc_t *desc;
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
			desc->chip = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	desc = irq_desc + irq;
	desc->status |= IRQ_PER_CPU;
	desc->chip = &irq_type_ia64_lsapic;
	if (action)
		setup_irq(irq, action);
}

void __init
@@ -13,6 +13,7 @@

#define MSI_DATA_VECTOR_SHIFT	0
#define MSI_DATA_VECTOR(v)	(((u8)v) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK	0xffffff00

#define MSI_DATA_DELIVERY_SHIFT	8
#define MSI_DATA_DELIVERY_FIXED	(0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr;
	u32 addr, data;
	int cpu = first_cpu(cpu_mask);

	if (!cpu_online(cpu))
		return;

	if (reassign_irq_vector(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = cpu_mask;
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
#endif /* CONFIG_SMP */

@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
@@ -395,9 +395,13 @@ smp_callin (void)
	fix_b0_for_bsp();

	lock_ipi_calllock();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	smp_setup_percpu_timer();
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>

#include <asm/machvec.h>
#include <asm/delay.h>
@@ -28,6 +29,16 @@
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(void);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ
@@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip);

#endif

static struct time_interpolator itc_interpolator = {
	.shift = 16,
	.mask = 0xffffffffffffffffLL,
	.source = TIME_SOURCE_CPU
static struct clocksource clocksource_itc = {
	.name = "itc",
	.rating = 350,
	.read = itc_get_cycles,
	.mask = 0xffffffffffffffff,
	.mult = 0, /* to be calculated */
	.shift = 16,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
@@ -210,8 +226,6 @@ ia64_init_itm (void)
		+ itc_freq/2)/itc_freq;

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		itc_interpolator.frequency = local_cpu_data->itc_freq;
		itc_interpolator.drift = itc_drift;
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
@@ -223,15 +237,50 @@ ia64_init_itm (void)
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter) itc_interpolator.jitter = 1;
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
		register_time_interpolator(&itc_interpolator);
	}

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		/* Sort out mult/shift values: */
		clocksource_itc.mult =
			clocksource_hz2mult(local_cpu_data->itc_freq,
					    clocksource_itc.shift);
		clocksource_register(&clocksource_itc);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(void)
{
	u64 lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment a CPU can lose the cmpxchg race; when
	 * that happens, cmpxchg returns the value the winning CPU
	 * stored, so use that value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}


static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
@@ -307,3 +356,34 @@ ia64_setup_printk_clock(void)
	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
		ia64_printk_clock = ia64_itc_printk_clock;
}

void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
	unsigned long flags;

	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = c->mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}