Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "One performance improvement and a few bug fixes. Two of the fixes
  deal with the clock related problems we have seen on recent kernels"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/mm: handle asce-type exceptions as normal page fault
  s390,time: revert direct ktime path for s390 clockevent device
  s390/time,vdso: convert to the new update_vsyscall interface
  s390/uaccess: add missing page table walk range check
  s390/mm: optimize copy_page
  s390/dasd: validate request size before building CCW/TCW request
  s390/signal: always restore saved runtime instrumentation psw bit
@@ -101,7 +101,7 @@ config S390
 	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_ARCH_SECCOMP_FILTER
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
 		: "memory", "cc");
 }
 
+/*
+ * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
+ * bypass caches when copying a page. Especially when copying huge pages
+ * this keeps L1 and L2 data caches alive.
+ */
 static inline void copy_page(void *to, void *from)
 {
-	if (MACHINE_HAS_MVPG) {
-		register unsigned long reg0 asm ("0") = 0;
-		asm volatile(
-			"	mvpg	%0,%1"
-			: : "a" (to), "a" (from), "d" (reg0)
-			: "memory", "cc");
-	} else
-		asm volatile(
-			"	mvc	0(256,%0),0(%1)\n"
-			"	mvc	256(256,%0),256(%1)\n"
-			"	mvc	512(256,%0),512(%1)\n"
-			"	mvc	768(256,%0),768(%1)\n"
-			"	mvc	1024(256,%0),1024(%1)\n"
-			"	mvc	1280(256,%0),1280(%1)\n"
-			"	mvc	1536(256,%0),1536(%1)\n"
-			"	mvc	1792(256,%0),1792(%1)\n"
-			"	mvc	2048(256,%0),2048(%1)\n"
-			"	mvc	2304(256,%0),2304(%1)\n"
-			"	mvc	2560(256,%0),2560(%1)\n"
-			"	mvc	2816(256,%0),2816(%1)\n"
-			"	mvc	3072(256,%0),3072(%1)\n"
-			"	mvc	3328(256,%0),3328(%1)\n"
-			"	mvc	3584(256,%0),3584(%1)\n"
-			"	mvc	3840(256,%0),3840(%1)\n"
-			: : "a" (to), "a" (from) : "memory");
+	register void *reg2 asm ("2") = to;
+	register unsigned long reg3 asm ("3") = 0x1000;
+	register void *reg4 asm ("4") = from;
+	register unsigned long reg5 asm ("5") = 0xb0001000;
+	asm volatile(
+		"	mvcl	2,4"
+		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+		: : "memory", "cc");
 }
 
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
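Editor's note: for review convenience, the copy_page() that results from this hunk is reproduced below as a plain function. The code is taken verbatim from the '+' lines above; the comment on the MVCL register convention is editorial and reflects the usual description of MVCL operand registers.

static inline void copy_page(void *to, void *from)
{
	/* MVCL operand registers: r2/r3 hold the destination address and
	 * length, r4/r5 the source address and length.  reg3 = 0x1000 is the
	 * 4 KiB destination length; in reg5 the upper byte 0xb0 is the
	 * padding byte and the low 24 bits (0x1000) the source length. */
	register void *reg2 asm ("2") = to;
	register unsigned long reg3 asm ("3") = 0x1000;
	register void *reg4 asm ("4") = from;
	register unsigned long reg5 asm ("5") = 0xb0001000;

	asm volatile(
		"	mvcl	2,4"
		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
		: : "memory", "cc");
}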
@@ -26,8 +26,9 @@ struct vdso_data {
 	__u64 wtom_clock_nsec;		/*				0x28 */
 	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
 	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
-	__u32 ectg_available;
-	__u32 ntp_mult;			/* NTP adjusted multiplier	0x3C */
+	__u32 ectg_available;		/* ECTG instruction present	0x38 */
+	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x3c */
+	__u32 tk_shift;			/* Shift used for xtime_nsec	0x40 */
 };
 
 struct vdso_per_cpu_data {
@@ -65,7 +65,8 @@ int main(void)
 	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
 	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
-	DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
+	DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
+	DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
 	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
 	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
 	/* constants used by the vdso */
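Editor's note: the two DEFINE() entries end up as assembler constants in the generated asm-offsets header, which is what lets the vdso code below address the new fields as __VDSO_TK_MULT and __VDSO_TK_SHIFT. A minimal standalone model of the idea follows; it is not the actual Kbuild machinery, and the stand-in struct does not reproduce the real 0x3c/0x40 offsets.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct vdso_data: only the two new members matter here. */
struct vdso_data_model {
	unsigned int tk_mult;
	unsigned int tk_shift;
};

/* The kernel's DEFINE() emits marker lines that the build post-processes
 * into "#define NAME value"; printing them directly gives the same result. */
#define DEFINE(sym, val) \
	printf("#define %-16s %#lx\n", #sym, (unsigned long)(val))

int main(void)
{
	DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data_model, tk_mult));
	DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data_model, tk_shift));
	return 0;
}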
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
 		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
 		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
 		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
 PGM_CHECK_DEFAULT			/* 35 */
 PGM_CHECK_DEFAULT			/* 36 */
 PGM_CHECK_DEFAULT			/* 37 */
-PGM_CHECK_DEFAULT			/* 38 */
+PGM_CHECK_64BIT(do_dat_exception)	/* 38 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 39 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3a */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3b */
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
 		(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
 	/* Check for invalid user address space control. */
 	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
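Editor's note: both the compat and the native path now mask PSW_MASK_RI out of the kept kernel bits and take it from the saved user PSW, so a task that had runtime instrumentation enabled does not lose the bit across sigreturn. A schematic C sketch of the mask handling; the bit values are invented for illustration only, the real definitions live in the s390 headers.

/* Illustrative values only -- the point is the shape of the expression
 * used in both hunks above. */
#define PSW_MASK_USER	0x0000ff00UL	/* hypothetical */
#define PSW_MASK_RI	0x00000080UL	/* hypothetical */

static unsigned long restore_psw_mask(unsigned long current_mask,
				      unsigned long saved_user_mask)
{
	/* Drop the user-controllable bits plus RI from the current mask,
	 * then re-insert whatever the signal frame saved for them. */
	return (current_mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
	       (saved_user_mask & (PSW_MASK_USER | PSW_MASK_RI));
}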
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
 	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-static int s390_next_ktime(ktime_t expires,
+static int s390_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
-	struct timespec ts;
-	u64 nsecs;
-
-	ts.tv_sec = ts.tv_nsec = 0;
-	monotonic_to_bootbased(&ts);
-	nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
-	do_div(nsecs, 125);
-	S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
-	/* Program the maximum value if we have an overflow (== year 2042) */
-	if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
-		S390_lowcore.clock_comparator = -1ULL;
+	S390_lowcore.clock_comparator = get_tod_clock() + delta;
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	return 0;
 }
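Editor's note: with the direct ktime path reverted, the whole callback collapses to the few lines below, assembled from the '+' lines of the hunk; the comment is editorial.

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	/* delta is already in clock-comparator (TOD) units; the conversion
	 * from nanoseconds is done by the clockevents core using the
	 * mult/shift values programmed in init_cpu_timer() below. */
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}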
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
 	cpu = smp_processor_id();
 	cd = &per_cpu(comparators, cpu);
 	cd->name		= "comparator";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
-				  CLOCK_EVT_FEAT_KTIME;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
 	cd->mult		= 16777;
 	cd->shift		= 12;
 	cd->min_delta_ns	= 1;
 	cd->max_delta_ns	= LONG_MAX;
 	cd->rating		= 400;
 	cd->cpumask		= cpumask_of(cpu);
-	cd->set_next_ktime	= s390_next_ktime;
+	cd->set_next_event	= s390_next_event;
 	cd->set_mode		= s390_set_mode;
 
 	clockevents_register_device(cd);
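Editor's note: the clockevents core converts the requested nanosecond delta into device ticks before calling the set_next_event hook, conventionally as (delta_ns * mult) >> shift. With the values above that gives roughly the TOD clock's 4096 units per microsecond. A small worked example, standalone and illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the conversion applied before s390_next_event() is called,
 * assuming the usual "ticks = (ns * mult) >> shift" convention:
 * mult = 16777, shift = 12 gives 16777/4096 ~= 4.096 ticks per ns. */
int main(void)
{
	uint64_t delta_ns = 1000;		/* request: 1 microsecond */
	uint32_t mult = 16777, shift = 12;
	uint64_t ticks = (delta_ns * mult) >> shift;

	/* Prints 4095: just shy of the ideal 4096 because 16777 is the
	 * truncated value of 4.096 * 4096. */
	printf("%llu ns -> %llu clock-comparator units\n",
	       (unsigned long long)delta_ns, (unsigned long long)ticks);
	return 0;
}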
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
 	return &clocksource_tod;
 }
 
-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
-	if (clock != &clocksource_tod)
+	u64 nsecps;
+
+	if (tk->clock != &clocksource_tod)
 		return;
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time->tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
-	vdso_data->wtom_clock_sec = wtm->tv_sec;
-	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->ntp_mult = mult;
+	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->wtom_clock_sec =
+		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+		+ (tk->wall_to_monotonic.tv_nsec << tk->shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	while (vdso_data->wtom_clock_nsec >= nsecps) {
+		vdso_data->wtom_clock_nsec -= nsecps;
+		vdso_data->wtom_clock_sec++;
+	}
+	vdso_data->tk_mult = tk->mult;
+	vdso_data->tk_shift = tk->shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
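Editor's note: because tk->xtime_nsec is kept left-shifted by tk->shift, "one second" in the wtom_clock_nsec field is NSEC_PER_SEC << tk->shift, which is what the new carry loop normalizes against. A standalone sketch of that normalization, illustrative rather than kernel code:

#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

/* Mirror of the while loop added in update_vsyscall(): the nanosecond
 * value is stored pre-shifted, so a full second is NSEC_PER_SEC << shift. */
static void normalize_shifted_ns(uint64_t *sec, uint64_t *ns_shifted,
				 uint32_t shift)
{
	uint64_t nsecps = NSEC_PER_SEC << shift;

	while (*ns_shifted >= nsecps) {
		*ns_shifted -= nsecps;
		(*sec)++;
	}
}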
@@ -38,25 +38,26 @@ __kernel_clock_gettime:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,2f
 	ahi	%r0,-1
-2:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+2:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	lr	%r2,%r0
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	3f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 3:	alr	%r0,%r2
-	srdl	%r0,12
-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,4f
 	ahi	%r0,1
-4:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
-	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
+4:	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic.nsec */
 	al	%r1,__VDSO_WTOM_NSEC+4(%r5)
 	brc	12,5f
 	ahi	%r0,1
-5:	al	%r2,__VDSO_WTOM_SEC+4(%r5)
+5:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r2)			/*  >> tk->shift */
+	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+	al	%r2,__VDSO_WTOM_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	1b
 	basr	%r5,0
@@ -86,20 +87,21 @@ __kernel_clock_gettime:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,12f
 	ahi	%r0,-1
-12:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+12:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	lr	%r2,%r0
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	13f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 13:	alr	%r0,%r2
-	srdl	%r0,12
-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,14f
 	ahi	%r0,1
-14:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+14:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r2)			/*  >> tk->shift */
+	l	%r2,__VDSO_XTIME_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	11b
 	basr	%r5,0
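Editor's note: in C terms, the reworked CLOCK_REALTIME path in these vdso routines multiplies the TOD delta by tk->mult, adds the pre-shifted tk->xtime_nsec, and only then shifts right by tk->shift, all inside the tb_update_count retry loop. The sketch below is an illustrative userspace model with invented helper names, not the kernel's code.

#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

/* Model of the vdso_data fields used above (names follow struct vdso_data,
 * the layout does not). */
struct vdso_model {
	uint32_t tb_update_count;	/* odd while update_vsyscall() runs */
	uint64_t xtime_tod_stamp;	/* tk->clock->cycle_last */
	uint64_t xtime_sec;		/* tk->xtime_sec */
	uint64_t xtime_nsec;		/* tk->xtime_nsec, already << tk_shift */
	uint32_t tk_mult;		/* tk->mult */
	uint32_t tk_shift;		/* tk->shift */
};

/* Rough equivalent of the ms/al/srdl (31-bit) or msgf/alg/srlg (64-bit)
 * sequence plus the update-counter retry; the final carry loop models the
 * split into whole seconds done after the loop in the assembly. */
static void realtime_now(const volatile struct vdso_model *v,
			 uint64_t (*read_tod)(void),
			 uint64_t *sec, uint64_t *nsec)
{
	uint64_t seq, ns;

	do {
		seq = v->tb_update_count;
		ns = (read_tod() - v->xtime_tod_stamp) * v->tk_mult;
		ns = (ns + v->xtime_nsec) >> v->tk_shift;
		*sec = v->xtime_sec;
	} while ((seq & 1) || seq != v->tb_update_count);

	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		(*sec)++;
	}
	*nsec = ns;
}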
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,3f
 	ahi	%r0,-1
-3:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+3:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	st	%r0,24(%r15)
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	4f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 4:	al	%r0,24(%r15)
-	srdl	%r0,12
 	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
 5:	mvc	24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	1b
+	l	%r4,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r4)			/*  >> tk->shift */
 	l	%r4,24(%r15)			/* get tv_sec from stack */
 	basr	%r5,0
 6:	ltr	%r0,%r0
@@ -34,14 +34,15 @@ __kernel_clock_gettime:
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	0b
 	stck	48(%r15)			/* Store TOD clock */
+	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
+	alg	%r0,__VDSO_WTOM_SEC(%r5)	/* + wall_to_monotonic.sec */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)
-	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
-	alg	%r0,__VDSO_WTOM_SEC(%r5)
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic.nsec */
+	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
 	larl	%r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	5b
 	stck	48(%r15)			/* Store TOD clock */
+	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	5b
 	larl	%r5,13f
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
 	stck	48(%r15)			/* Store TOD clock */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime.tv_nsec */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* xtime.tv_sec */
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
+	lgf	%r5,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srlg	%r1,%r1,0(%r5)			/*  >> tk->shift */
 	larl	%r5,5f
 2:	clg	%r1,0(%r5)
 	jl	3f
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
  * contains the (negative) exception code.
  */
 #ifdef CONFIG_64BIT
+
 static unsigned long follow_table(struct mm_struct *mm,
 				  unsigned long address, int write)
 {
 	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
+	if (unlikely(address > mm->context.asce_limit - 1))
+		return -0x38UL;
 	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		table = table + ((address >> 53) & 0x7ff);
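Editor's note: the new range check makes an address beyond the ASCE limit fail the walk with -0x38UL, the negated program-interruption code of an ASCE-type exception -- the same code the pgm_check.S hunk above now routes to do_dat_exception. A small standalone illustration of the error convention mentioned in the comment ("contains the (negative) exception code"); the IS_ERR_VALUE-style threshold is written out by hand here.

#include <stdio.h>
#include <stdbool.h>

/* follow_table() returns either a physical address or, on failure, a value
 * in the topmost 4 KiB that encodes the negated exception code. */
static bool walk_failed(unsigned long ret)
{
	return ret >= (unsigned long)-4095L;
}

int main(void)
{
	unsigned long ret = -0x38UL;	/* what the added range check returns */

	printf("failed=%d, exception code=0x%lx\n", walk_failed(ret), -ret);
	return 0;
}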
@@ -3224,6 +3224,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
 
 	fcx_multitrack = private->features.feature[40] & 0x20;
 	data_size = blk_rq_bytes(req);
+	if (data_size % blksize)
+		return ERR_PTR(-EINVAL);
 	/* tpm write request add CBC data on each track boundary */
 	if (rq_data_dir(req) == WRITE)
 		data_size += (last_trk - first_trk) * 4;
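Editor's note: the added guard rejects a request whose byte count is not a whole multiple of the block size before any CCW/TCW program is built. Schematically (a standalone sketch, not the driver code):

#include <errno.h>
#include <stdio.h>

/* A request that is not a whole number of blocks cannot be translated into
 * a valid CCW/TCW program, so it is refused up front with -EINVAL. */
static int validate_request_size(unsigned int data_size, unsigned int blksize)
{
	return (data_size % blksize) ? -EINVAL : 0;
}

int main(void)
{
	printf("%d\n", validate_request_size(4608, 4096));	/* rejected */
	printf("%d\n", validate_request_size(8192, 4096));	/* accepted */
	return 0;
}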