Merge branches 'timers-core-for-linus' and 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates - and a leftover fix - from Thomas Gleixner:
 "A rather large (commit wise) update from the timer side:

   - A bulk update to make compile tests work in the clocksource drivers

   - An overhaul of the h8300 timers

   - Some more Y2038 work

   - A few overflow prevention checks in the timekeeping/ntp code

   - The usual pile of fixes and improvements to the various
     clocksource/clockevent drivers and core code"

Also:
 "A single fix for the posix-clock poll code which did not make it into
  4.4"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (84 commits)
  clocksource/drivers/acpi_pm: Convert to pr_* macros
  clocksource: Make clocksource validation work for all clocksources
  timekeeping: Cap adjustments so they don't exceed the maxadj value
  ntp: Fix second_overflow's input parameter type to be 64bits
  ntp: Change time_reftime to time64_t and utilize 64bit __ktime_get_real_seconds
  timekeeping: Provide internal function __ktime_get_real_seconds
  clocksource/drivers/h8300: Use ioread / iowrite
  clocksource/drivers/h8300: Initializer cleanup.
  clocksource/drivers/h8300: Simplify delta handling
  clocksource/drivers/h8300: Fix timer not overflow case
  clocksource/drivers/h8300: Change to overflow interrupt
  clocksource/drivers/lpc32: Correct pr_err() output format
  clocksource/drivers/arm_global_timer: Fix suspend resume
  clocksource/drivers/pistachio: Fix wrong calculated clocksource read value
  clockevents/drivers/arm_global_timer: Use writel_relaxed in gt_compare_set
  clocksource/drivers/dw_apb_timer: Inline apbt_readl and apbt_writel
  clocksource/drivers/dw_apb_timer: Use {readl|writel}_relaxed in critical path
  clocksource/drivers/dw_apb_timer: Fix apbt_readl return types
  clocksource/drivers/tango-xtal: Replace code by clocksource_mmio_init
  clocksource/drivers/h8300: Increase the compilation test coverage
  ...

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  posix-clock: Fix return code on the poll method's error path
Linus Torvalds 2016-01-11 18:06:43 -08:00
commit b4cee21ee0
38 changed files with 629 additions and 632 deletions

View File

@ -9364,7 +9364,7 @@ M: Andreas Noever <andreas.noever@gmail.com>
S: Maintained
F: drivers/thunderbolt/
TIMEKEEPING, CLOCKSOURCE CORE, NTP
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
M: John Stultz <john.stultz@linaro.org>
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
@ -9377,6 +9377,7 @@ F: include/uapi/linux/time.h
F: include/uapi/linux/timex.h
F: kernel/time/clocksource.c
F: kernel/time/time*.c
F: kernel/time/alarmtimer.c
F: kernel/time/ntp.c
F: tools/testing/selftests/timers/

View File

@ -611,6 +611,7 @@ config ARCH_PXA
select AUTO_ZRELADDR
select COMMON_CLK
select CLKDEV_LOOKUP
select CLKSRC_PXA
select CLKSRC_MMIO
select CLKSRC_OF
select GENERIC_CLOCKEVENTS
@ -650,6 +651,8 @@ config ARCH_SA1100
select ARCH_SPARSEMEM_ENABLE
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CLKSRC_PXA
select CLKSRC_OF if OF
select CPU_FREQ
select CPU_SA1100
select GENERIC_CLOCKEVENTS

View File

@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
select SRAM
select THERMAL
select MFD_SYSCON
select CLKSRC_EXYNOS_MCT
help
Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)

View File

@ -3,6 +3,7 @@ menuconfig ARCH_STI
select ARM_GIC
select ST_IRQCHIP
select ARM_GLOBAL_TIMER
select CLKSRC_ST_LPC
select PINCTRL
select PINCTRL_ST
select MFD_SYSCON

View File

@ -32,6 +32,7 @@ config UX500_SOC_DB8500
select PINCTRL_AB8540
select REGULATOR
select REGULATOR_DB8500_PRCMU
select CLKSRC_DBX500_PRCMU
select PM_GENERIC_DOMAINS if PM
config MACH_MOP500

View File

@ -17,6 +17,7 @@ config H8300
select HAVE_MEMBLOCK
select HAVE_DMA_ATTRS
select CLKSRC_OF
select H8300_TMR8
config RWSEM_GENERIC_SPINLOCK
def_bool y

View File

@ -3,40 +3,45 @@
#ifdef __KERNEL__
#include <asm-generic/io.h>
/* H8/300 internal I/O functions */
static inline unsigned char ctrl_inb(unsigned long addr)
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
return *(volatile unsigned char *)addr;
return *(volatile u8 *)addr;
}
static inline unsigned short ctrl_inw(unsigned long addr)
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
return *(volatile unsigned short *)addr;
return *(volatile u16 *)addr;
}
static inline unsigned long ctrl_inl(unsigned long addr)
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
return *(volatile unsigned long *)addr;
return *(volatile u32 *)addr;
}
static inline void ctrl_outb(unsigned char b, unsigned long addr)
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
{
*(volatile unsigned char *)addr = b;
*(volatile u8 *)addr = b;
}
static inline void ctrl_outw(unsigned short b, unsigned long addr)
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 b, const volatile void __iomem *addr)
{
*(volatile unsigned short *)addr = b;
*(volatile u16 *)addr = b;
}
static inline void ctrl_outl(unsigned long b, unsigned long addr)
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 b, const volatile void __iomem *addr)
{
*(volatile unsigned long *)addr = b;
*(volatile u32 *)addr = b;
}
static inline void ctrl_bclr(int b, unsigned char *addr)
static inline void ctrl_bclr(int b, void __iomem *addr)
{
if (__builtin_constant_p(b))
__asm__("bclr %1,%0" : "+WU"(*addr): "i"(b));
@ -44,7 +49,7 @@ static inline void ctrl_bclr(int b, unsigned char *addr)
__asm__("bclr %w1,%0" : "+WU"(*addr): "r"(b));
}
static inline void ctrl_bset(int b, unsigned char *addr)
static inline void ctrl_bset(int b, void __iomem *addr)
{
if (__builtin_constant_p(b))
__asm__("bset %1,%0" : "+WU"(*addr): "i"(b));
@ -52,6 +57,8 @@ static inline void ctrl_bset(int b, unsigned char *addr)
__asm__("bset %w1,%0" : "+WU"(*addr): "r"(b));
}
#include <asm-generic/io.h>
#endif /* __KERNEL__ */
#endif /* _H8300_IO_H */

View File

@ -207,14 +207,14 @@ device_initcall(device_probe);
#define get_wait(base, addr) ({ \
int baddr; \
baddr = ((addr) / 0x200000 * 2); \
w *= (ctrl_inw((unsigned long)(base) + 2) & (3 << baddr)) + 1; \
w *= (readw((base) + 2) & (3 << baddr)) + 1; \
})
#endif
#if defined(CONFIG_CPU_H8S)
#define get_wait(base, addr) ({ \
int baddr; \
baddr = ((addr) / 0x200000 * 16); \
w *= (ctrl_inl((unsigned long)(base) + 2) & (7 << baddr)) + 1; \
w *= (readl((base) + 2) & (7 << baddr)) + 1; \
})
#endif
@ -228,8 +228,8 @@ static __init int access_timing(void)
bsc = of_find_compatible_node(NULL, NULL, "renesas,h8300-bsc");
base = of_iomap(bsc, 0);
w = (ctrl_inb((unsigned long)base + 0) & bit)?2:1;
if (ctrl_inb((unsigned long)base + 1) & bit)
w = (readb(base + 0) & bit)?2:1;
if (readb(base + 1) & bit)
w *= get_wait(base, addr);
else
w *= 2;

View File

@ -28,10 +28,16 @@ config CLKSRC_MMIO
bool
config DIGICOLOR_TIMER
bool
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
Enables the support for the digicolor timer driver.
config DW_APB_TIMER
bool
bool "DW APB timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
Enables the support for the dw_apb timer.
config DW_APB_TIMER_OF
bool
@ -39,47 +45,77 @@ config DW_APB_TIMER_OF
select CLKSRC_OF
config ROCKCHIP_TIMER
bool
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
select CLKSRC_OF
help
Enables the support for the rockchip timer driver.
config ARMADA_370_XP_TIMER
bool
bool "Armada 370 and XP timer driver" if COMPILE_TEST
depends on ARM
select CLKSRC_OF
help
Enables the support for the Armada 370 and XP timer driver.
config MESON6_TIMER
bool
bool "Meson6 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the Meson6 timer driver.
config ORION_TIMER
bool "Orion timer driver" if COMPILE_TEST
depends on ARM
select CLKSRC_OF
select CLKSRC_MMIO
bool
help
Enables the support for the Orion timer driver
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
bool
help
Enables support for the Sun4i timer.
config SUN5I_HSTIMER
bool "Sun5i timer driver" if COMPILE_TEST
select CLKSRC_MMIO
bool
depends on COMMON_CLK
help
Enables support the Sun5i timer.
config TEGRA_TIMER
bool
bool "Tegra timer driver" if COMPILE_TEST
depends on ARM
help
Enables support for the Tegra driver.
config VT8500_TIMER
bool
bool "VT8500 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
Enables support for the VT8500 driver.
config CADENCE_TTC_TIMER
bool
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK
help
Enables support for the cadence ttc driver.
config ASM9260_TIMER
bool
bool "ASM9260 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select CLKSRC_OF
help
Enables support for the ASM9260 timer.
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
bool "Nomakdik clocksource driver" if COMPILE_TEST
depends on ARM
select CLKSRC_MMIO
help
Support for Multi Timer Unit. MTU provides access
@ -93,9 +129,8 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
Use the Multi Timer Unit as the sched_clock.
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB8500
default y
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
Use the always on PRCMU Timer as clocksource
@ -116,13 +151,18 @@ config CLKSRC_EFM32
event device.
config CLKSRC_LPC32XX
bool
bool "Clocksource for LPC32XX" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select CLKSRC_OF
help
Support for the LPC32XX clocksource.
config CLKSRC_PISTACHIO
bool
bool "Clocksource for Pistachio SoC" if COMPILE_TEST
select CLKSRC_OF
help
Enables the clocksource for the Pistachio SoC.
config CLKSRC_TI_32K
bool "Texas Instruments 32.768 Hz Clocksource" if COMPILE_TEST
@ -199,13 +239,14 @@ config CLKSRC_METAG_GENERIC
This option enables support for the Meta per-thread timers.
config CLKSRC_EXYNOS_MCT
def_bool y if ARCH_EXYNOS
depends on !ARM64
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM
help
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
bool
bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@ -213,7 +254,8 @@ config CLKSRC_SAMSUNG_PWM
needed only on systems that do not have the Exynos MCT available.
config FSL_FTM_TIMER
bool
bool "Freescale FlexTimer Module driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
Support for Freescale FlexTimer Module (FTM) timer.
@ -226,9 +268,12 @@ config SYS_SUPPORTS_SH_CMT
bool
config MTK_TIMER
bool "Mediatek timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_OF
select CLKSRC_MMIO
bool
help
Support for Mediatek timer driver.
config SYS_SUPPORTS_SH_MTU2
bool
@ -279,7 +324,12 @@ config EM_TIMER_STI
such as EMEV2 from former NEC Electronics.
config CLKSRC_QCOM
bool
bool "Qualcomm MSM timer" if COMPILE_TEST
depends on ARM
select CLKSRC_OF
help
This enables the clocksource and the per CPU clockevent driver for the
Qualcomm SoCs.
config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source"
@ -298,21 +348,40 @@ config CLKSRC_MIPS_GIC
select CLKSRC_OF
config CLKSRC_TANGO_XTAL
bool
bool "Clocksource for Tango SoC" if COMPILE_TEST
depends on ARM
select CLKSRC_OF
select CLKSRC_MMIO
help
This enables the clocksource for Tango SoC
config CLKSRC_PXA
def_bool y if ARCH_PXA || ARCH_SA1100
select CLKSRC_OF if OF
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
This enables OST0 support available on PXA and SA-11x0
platforms.
config H8300_TMR8
bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
This enables the 8 bits timer for the H8300 platform.
config H8300_TMR16
bool
bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
This enables the 16 bits timer for the H8300 platform with the
H83069 cpu.
config H8300_TPU
bool
bool "Clocksource for the H8300 platform" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
help
This enables the clocksource for the H8300 platform with the
H8S2678 cpu.
config CLKSRC_IMX_GPT
bool "Clocksource using i.MX GPT" if COMPILE_TEST
@ -320,8 +389,7 @@ config CLKSRC_IMX_GPT
select CLKSRC_MMIO
config CLKSRC_ST_LPC
bool
depends on ARCH_STI
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select CLKSRC_OF if OF
help
Enable this option to use the Low Power controller timer

View File

@ -60,7 +60,7 @@ obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o

View File

@ -109,10 +109,8 @@ static void acpi_pm_check_blacklist(struct pci_dev *dev)
/* the bug has been fixed in PIIX4M */
if (dev->revision < 3) {
printk(KERN_WARNING "* Found PM-Timer Bug on the chipset."
" Due to workarounds for a bug,\n"
"* this clock source is slow. Consider trying"
" other clock sources\n");
pr_warn("* Found PM-Timer Bug on the chipset. Due to workarounds for a bug,\n"
"* this clock source is slow. Consider trying other clock sources\n");
acpi_pm_need_workaround();
}
@ -125,12 +123,9 @@ static void acpi_pm_check_graylist(struct pci_dev *dev)
if (acpi_pm_good)
return;
printk(KERN_WARNING "* The chipset may have PM-Timer Bug. Due to"
" workarounds for a bug,\n"
"* this clock source is slow. If you are sure your timer"
" does not have\n"
"* this bug, please use \"acpi_pm_good\" to disable the"
" workaround\n");
pr_warn("* The chipset may have PM-Timer Bug. Due to workarounds for a bug,\n"
"* this clock source is slow. If you are sure your timer does not have\n"
"* this bug, please use \"acpi_pm_good\" to disable the workaround\n");
acpi_pm_need_workaround();
}
@ -162,8 +157,7 @@ static int verify_pmtmr_rate(void)
/* Check that the PMTMR delta is within 5% of what we expect */
if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 ||
delta > (PMTMR_EXPECTED_RATE * 21) / 20) {
printk(KERN_INFO "PM-Timer running at invalid rate: %lu%% "
"of normal - aborting.\n",
pr_info("PM-Timer running at invalid rate: %lu%% of normal - aborting.\n",
100UL * delta / PMTMR_EXPECTED_RATE);
return -1;
}
@ -199,15 +193,14 @@ static int __init init_acpi_pm_clocksource(void)
break;
if ((value2 < value1) && ((value2) < 0xFFF))
break;
printk(KERN_INFO "PM-Timer had inconsistent results:"
" %#llx, %#llx - aborting.\n",
pr_info("PM-Timer had inconsistent results: %#llx, %#llx - aborting.\n",
value1, value2);
pmtmr_ioport = 0;
return -EINVAL;
}
if (i == ACPI_PM_READ_CHECKS) {
printk(KERN_INFO "PM-Timer failed consistency check "
" (%#llx) - aborting.\n", value1);
pr_info("PM-Timer failed consistency check (%#llx) - aborting.\n",
value1);
pmtmr_ioport = 0;
return -ENODEV;
}

View File

@ -99,17 +99,17 @@ static void gt_compare_set(unsigned long delta, int periodic)
counter += delta;
ctrl = GT_CONTROL_TIMER_ENABLE;
writel(ctrl, gt_base + GT_CONTROL);
writel(lower_32_bits(counter), gt_base + GT_COMP0);
writel(upper_32_bits(counter), gt_base + GT_COMP1);
writel_relaxed(ctrl, gt_base + GT_CONTROL);
writel_relaxed(lower_32_bits(counter), gt_base + GT_COMP0);
writel_relaxed(upper_32_bits(counter), gt_base + GT_COMP1);
if (periodic) {
writel(delta, gt_base + GT_AUTO_INC);
writel_relaxed(delta, gt_base + GT_AUTO_INC);
ctrl |= GT_CONTROL_AUTO_INC;
}
ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE;
writel(ctrl, gt_base + GT_CONTROL);
writel_relaxed(ctrl, gt_base + GT_CONTROL);
}
static int gt_clockevent_shutdown(struct clock_event_device *evt)
@ -195,12 +195,23 @@ static cycle_t gt_clocksource_read(struct clocksource *cs)
return gt_counter_read();
}
static void gt_resume(struct clocksource *cs)
{
unsigned long ctrl;
ctrl = readl(gt_base + GT_CONTROL);
if (!(ctrl & GT_CONTROL_TIMER_ENABLE))
/* re-enable timer on resume */
writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
}
static struct clocksource gt_clocksource = {
.name = "arm_global_timer",
.rating = 300,
.read = gt_clocksource_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = gt_resume,
};
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK

View File

@ -49,20 +49,31 @@ clocksource_to_dw_apb_clocksource(struct clocksource *cs)
return container_of(cs, struct dw_apb_clocksource, cs);
}
static unsigned long apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
{
return readl(timer->base + offs);
}
static void apbt_writel(struct dw_apb_timer *timer, unsigned long val,
static inline void apbt_writel(struct dw_apb_timer *timer, u32 val,
unsigned long offs)
{
writel(val, timer->base + offs);
}
static inline u32 apbt_readl_relaxed(struct dw_apb_timer *timer, unsigned long offs)
{
return readl_relaxed(timer->base + offs);
}
static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val,
unsigned long offs)
{
writel_relaxed(val, timer->base + offs);
}
static void apbt_disable_int(struct dw_apb_timer *timer)
{
unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
ctrl |= APBTMR_CONTROL_INT;
apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
@ -81,7 +92,7 @@ void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
static void apbt_eoi(struct dw_apb_timer *timer)
{
apbt_readl(timer, APBTMR_N_EOI);
apbt_readl_relaxed(timer, APBTMR_N_EOI);
}
static irqreturn_t dw_apb_clockevent_irq(int irq, void *data)
@ -103,7 +114,7 @@ static irqreturn_t dw_apb_clockevent_irq(int irq, void *data)
static void apbt_enable_int(struct dw_apb_timer *timer)
{
unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
/* clear pending intr */
apbt_readl(timer, APBTMR_N_EOI);
ctrl &= ~APBTMR_CONTROL_INT;
@ -113,7 +124,7 @@ static void apbt_enable_int(struct dw_apb_timer *timer)
static int apbt_shutdown(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
unsigned long ctrl;
u32 ctrl;
pr_debug("%s CPU %d state=shutdown\n", __func__,
cpumask_first(evt->cpumask));
@ -127,7 +138,7 @@ static int apbt_shutdown(struct clock_event_device *evt)
static int apbt_set_oneshot(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
unsigned long ctrl;
u32 ctrl;
pr_debug("%s CPU %d state=oneshot\n", __func__,
cpumask_first(evt->cpumask));
@ -160,7 +171,7 @@ static int apbt_set_periodic(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
unsigned long ctrl;
u32 ctrl;
pr_debug("%s CPU %d state=periodic\n", __func__,
cpumask_first(evt->cpumask));
@ -196,17 +207,17 @@ static int apbt_resume(struct clock_event_device *evt)
static int apbt_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long ctrl;
u32 ctrl;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
/* Disable timer */
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl = apbt_readl_relaxed(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/* write new count */
apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
apbt_writel_relaxed(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
return 0;
}
@ -323,7 +334,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
* start count down from 0xffff_ffff. this is done by toggling the
* enable bit then load initial load count to ~0.
*/
unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL);
u32 ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
@ -338,11 +349,12 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
static cycle_t __apbt_read_clocksource(struct clocksource *cs)
{
unsigned long current_count;
u32 current_count;
struct dw_apb_clocksource *dw_cs =
clocksource_to_dw_apb_clocksource(cs);
current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
current_count = apbt_readl_relaxed(&dw_cs->timer,
APBTMR_N_CURRENT_VALUE);
return (cycle_t)~current_count;
}

View File

@ -16,6 +16,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/delay.h>
#include <linux/dw_apb_timer.h>
#include <linux/of.h>
#include <linux/of_address.h>
@ -130,6 +131,17 @@ static void __init init_sched_clock(void)
sched_clock_register(read_sched_clock, 32, sched_rate);
}
#ifdef CONFIG_ARM
static unsigned long dw_apb_delay_timer_read(void)
{
return ~readl_relaxed(sched_io_base);
}
static struct delay_timer dw_apb_delay_timer = {
.read_current_timer = dw_apb_delay_timer_read,
};
#endif
static int num_called;
static void __init dw_apb_timer_init(struct device_node *timer)
{
@ -142,6 +154,10 @@ static void __init dw_apb_timer_init(struct device_node *timer)
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
init_sched_clock();
#ifdef CONFIG_ARM
dw_apb_delay_timer.freq = sched_rate;
register_current_timer_delay(&dw_apb_delay_timer);
#endif
break;
default:
break;

View File

@ -4,85 +4,56 @@
* Copyright 2015 Yoshinori Sato <ysato@users.sourcefoge.jp>
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <asm/segment.h>
#include <asm/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define TSTR 0
#define TSNC 1
#define TMDR 2
#define TOLR 3
#define TISRA 4
#define TISRB 5
#define TISRC 6
#define TCR 0
#define TIOR 1
#define TCNT 2
#define GRA 4
#define GRB 6
#define FLAG_REPROGRAM (1 << 0)
#define FLAG_SKIPEVENT (1 << 1)
#define FLAG_IRQCONTEXT (1 << 2)
#define FLAG_STARTED (1 << 3)
#define ONESHOT 0
#define PERIODIC 1
#define RELATIVE 0
#define ABSOLUTE 1
#define bset(b, a) iowrite8(ioread8(a) | (1 << (b)), (a))
#define bclr(b, a) iowrite8(ioread8(a) & ~(1 << (b)), (a))
struct timer16_priv {
struct platform_device *pdev;
struct clocksource cs;
struct irqaction irqaction;
unsigned long total_cycles;
unsigned long mapbase;
unsigned long mapcommon;
unsigned long flags;
unsigned short gra;
void __iomem *mapbase;
void __iomem *mapcommon;
unsigned short cs_enabled;
unsigned char enb;
unsigned char imfa;
unsigned char imiea;
unsigned char ovf;
raw_spinlock_t lock;
struct clk *clk;
unsigned char ovie;
};
static unsigned long timer16_get_counter(struct timer16_priv *p)
{
unsigned long v1, v2, v3;
int o1, o2;
unsigned short v1, v2, v3;
unsigned char o1, o2;
o1 = ctrl_inb(p->mapcommon + TISRC) & p->ovf;
o1 = ioread8(p->mapcommon + TISRC) & p->ovf;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
o2 = o1;
v1 = ctrl_inw(p->mapbase + TCNT);
v2 = ctrl_inw(p->mapbase + TCNT);
v3 = ctrl_inw(p->mapbase + TCNT);
o1 = ctrl_inb(p->mapcommon + TISRC) & p->ovf;
v1 = ioread16be(p->mapbase + TCNT);
v2 = ioread16be(p->mapbase + TCNT);
v3 = ioread16be(p->mapbase + TCNT);
o1 = ioread8(p->mapcommon + TISRC) & p->ovf;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
v2 |= 0x10000;
if (likely(!o1))
return v2;
else
return v2 + 0x10000;
}
@ -90,8 +61,7 @@ static irqreturn_t timer16_interrupt(int irq, void *dev_id)
{
struct timer16_priv *p = (struct timer16_priv *)dev_id;
ctrl_outb(ctrl_inb(p->mapcommon + TISRA) & ~p->imfa,
p->mapcommon + TISRA);
bclr(p->ovf, p->mapcommon + TISRC);
p->total_cycles += 0x10000;
return IRQ_HANDLED;
@ -105,13 +75,10 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs)
static cycle_t timer16_clocksource_read(struct clocksource *cs)
{
struct timer16_priv *p = cs_to_priv(cs);
unsigned long flags, raw;
unsigned long value;
unsigned long raw, value;
raw_spin_lock_irqsave(&p->lock, flags);
value = p->total_cycles;
raw = timer16_get_counter(p);
raw_spin_unlock_irqrestore(&p->lock, flags);
return value + raw;
}
@ -123,10 +90,10 @@ static int timer16_enable(struct clocksource *cs)
WARN_ON(p->cs_enabled);
p->total_cycles = 0;
ctrl_outw(0x0000, p->mapbase + TCNT);
ctrl_outb(0x83, p->mapbase + TCR);
ctrl_outb(ctrl_inb(p->mapcommon + TSTR) | p->enb,
p->mapcommon + TSTR);
iowrite16be(0x0000, p->mapbase + TCNT);
iowrite8(0x83, p->mapbase + TCR);
bset(p->ovie, p->mapcommon + TISRC);
bset(p->enb, p->mapcommon + TSTR);
p->cs_enabled = true;
return 0;
@ -138,116 +105,83 @@ static void timer16_disable(struct clocksource *cs)
WARN_ON(!p->cs_enabled);
ctrl_outb(ctrl_inb(p->mapcommon + TSTR) & ~p->enb,
p->mapcommon + TSTR);
bclr(p->ovie, p->mapcommon + TISRC);
bclr(p->enb, p->mapcommon + TSTR);
p->cs_enabled = false;
}
static struct timer16_priv timer16_priv = {
.cs = {
.name = "h8300_16timer",
.rating = 200,
.read = timer16_clocksource_read,
.enable = timer16_enable,
.disable = timer16_disable,
.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
#define REG_CH 0
#define REG_COMM 1
static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev)
static void __init h8300_16timer_init(struct device_node *node)
{
struct resource *res[2];
void __iomem *base[2];
int ret, irq;
unsigned int ch;
struct clk *clk;
p->pdev = pdev;
res[REG_CH] = platform_get_resource(p->pdev,
IORESOURCE_MEM, REG_CH);
res[REG_COMM] = platform_get_resource(p->pdev,
IORESOURCE_MEM, REG_COMM);
if (!res[REG_CH] || !res[REG_COMM]) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
}
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
return irq;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
return;
}
p->clk = clk_get(&p->pdev->dev, "fck");
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "can't get clk\n");
return PTR_ERR(p->clk);
base[REG_CH] = of_iomap(node, 0);
if (!base[REG_CH]) {
pr_err("failed to map registers for clocksource\n");
goto free_clk;
}
of_property_read_u32(p->pdev->dev.of_node, "renesas,channel", &ch);
p->pdev = pdev;
p->mapbase = res[REG_CH]->start;
p->mapcommon = res[REG_COMM]->start;
p->enb = 1 << ch;
p->imfa = 1 << ch;
p->imiea = 1 << (4 + ch);
p->cs.name = pdev->name;
p->cs.rating = 200;
p->cs.read = timer16_clocksource_read;
p->cs.enable = timer16_enable;
p->cs.disable = timer16_disable;
p->cs.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
p->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
base[REG_COMM] = of_iomap(node, 1);
if (!base[REG_COMM]) {
pr_err("failed to map registers for clocksource\n");
goto unmap_ch;
}
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
goto unmap_comm;
}
of_property_read_u32(node, "renesas,channel", &ch);
timer16_priv.mapbase = base[REG_CH];
timer16_priv.mapcommon = base[REG_COMM];
timer16_priv.enb = ch;
timer16_priv.ovf = ch;
timer16_priv.ovie = 4 + ch;
ret = request_irq(irq, timer16_interrupt,
IRQF_TIMER, pdev->name, p);
IRQF_TIMER, timer16_priv.cs.name, &timer16_priv);
if (ret < 0) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
return ret;
pr_err("failed to request irq %d of clocksource\n", irq);
goto unmap_comm;
}
clocksource_register_hz(&p->cs, clk_get_rate(p->clk) / 8);
clocksource_register_hz(&timer16_priv.cs,
clk_get_rate(clk) / 8);
return;
return 0;
unmap_comm:
iounmap(base[REG_COMM]);
unmap_ch:
iounmap(base[REG_CH]);
free_clk:
clk_put(clk);
}
static int timer16_probe(struct platform_device *pdev)
{
struct timer16_priv *p = platform_get_drvdata(pdev);
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
return timer16_setup(p, pdev);
}
static int timer16_remove(struct platform_device *pdev)
{
return -EBUSY;
}
static const struct of_device_id timer16_of_table[] = {
{ .compatible = "renesas,16bit-timer" },
{ }
};
static struct platform_driver timer16_driver = {
.probe = timer16_probe,
.remove = timer16_remove,
.driver = {
.name = "h8300h-16timer",
.of_match_table = of_match_ptr(timer16_of_table),
}
};
static int __init timer16_init(void)
{
return platform_driver_register(&timer16_driver);
}
static void __exit timer16_exit(void)
{
platform_driver_unregister(&timer16_driver);
}
subsys_initcall(timer16_init);
module_exit(timer16_exit);
MODULE_AUTHOR("Yoshinori Sato");
MODULE_DESCRIPTION("H8/300H 16bit Timer Driver");
MODULE_LICENSE("GPL v2");
CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init);

View File

@ -8,19 +8,15 @@
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clockchips.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <asm/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define _8TCR 0
#define _8TCSR 2
@ -28,126 +24,74 @@
#define TCORB 6
#define _8TCNT 8
#define FLAG_REPROGRAM (1 << 0)
#define FLAG_SKIPEVENT (1 << 1)
#define FLAG_IRQCONTEXT (1 << 2)
#define CMIEA 6
#define CMFA 6
#define FLAG_STARTED (1 << 3)
#define ONESHOT 0
#define PERIODIC 1
#define SCALE 64
#define RELATIVE 0
#define ABSOLUTE 1
#define bset(b, a) iowrite8(ioread8(a) | (1 << (b)), (a))
#define bclr(b, a) iowrite8(ioread8(a) & ~(1 << (b)), (a))
struct timer8_priv {
struct platform_device *pdev;
struct clock_event_device ced;
struct irqaction irqaction;
unsigned long mapbase;
raw_spinlock_t lock;
void __iomem *mapbase;
unsigned long flags;
unsigned int rate;
unsigned int tcora;
struct clk *pclk;
};
static unsigned long timer8_get_counter(struct timer8_priv *p)
{
unsigned long v1, v2, v3;
int o1, o2;
o1 = ctrl_inb(p->mapbase + _8TCSR) & 0x20;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
o2 = o1;
v1 = ctrl_inw(p->mapbase + _8TCNT);
v2 = ctrl_inw(p->mapbase + _8TCNT);
v3 = ctrl_inw(p->mapbase + _8TCNT);
o1 = ctrl_inb(p->mapbase + _8TCSR) & 0x20;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
v2 |= o1 << 10;
return v2;
}
static irqreturn_t timer8_interrupt(int irq, void *dev_id)
{
struct timer8_priv *p = dev_id;
ctrl_outb(ctrl_inb(p->mapbase + _8TCSR) & ~0x40,
p->mapbase + _8TCSR);
p->flags |= FLAG_IRQCONTEXT;
ctrl_outw(p->tcora, p->mapbase + TCORA);
if (!(p->flags & FLAG_SKIPEVENT)) {
if (clockevent_state_oneshot(&p->ced))
ctrl_outw(0x0000, p->mapbase + _8TCR);
iowrite16be(0x0000, p->mapbase + _8TCR);
p->ced.event_handler(&p->ced);
}
p->flags &= ~(FLAG_SKIPEVENT | FLAG_IRQCONTEXT);
bclr(CMFA, p->mapbase + _8TCSR);
return IRQ_HANDLED;
}
static void timer8_set_next(struct timer8_priv *p, unsigned long delta)
{
unsigned long flags;
unsigned long now;
raw_spin_lock_irqsave(&p->lock, flags);
if (delta >= 0x10000)
dev_warn(&p->pdev->dev, "delta out of range\n");
now = timer8_get_counter(p);
p->tcora = delta;
ctrl_outb(ctrl_inb(p->mapbase + _8TCR) | 0x40, p->mapbase + _8TCR);
if (delta > now)
ctrl_outw(delta, p->mapbase + TCORA);
else
ctrl_outw(now + 1, p->mapbase + TCORA);
raw_spin_unlock_irqrestore(&p->lock, flags);
pr_warn("delta out of range\n");
bclr(CMIEA, p->mapbase + _8TCR);
iowrite16be(delta, p->mapbase + TCORA);
iowrite16be(0x0000, p->mapbase + _8TCNT);
bclr(CMFA, p->mapbase + _8TCSR);
bset(CMIEA, p->mapbase + _8TCR);
}
static int timer8_enable(struct timer8_priv *p)
{
p->rate = clk_get_rate(p->pclk) / 64;
ctrl_outw(0xffff, p->mapbase + TCORA);
ctrl_outw(0x0000, p->mapbase + _8TCNT);
ctrl_outw(0x0c02, p->mapbase + _8TCR);
iowrite16be(0xffff, p->mapbase + TCORA);
iowrite16be(0x0000, p->mapbase + _8TCNT);
iowrite16be(0x0c02, p->mapbase + _8TCR);
return 0;
}
static int timer8_start(struct timer8_priv *p)
{
int ret = 0;
unsigned long flags;
int ret;
raw_spin_lock_irqsave(&p->lock, flags);
if ((p->flags & FLAG_STARTED))
return 0;
if (!(p->flags & FLAG_STARTED))
ret = timer8_enable(p);
if (ret)
goto out;
if (!ret)
p->flags |= FLAG_STARTED;
out:
raw_spin_unlock_irqrestore(&p->lock, flags);
return ret;
}
static void timer8_stop(struct timer8_priv *p)
{
unsigned long flags;
raw_spin_lock_irqsave(&p->lock, flags);
ctrl_outw(0x0000, p->mapbase + _8TCR);
raw_spin_unlock_irqrestore(&p->lock, flags);
iowrite16be(0x0000, p->mapbase + _8TCR);
}
static inline struct timer8_priv *ced_to_priv(struct clock_event_device *ced)
@ -155,7 +99,7 @@ static inline struct timer8_priv *ced_to_priv(struct clock_event_device *ced)
return container_of(ced, struct timer8_priv, ced);
}
static void timer8_clock_event_start(struct timer8_priv *p, int periodic)
static void timer8_clock_event_start(struct timer8_priv *p, unsigned long delta)
{
struct clock_event_device *ced = &p->ced;
@ -166,7 +110,7 @@ static void timer8_clock_event_start(struct timer8_priv *p, int periodic)
ced->max_delta_ns = clockevent_delta2ns(0xffff, ced);
ced->min_delta_ns = clockevent_delta2ns(0x0001, ced);
timer8_set_next(p, periodic?(p->rate + HZ/2) / HZ:0x10000);
timer8_set_next(p, delta);
}
static int timer8_clock_event_shutdown(struct clock_event_device *ced)
@ -179,9 +123,9 @@ static int timer8_clock_event_periodic(struct clock_event_device *ced)
{
struct timer8_priv *p = ced_to_priv(ced);
dev_info(&p->pdev->dev, "used for periodic clock events\n");
pr_info("%s: used for periodic clock events\n", ced->name);
timer8_stop(p);
timer8_clock_event_start(p, PERIODIC);
timer8_clock_event_start(p, (p->rate + HZ/2) / HZ);
return 0;
}
@ -190,9 +134,9 @@ static int timer8_clock_event_oneshot(struct clock_event_device *ced)
{
struct timer8_priv *p = ced_to_priv(ced);
dev_info(&p->pdev->dev, "used for oneshot clock events\n");
pr_info("%s: used for oneshot clock events\n", ced->name);
timer8_stop(p);
timer8_clock_event_start(p, ONESHOT);
timer8_clock_event_start(p, 0x10000);
return 0;
}
@ -208,110 +152,64 @@ static int timer8_clock_event_next(unsigned long delta,
return 0;
}
static int timer8_setup(struct timer8_priv *p,
struct platform_device *pdev)
static struct timer8_priv timer8_priv = {
.ced = {
.name = "h8300_8timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_next_event = timer8_clock_event_next,
.set_state_shutdown = timer8_clock_event_shutdown,
.set_state_periodic = timer8_clock_event_periodic,
.set_state_oneshot = timer8_clock_event_oneshot,
},
};
static void __init h8300_8timer_init(struct device_node *node)
{
struct resource *res;
void __iomem *base;
int irq;
int ret;
struct clk *clk;
p->pdev = pdev;
res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clockevent\n");
return;
}
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
return -ENXIO;
base = of_iomap(node, 0);
if (!base) {
pr_err("failed to map registers for clockevent\n");
goto free_clk;
}
p->mapbase = res->start;
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = timer8_interrupt;
p->irqaction.dev_id = p;
p->irqaction.flags = IRQF_TIMER;
p->pclk = clk_get(&p->pdev->dev, "fck");
if (IS_ERR(p->pclk)) {
dev_err(&p->pdev->dev, "can't get clk\n");
return PTR_ERR(p->pclk);
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
goto unmap_reg;
}
p->ced.name = pdev->name;
p->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
p->ced.rating = 200;
p->ced.cpumask = cpumask_of(0);
p->ced.set_next_event = timer8_clock_event_next;
p->ced.set_state_shutdown = timer8_clock_event_shutdown;
p->ced.set_state_periodic = timer8_clock_event_periodic;
p->ced.set_state_oneshot = timer8_clock_event_oneshot;
timer8_priv.mapbase = base;
ret = setup_irq(irq, &p->irqaction);
if (ret < 0) {
dev_err(&p->pdev->dev,
"failed to request irq %d\n", irq);
return ret;
}
clockevents_register_device(&p->ced);
platform_set_drvdata(pdev, p);
return 0;
timer8_priv.rate = clk_get_rate(clk) / SCALE;
if (!timer8_priv.rate) {
pr_err("Failed to get rate for the clocksource\n");
goto unmap_reg;
}
static int timer8_probe(struct platform_device *pdev)
{
struct timer8_priv *p = platform_get_drvdata(pdev);
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
if (request_irq(irq, timer8_interrupt, IRQF_TIMER,
timer8_priv.ced.name, &timer8_priv) < 0) {
pr_err("failed to request irq %d for clockevent\n", irq);
goto unmap_reg;
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
clockevents_config_and_register(&timer8_priv.ced,
timer8_priv.rate, 1, 0x0000ffff);
return timer8_setup(p, pdev);
return;
unmap_reg:
iounmap(base);
free_clk:
clk_put(clk);
}
static int timer8_remove(struct platform_device *pdev)
{
return -EBUSY;
}
static const struct of_device_id timer8_of_table[] __maybe_unused = {
{ .compatible = "renesas,8bit-timer" },
{ }
};
MODULE_DEVICE_TABLE(of, timer8_of_table);
static struct platform_driver timer8_driver = {
.probe = timer8_probe,
.remove = timer8_remove,
.driver = {
.name = "h8300-8timer",
.of_match_table = of_match_ptr(timer8_of_table),
}
};
static int __init timer8_init(void)
{
return platform_driver_register(&timer8_driver);
}
static void __exit timer8_exit(void)
{
platform_driver_unregister(&timer8_driver);
}
subsys_initcall(timer8_init);
module_exit(timer8_exit);
MODULE_AUTHOR("Yoshinori Sato");
MODULE_DESCRIPTION("H8/300 8bit Timer Driver");
MODULE_LICENSE("GPL v2");
CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);

View File

@ -1,42 +1,30 @@
/*
* H8/300 TPU Driver
* H8S TPU Driver
*
* Copyright 2015 Yoshinori Sato <ysato@users.sourcefoge.jp>
*
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#define TCR 0x0
#define TSR 0x5
#define TCNT 0x6
#define TCR 0
#define TMDR 1
#define TIOR 2
#define TER 4
#define TSR 5
#define TCNT 6
#define TGRA 8
#define TGRB 10
#define TGRC 12
#define TGRD 14
#define TCFV 0x10
struct tpu_priv {
struct platform_device *pdev;
struct clocksource cs;
struct clk *clk;
unsigned long mapbase1;
unsigned long mapbase2;
void __iomem *mapbase1;
void __iomem *mapbase2;
raw_spinlock_t lock;
unsigned int cs_enabled;
};
@ -45,8 +33,8 @@ static inline unsigned long read_tcnt32(struct tpu_priv *p)
{
unsigned long tcnt;
tcnt = ctrl_inw(p->mapbase1 + TCNT) << 16;
tcnt |= ctrl_inw(p->mapbase2 + TCNT);
tcnt = ioread16be(p->mapbase1 + TCNT) << 16;
tcnt |= ioread16be(p->mapbase2 + TCNT);
return tcnt;
}
@ -55,7 +43,7 @@ static int tpu_get_counter(struct tpu_priv *p, unsigned long long *val)
unsigned long v1, v2, v3;
int o1, o2;
o1 = ctrl_inb(p->mapbase1 + TSR) & 0x10;
o1 = ioread8(p->mapbase1 + TSR) & TCFV;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
@ -63,7 +51,7 @@ static int tpu_get_counter(struct tpu_priv *p, unsigned long long *val)
v1 = read_tcnt32(p);
v2 = read_tcnt32(p);
v3 = read_tcnt32(p);
o1 = ctrl_inb(p->mapbase1 + TSR) & 0x10;
o1 = ioread8(p->mapbase1 + TSR) & TCFV;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
@ -96,10 +84,10 @@ static int tpu_clocksource_enable(struct clocksource *cs)
WARN_ON(p->cs_enabled);
ctrl_outw(0, p->mapbase1 + TCNT);
ctrl_outw(0, p->mapbase2 + TCNT);
ctrl_outb(0x0f, p->mapbase1 + TCR);
ctrl_outb(0x03, p->mapbase2 + TCR);
iowrite16be(0, p->mapbase1 + TCNT);
iowrite16be(0, p->mapbase2 + TCNT);
iowrite8(0x0f, p->mapbase1 + TCR);
iowrite8(0x03, p->mapbase2 + TCR);
p->cs_enabled = true;
return 0;
@ -111,96 +99,59 @@ static void tpu_clocksource_disable(struct clocksource *cs)
WARN_ON(!p->cs_enabled);
ctrl_outb(0, p->mapbase1 + TCR);
ctrl_outb(0, p->mapbase2 + TCR);
iowrite8(0, p->mapbase1 + TCR);
iowrite8(0, p->mapbase2 + TCR);
p->cs_enabled = false;
}
static struct tpu_priv tpu_priv = {
.cs = {
.name = "H8S_TPU",
.rating = 200,
.read = tpu_clocksource_read,
.enable = tpu_clocksource_enable,
.disable = tpu_clocksource_disable,
.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
#define CH_L 0
#define CH_H 1
static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev)
static void __init h8300_tpu_init(struct device_node *node)
{
struct resource *res[2];
void __iomem *base[2];
struct clk *clk;
p->pdev = pdev;
res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L);
res[CH_H] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_H);
if (!res[CH_L] || !res[CH_H]) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
return;
}
p->clk = clk_get(&p->pdev->dev, "fck");
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "can't get clk\n");
return PTR_ERR(p->clk);
base[CH_L] = of_iomap(node, CH_L);
if (!base[CH_L]) {
pr_err("failed to map registers for clocksource\n");
goto free_clk;
}
base[CH_H] = of_iomap(node, CH_H);
if (!base[CH_H]) {
pr_err("failed to map registers for clocksource\n");
goto unmap_L;
}
p->mapbase1 = res[CH_L]->start;
p->mapbase2 = res[CH_H]->start;
tpu_priv.mapbase1 = base[CH_L];
tpu_priv.mapbase2 = base[CH_H];
p->cs.name = pdev->name;
p->cs.rating = 200;
p->cs.read = tpu_clocksource_read;
p->cs.enable = tpu_clocksource_enable;
p->cs.disable = tpu_clocksource_disable;
p->cs.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
p->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
clocksource_register_hz(&p->cs, clk_get_rate(p->clk) / 64);
platform_set_drvdata(pdev, p);
clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
return 0;
return;
unmap_L:
iounmap(base[CH_H]);
free_clk:
clk_put(clk);
}
static int tpu_probe(struct platform_device *pdev)
{
struct tpu_priv *p = platform_get_drvdata(pdev);
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
return tpu_setup(p, pdev);
}
static int tpu_remove(struct platform_device *pdev)
{
return -EBUSY;
}
static const struct of_device_id tpu_of_table[] = {
{ .compatible = "renesas,tpu" },
{ }
};
static struct platform_driver tpu_driver = {
.probe = tpu_probe,
.remove = tpu_remove,
.driver = {
.name = "h8s-tpu",
.of_match_table = of_match_ptr(tpu_of_table),
}
};
static int __init tpu_init(void)
{
return platform_driver_register(&tpu_driver);
}
static void __exit tpu_exit(void)
{
platform_driver_unregister(&tpu_driver);
}
subsys_initcall(tpu_init);
module_exit(tpu_exit);
MODULE_AUTHOR("Yoshinori Sato");
MODULE_DESCRIPTION("H8S Timer Pulse Unit Driver");
MODULE_LICENSE("GPL v2");
CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);

View File

@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@ -187,10 +189,8 @@ static void __init mtk_timer_init(struct device_node *node)
struct clk *clk;
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt) {
pr_warn("Can't allocate mtk clock event driver struct");
if (!evt)
return;
}
evt->dev.name = "mtk_tick";
evt->dev.rating = 300;
@ -204,31 +204,31 @@ static void __init mtk_timer_init(struct device_node *node)
evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
if (IS_ERR(evt->gpt_base)) {
pr_warn("Can't get resource\n");
return;
pr_err("Can't get resource\n");
goto err_kzalloc;
}
evt->dev.irq = irq_of_parse_and_map(node, 0);
if (evt->dev.irq <= 0) {
pr_warn("Can't parse IRQ");
pr_err("Can't parse IRQ\n");
goto err_mem;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_warn("Can't get timer clock");
pr_err("Can't get timer clock\n");
goto err_irq;
}
if (clk_prepare_enable(clk)) {
pr_warn("Can't prepare clock");
pr_err("Can't prepare clock\n");
goto err_clk_put;
}
rate = clk_get_rate(clk);
if (request_irq(evt->dev.irq, mtk_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
pr_warn("failed to setup irq %d\n", evt->dev.irq);
pr_err("failed to setup irq %d\n", evt->dev.irq);
goto err_clk_disable;
}
@ -260,5 +260,7 @@ static void __init mtk_timer_init(struct device_node *node)
iounmap(evt->gpt_base);
of_address_to_resource(node, 0, &res);
release_mem_region(res.start, resource_size(&res));
err_kzalloc:
kfree(evt);
}
CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);

View File

@ -49,14 +49,12 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
static inline void rk_timer_disable(struct clock_event_device *ce)
{
writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
dsb();
}
static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
{
writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
rk_base(ce) + TIMER_CONTROL_REG);
dsb();
}
static void rk_timer_update_counter(unsigned long cycles,
@ -64,13 +62,11 @@ static void rk_timer_update_counter(unsigned long cycles,
{
writel_relaxed(cycles, rk_base(ce) + TIMER_LOAD_COUNT0);
writel_relaxed(0, rk_base(ce) + TIMER_LOAD_COUNT1);
dsb();
}
static void rk_timer_interrupt_clear(struct clock_event_device *ce)
{
writel_relaxed(1, rk_base(ce) + TIMER_INT_STATUS);
dsb();
}
static inline int rk_timer_set_next_event(unsigned long cycles,
@ -173,4 +169,5 @@ static void __init rk_timer_init(struct device_node *np)
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
}
CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);

View File

@ -19,19 +19,6 @@ static u64 notrace read_sched_clock(void)
return read_xtal_counter();
}
static cycle_t read_clocksource(struct clocksource *cs)
{
return read_xtal_counter();
}
static struct clocksource tango_xtal = {
.name = "tango-xtal",
.rating = 350,
.read = read_clocksource,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init tango_clocksource_init(struct device_node *np)
{
struct clk *clk;
@ -53,8 +40,9 @@ static void __init tango_clocksource_init(struct device_node *np)
delay_timer.freq = xtal_freq;
delay_timer.read_current_timer = read_xtal_counter;
ret = clocksource_register_hz(&tango_xtal, xtal_freq);
if (ret != 0) {
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
32, clocksource_mmio_readl_up);
if (!ret) {
pr_err("%s: registration failed\n", np->full_name);
return;
}

View File

@ -96,7 +96,8 @@ static struct clock_event_device tegra_clockevent = {
.name = "timer0",
.rating = 300,
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC,
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DYNIRQ,
.set_next_event = tegra_timer_set_next_event,
.set_state_shutdown = tegra_timer_shutdown,
.set_state_periodic = tegra_timer_set_periodic,

View File

@ -125,7 +125,7 @@ static int __init lpc32xx_clocksource_init(struct device_node *np)
clk = of_clk_get_by_name(np, "timerclk");
if (IS_ERR(clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(clk));
pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
@ -184,7 +184,7 @@ static int __init lpc32xx_clockevent_init(struct device_node *np)
clk = of_clk_get_by_name(np, "timerclk");
if (IS_ERR(clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(clk));
pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
return PTR_ERR(clk);
}

View File

@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
raw_spin_unlock_irqrestore(&pcs->lock, flags);
return ~(cycle_t)counter;
return (cycle_t)~counter;
}
static u64 notrace pistachio_read_sched_clock(void)

View File

@ -152,13 +152,6 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
{
struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
}
static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
unsigned long event, void *data)
{
@ -217,13 +210,8 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
cs->clksrc.name = node->name;
cs->clksrc.rating = 340;
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
ret = clocksource_register_hz(&cs->clksrc, rate);
ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
rate, 340, 32, clocksource_mmio_readl_down);
if (ret) {
pr_err("Couldn't register clock source.\n");
goto err_remove_notifier;

View File

@ -30,7 +30,6 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <asm/mach/time.h>
#include <linux/of.h>
#include <linux/of_address.h>

View File

@ -21,9 +21,9 @@ static const char ipr_bit[] = {
10, 10, 10, 10, 9, 9, 9, 9,
};
static void *intc_baseaddr;
static void __iomem *intc_baseaddr;
#define IPR ((unsigned long)intc_baseaddr + 6)
#define IPR (intc_baseaddr + 6)
static void h8300h_disable_irq(struct irq_data *data)
{
@ -81,8 +81,8 @@ static int __init h8300h_intc_of_init(struct device_node *intc,
BUG_ON(!intc_baseaddr);
/* All interrupt priority low */
ctrl_outb(0x00, IPR + 0);
ctrl_outb(0x00, IPR + 1);
writeb(0x00, IPR + 0);
writeb(0x00, IPR + 1);
domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL);
BUG_ON(!domain);

View File

@ -62,12 +62,18 @@ struct module;
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
* @owner: module reference, must be set by clocksource in modules
*
* Note: This struct is not used in hotpathes of the timekeeping code
* because the timekeeper caches the hot path fields in its own data
* structure, so no line cache alignment is required,
*
* The pointer to the clocksource itself is handed to the read
* callback. If you need extra information there you can wrap struct
* clocksource into your own struct. Depending on the amount of
* information you need you should consider to cache line align that
* structure.
*/
struct clocksource {
/*
* Hotpath data, fits in a single cache line when the
* clocksource itself is cacheline aligned.
*/
cycle_t (*read)(struct clocksource *cs);
cycle_t mask;
u32 mult;
@ -95,7 +101,7 @@ struct clocksource {
cycle_t wd_last;
#endif
struct module *owner;
} ____cacheline_aligned;
};
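
The wrapping the comment above describes is the usual container_of() idiom (the
dw_apb driver earlier in this diff does exactly this in
clocksource_to_dw_apb_clocksource()). A minimal sketch with purely illustrative
foo_* names, not taken from this commit:

#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical driver-private state wrapped around struct clocksource. */
struct foo_clocksource {
	void __iomem		*base;	/* the extra information ->read() needs */
	struct clocksource	cs;
};

static inline struct foo_clocksource *to_foo_cs(struct clocksource *cs)
{
	return container_of(cs, struct foo_clocksource, cs);
}

static cycle_t foo_cs_read(struct clocksource *cs)
{
	/* ->read() is handed the embedded clocksource; recover the wrapper */
	struct foo_clocksource *foo = to_foo_cs(cs);

	return (cycle_t)readl_relaxed(foo->base);
}

Registering the embedded cs member (e.g. via clocksource_register_hz()) then
lets every ->read() call reach the driver state without resorting to globals.
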
/*
* Clock source flags bits::

View File

@ -10,11 +10,17 @@
#ifdef CONFIG_GENERIC_SCHED_CLOCK
extern void sched_clock_postinit(void);
#else
static inline void sched_clock_postinit(void) { }
#endif
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
#else
static inline void sched_clock_postinit(void) { }
static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
;
}
#endif
#endif

View File

@ -125,6 +125,32 @@ static inline bool timeval_valid(const struct timeval *tv)
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
/*
* Validates if a timespec/timeval used to inject a time offset is valid.
* Offsets can be postive or negative. The value of the timeval/timespec
* is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
* always be non-negative.
*/
static inline bool timeval_inject_offset_valid(const struct timeval *tv)
{
/* We don't check the tv_sec as it can be positive or negative */
/* Can't have more microseconds then a second */
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
return false;
return true;
}
static inline bool timespec_inject_offset_valid(const struct timespec *ts)
{
/* We don't check the tv_sec as it can be positive or negative */
/* Can't have more nanoseconds then a second */
if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
return false;
return true;
}
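
As a concrete illustration of the "sum of its fields" rule (a hypothetical
value, not part of this header): an offset of -0.5 s must be written as
-1 s plus 0.5 s (500000000 ns) so that the nanoseconds field stays
non-negative.

#include <linux/time.h>

/* -0.5 s expressed with a non-negative nanoseconds field: -1 s + 0.5 s */
static const struct timespec minus_half_second = {
	.tv_sec		= -1,
	.tv_nsec	= 500000000L,	/* must satisfy 0 <= tv_nsec < NSEC_PER_SEC */
};

/*
 * timespec_inject_offset_valid(&minus_half_second) returns true, whereas
 * { .tv_sec = 0, .tv_nsec = -500000000L } would be rejected.
 */
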
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })

View File

@ -271,11 +271,27 @@ static int alarmtimer_suspend(struct device *dev)
__pm_wakeup_event(ws, MSEC_PER_SEC);
return ret;
}
static int alarmtimer_resume(struct device *dev)
{
struct rtc_device *rtc;
rtc = alarmtimer_get_rtcdev();
if (rtc)
rtc_timer_cancel(rtc, &rtctimer);
return 0;
}
#else
static int alarmtimer_suspend(struct device *dev)
{
return 0;
}
static int alarmtimer_resume(struct device *dev)
{
return 0;
}
#endif
static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
@ -800,6 +816,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
/* Suspend hook structures */
static const struct dev_pm_ops alarmtimer_pm_ops = {
.suspend = alarmtimer_suspend,
.resume = alarmtimer_resume,
};
static struct platform_driver alarmtimer_driver = {

View File

@ -218,8 +218,8 @@ static void clocksource_watchdog(unsigned long data)
/* Check the deviation from the watchdog clocksource. */
if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
cs->name);
pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
smp_processor_id(), cs->name);
pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
watchdog->name, wdnow, wdlast, watchdog->mask);
pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n",

View File

@ -16,8 +16,11 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/math64.h>
#include "ntp_internal.h"
#include "timekeeping_internal.h"
/*
* NTP timekeeping variables:
@ -70,7 +73,7 @@ static long time_esterror = NTP_PHASE_LIMIT;
static s64 time_freq;
/* time at last adjustment (secs): */
static long time_reftime;
static time64_t time_reftime;
static long time_adjust;
@ -297,25 +300,27 @@ static void ntp_update_offset(long offset)
if (!(time_status & STA_PLL))
return;
if (!(time_status & STA_NANO))
if (!(time_status & STA_NANO)) {
/* Make sure the multiplication below won't overflow */
offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
offset *= NSEC_PER_USEC;
}
/*
* Scale the phase adjustment and
* clamp to the operating range.
*/
offset = min(offset, MAXPHASE);
offset = max(offset, -MAXPHASE);
offset = clamp(offset, -MAXPHASE, MAXPHASE);
/*
* Select how the frequency is to be controlled
* and in which mode (PLL or FLL).
*/
secs = get_seconds() - time_reftime;
secs = (long)(__ktime_get_real_seconds() - time_reftime);
if (unlikely(time_status & STA_FREQHOLD))
secs = 0;
time_reftime = get_seconds();
time_reftime = __ktime_get_real_seconds();
offset64 = offset;
freq_adj = ntp_update_offset_fll(offset64, secs);
@ -390,10 +395,11 @@ ktime_t ntp_get_next_leap(void)
*
* Also handles leap second processing, and returns leap offset
*/
int second_overflow(unsigned long secs)
int second_overflow(time64_t secs)
{
s64 delta;
int leap = 0;
s32 rem;
/*
* Leap second processing. If in leap-insert state at the end of the
@ -404,19 +410,19 @@ int second_overflow(unsigned long secs)
case TIME_OK:
if (time_status & STA_INS) {
time_state = TIME_INS;
ntp_next_leap_sec = secs + SECS_PER_DAY -
(secs % SECS_PER_DAY);
div_s64_rem(secs, SECS_PER_DAY, &rem);
ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
} else if (time_status & STA_DEL) {
time_state = TIME_DEL;
ntp_next_leap_sec = secs + SECS_PER_DAY -
((secs+1) % SECS_PER_DAY);
div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
}
break;
case TIME_INS:
if (!(time_status & STA_INS)) {
ntp_next_leap_sec = TIME64_MAX;
time_state = TIME_OK;
} else if (secs % SECS_PER_DAY == 0) {
} else if (secs == ntp_next_leap_sec) {
leap = -1;
time_state = TIME_OOP;
printk(KERN_NOTICE
@ -427,7 +433,7 @@ int second_overflow(unsigned long secs)
if (!(time_status & STA_DEL)) {
ntp_next_leap_sec = TIME64_MAX;
time_state = TIME_OK;
} else if ((secs + 1) % SECS_PER_DAY == 0) {
} else if (secs == ntp_next_leap_sec) {
leap = 1;
ntp_next_leap_sec = TIME64_MAX;
time_state = TIME_WAIT;
@ -590,7 +596,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
* reference time to current time.
*/
if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
time_reftime = get_seconds();
time_reftime = __ktime_get_real_seconds();
/* only set allowed bits */
time_status &= STA_RONLY;
@ -674,9 +680,15 @@ int ntp_validate_timex(struct timex *txc)
return -EINVAL;
}
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
if (txc->modes & ADJ_SETOFFSET) {
/* In order to inject time, you gotta be super-user! */
if (!capable(CAP_SYS_TIME))
return -EPERM;
if (!timeval_inject_offset_valid(&txc->time))
return -EINVAL;
}
/*
* Check for potential multiplication overflows that can
* only happen on 64-bit systems:

View File

@ -6,7 +6,7 @@ extern void ntp_clear(void);
/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
extern u64 ntp_tick_length(void);
extern ktime_t ntp_get_next_leap(void);
extern int second_overflow(unsigned long secs);
extern int second_overflow(time64_t secs);
extern int ntp_validate_timex(struct timex *);
extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
extern void __hardpps(const struct timespec64 *, const struct timespec64 *);

View File

@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
{
struct posix_clock *clk = get_posix_clock(fp);
int result = 0;
unsigned int result = 0;
if (!clk)
return -ENODEV;
return POLLERR;
if (clk->ops.poll)
result = clk->ops.poll(clk, fp, wait);

View File

@ -603,15 +603,31 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/*
* If the tick is due in the next period, keep it ticking or
* restart it proper.
* force prod the timer.
*/
delta = next_tick - basemono;
if (delta <= (u64)TICK_NSEC) {
tick.tv64 = 0;
/*
* We've not stopped the tick yet, and there's a timer in the
* next period, so no point in stopping it either, bail.
*/
if (!ts->tick_stopped)
goto out;
/*
* If, OTOH, we did stop it, but there's a pending (expired)
* timer reprogram the timer hardware to fire now.
*
* We will not restart the tick proper, just prod the timer
* hardware into firing an interrupt to process the pending
* timers. Just like tick_irq_exit() will not restart the tick
* for 'normal' interrupts.
*
* Only once we exit the idle loop will we re-enable the tick,
* see tick_nohz_idle_exit().
*/
if (delta == 0) {
/* Tick is stopped, but required now. Enforce it */
tick_nohz_restart(ts, now);
goto out;
}

View File

@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
delta = timekeeping_get_delta(tkr);
nsec = delta * tkr->mult + tkr->xtime_nsec;
nsec >>= tkr->shift;
nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
/* If arch requires, add in get_arch_timeoffset() */
return nsec + arch_gettimeoffset();
@ -846,6 +845,19 @@ time64_t ktime_get_real_seconds(void)
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
/**
* __ktime_get_real_seconds - The same as ktime_get_real_seconds
* but without the sequence counter protect. This internal function
* is called just when timekeeping lock is already held.
*/
time64_t __ktime_get_real_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return tk->xtime_sec;
}
#ifdef CONFIG_NTP_PPS
/**
@ -959,7 +971,7 @@ int timekeeping_inject_offset(struct timespec *ts)
struct timespec64 ts64, tmp;
int ret = 0;
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
if (!timespec_inject_offset_valid(ts))
return -EINVAL;
ts64 = timespec_to_timespec64(*ts);
@ -1592,9 +1604,12 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
{
s64 interval = tk->cycle_interval;
s64 xinterval = tk->xtime_interval;
u32 base = tk->tkr_mono.clock->mult;
u32 max = tk->tkr_mono.clock->maxadj;
u32 cur_adj = tk->tkr_mono.mult;
s64 tick_error;
bool negative;
u32 adj;
u32 adj_scale;
/* Remove any current error adj from freq calculation */
if (tk->ntp_err_mult)
@ -1613,13 +1628,33 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
/* preserve the direction of correction */
negative = (tick_error < 0);
/* Sort out the magnitude of the correction */
/* If any adjustment would pass the max, just return */
if (negative && (cur_adj - 1) <= (base - max))
return;
if (!negative && (cur_adj + 1) >= (base + max))
return;
/*
* Sort out the magnitude of the correction, but
* avoid making so large a correction that we go
* over the max adjustment.
*/
adj_scale = 0;
tick_error = abs(tick_error);
for (adj = 0; tick_error > interval; adj++)
while (tick_error > interval) {
u32 adj = 1 << (adj_scale + 1);
/* Check if adjustment gets us within 1 unit from the max */
if (negative && (cur_adj - adj) <= (base - max))
break;
if (!negative && (cur_adj + adj) >= (base + max))
break;
adj_scale++;
tick_error >>= 1;
}
/* scale the corrections */
timekeeping_apply_adjustment(tk, offset, negative, adj);
timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
}
/*

View File

@ -17,7 +17,11 @@ static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
{
cycle_t ret = (now - last) & mask;
return (s64) ret > 0 ? ret : 0;
/*
* Prevent time going backwards by checking the MSB of mask in
* the result. If set, return 0.
*/
return ret & ~(mask >> 1) ? 0 : ret;
}
#else
static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
@ -26,4 +30,6 @@ static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
}
#endif
extern time64_t __ktime_get_real_seconds(void);
#endif /* _TIMEKEEPING_INTERNAL_H */
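
To see what the most-significant-bit check in clocksource_delta() buys, here is
the same arithmetic as a stand-alone userspace sketch (illustrative only),
using a 32-bit mask:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

/* Same logic as the clocksource_delta() change above. */
static cycle_t delta(cycle_t now, cycle_t last, cycle_t mask)
{
	cycle_t ret = (now - last) & mask;

	return ret & ~(mask >> 1) ? 0 : ret;
}

int main(void)
{
	cycle_t mask = 0xffffffff;	/* a 32-bit clocksource */

	/* Normal forward step: prints 100 */
	printf("%llu\n", (unsigned long long)delta(1000, 900, mask));
	/* Apparent backward step wraps to 0xffffff9c, MSB set: prints 0 */
	printf("%llu\n", (unsigned long long)delta(900, 1000, mask));
	return 0;
}

With a mask narrower than 64 bits the old "(s64) ret > 0" test could not catch
such a backward step, because the wrapped 32-bit value is still positive as an
s64; checking the top bit of the masked result handles every mask width.
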

View File

@ -97,7 +97,7 @@ int get_cur_clocksource(char *buf, size_t size)
int change_clocksource(char *clocksource)
{
int fd;
size_t size;
ssize_t size;
fd = open("/sys/devices/system/clocksource/clocksource0/current_clocksource", O_WRONLY);