Merge branches 'acpi', 'idle', 'mrst-pmu' and 'pm-tools' into next
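Most of the diff below is one cpuidle rework repeated across the platform drivers: the state table moves out of the per-CPU struct cpuidle_device and into the globally registered struct cpuidle_driver, per-state driver data moves into dev->states_usage[], and every platform ->enter() hook changes from taking a struct cpuidle_state pointer and returning the slept time to taking the driver plus a state index, publishing the measured time through dev->last_residency and returning the index it entered. A minimal sketch of the new callback shape, inferred from the hunks below (example_enter_idle and do_platform_idle are illustrative names, not symbols from this tree):

	static int example_enter_idle(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
	{
		int residency_us;

		/* do_platform_idle() stands in for the SoC-specific idle entry */
		residency_us = do_platform_idle(&drv->states[index]);

		/* the core now reads the measured time from last_residency ... */
		dev->last_residency = residency_us;
		/* ... and the callback returns the state index actually entered */
		return index;
	}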
@@ -33,7 +33,8 @@ static struct cpuidle_driver at91_idle_driver = {
 
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
-			struct cpuidle_state *state)
+			struct cpuidle_driver *drv,
+			int index)
 {
 	struct timeval before, after;
 	int idle_time;
@@ -41,10 +42,10 @@ static int at91_enter_idle(struct cpuidle_device *dev,
 
 	local_irq_disable();
 	do_gettimeofday(&before);
-	if (state == &dev->states[0])
+	if (index == 0)
 		/* Wait for interrupt state */
 		cpu_do_idle();
-	else if (state == &dev->states[1]) {
+	else if (index == 1) {
 		asm("b 1f; .align 5; 1:");
 		asm("mcr p15, 0, r0, c7, c10, 4");	/* drain write buffer */
 		saved_lpr = sdram_selfrefresh_enable();
@@ -55,34 +56,38 @@ static int at91_enter_idle(struct cpuidle_device *dev,
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-	return idle_time;
+
+	dev->last_residency = idle_time;
+	return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int at91_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-
-	cpuidle_register_driver(&at91_idle_driver);
+	struct cpuidle_driver *driver = &at91_idle_driver;
 
 	device = &per_cpu(at91_cpuidle_device, smp_processor_id());
 	device->state_count = AT91_MAX_STATES;
+	driver->state_count = AT91_MAX_STATES;
 
 	/* Wait for interrupt state */
-	device->states[0].enter = at91_enter_idle;
-	device->states[0].exit_latency = 1;
-	device->states[0].target_residency = 10000;
-	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[0].name, "WFI");
-	strcpy(device->states[0].desc, "Wait for interrupt");
+	driver->states[0].enter = at91_enter_idle;
+	driver->states[0].exit_latency = 1;
+	driver->states[0].target_residency = 10000;
+	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[0].name, "WFI");
+	strcpy(driver->states[0].desc, "Wait for interrupt");
 
 	/* Wait for interrupt and RAM self refresh state */
-	device->states[1].enter = at91_enter_idle;
-	device->states[1].exit_latency = 10;
-	device->states[1].target_residency = 10000;
-	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[1].name, "RAM_SR");
-	strcpy(device->states[1].desc, "WFI and RAM Self Refresh");
+	driver->states[1].enter = at91_enter_idle;
+	driver->states[1].exit_latency = 10;
+	driver->states[1].target_residency = 10000;
+	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[1].name, "RAM_SR");
+	strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
 
+	cpuidle_register_driver(&at91_idle_driver);
+
 	if (cpuidle_register_device(device)) {
 		printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
@@ -78,9 +78,11 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
-				struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+				int index)
 {
-	struct davinci_ops *ops = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
 	struct timeval before, after;
 	int idle_time;
 
@@ -98,13 +100,17 @@ static int davinci_enter_idle(struct cpuidle_device *dev,
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-	return idle_time;
+
+	dev->last_residency = idle_time;
+
+	return index;
 }
 
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
 	int ret;
 	struct cpuidle_device *device;
+	struct cpuidle_driver *driver = &davinci_idle_driver;
 	struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
 
 	device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -116,33 +122,34 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 
 	ddr2_reg_base = pdata->ddr2_ctlr_base;
 
+	/* Wait for interrupt state */
+	driver->states[0].enter = davinci_enter_idle;
+	driver->states[0].exit_latency = 1;
+	driver->states[0].target_residency = 10000;
+	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[0].name, "WFI");
+	strcpy(driver->states[0].desc, "Wait for interrupt");
+
+	/* Wait for interrupt and DDR self refresh state */
+	driver->states[1].enter = davinci_enter_idle;
+	driver->states[1].exit_latency = 10;
+	driver->states[1].target_residency = 10000;
+	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[1].name, "DDR SR");
+	strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
+	if (pdata->ddr2_pdown)
+		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
+	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
+
+	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+	driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+
 	ret = cpuidle_register_driver(&davinci_idle_driver);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register driver\n");
 		return ret;
 	}
 
-	/* Wait for interrupt state */
-	device->states[0].enter = davinci_enter_idle;
-	device->states[0].exit_latency = 1;
-	device->states[0].target_residency = 10000;
-	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[0].name, "WFI");
-	strcpy(device->states[0].desc, "Wait for interrupt");
-
-	/* Wait for interrupt and DDR self refresh state */
-	device->states[1].enter = davinci_enter_idle;
-	device->states[1].exit_latency = 10;
-	device->states[1].target_residency = 10000;
-	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[1].name, "DDR SR");
-	strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
-	if (pdata->ddr2_pdown)
-		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
-	cpuidle_set_statedata(&device->states[1], &davinci_states[1]);
-
-	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
-
 	ret = cpuidle_register_device(device);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register device\n");
@@ -16,7 +16,8 @@
 #include <asm/proc-fns.h>
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-			      struct cpuidle_state *state);
+			      struct cpuidle_driver *drv,
+			      int index);
 
 static struct cpuidle_state exynos4_cpuidle_set[] = {
 	[0] = {
@@ -37,7 +38,8 @@ static struct cpuidle_driver exynos4_idle_driver = {
 };
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-			      struct cpuidle_state *state)
+			      struct cpuidle_driver *drv,
+			      int index)
 {
 	struct timeval before, after;
 	int idle_time;
@@ -52,29 +54,31 @@ static int exynos4_enter_idle(struct cpuidle_device *dev,
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 		    (after.tv_usec - before.tv_usec);
 
-	return idle_time;
+	dev->last_residency = idle_time;
+	return index;
 }
 
 static int __init exynos4_init_cpuidle(void)
 {
 	int i, max_cpuidle_state, cpu_id;
 	struct cpuidle_device *device;
+	struct cpuidle_driver *drv = &exynos4_idle_driver;
 
+	/* Setup cpuidle driver */
+	drv->state_count = (sizeof(exynos4_cpuidle_set) /
+				sizeof(struct cpuidle_state));
+	max_cpuidle_state = drv->state_count;
+	for (i = 0; i < max_cpuidle_state; i++) {
+		memcpy(&drv->states[i], &exynos4_cpuidle_set[i],
+				sizeof(struct cpuidle_state));
+	}
 	cpuidle_register_driver(&exynos4_idle_driver);
 
 	for_each_cpu(cpu_id, cpu_online_mask) {
 		device = &per_cpu(exynos4_cpuidle_device, cpu_id);
 		device->cpu = cpu_id;
 
-		device->state_count = (sizeof(exynos4_cpuidle_set) /
-					sizeof(struct cpuidle_state));
-
-		max_cpuidle_state = device->state_count;
-
-		for (i = 0; i < max_cpuidle_state; i++) {
-			memcpy(&device->states[i], &exynos4_cpuidle_set[i],
-					sizeof(struct cpuidle_state));
-		}
+		device->state_count = drv->state_count;
 
 		if (cpuidle_register_device(device)) {
 			printk(KERN_ERR "CPUidle register device failed\n,");
@@ -32,17 +32,18 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
 
 /* Actual code that puts the SoC in different idle states */
 static int kirkwood_enter_idle(struct cpuidle_device *dev,
-			       struct cpuidle_state *state)
+			       struct cpuidle_driver *drv,
+			       int index)
 {
 	struct timeval before, after;
 	int idle_time;
 
 	local_irq_disable();
 	do_gettimeofday(&before);
-	if (state == &dev->states[0])
+	if (index == 0)
 		/* Wait for interrupt state */
 		cpu_do_idle();
-	else if (state == &dev->states[1]) {
+	else if (index == 1) {
 		/*
 		 * Following write will put DDR in self refresh.
		 * Note that we have 256 cycles before DDR puts it
@@ -57,35 +58,40 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-	return idle_time;
+
+	/* Update last residency */
+	dev->last_residency = idle_time;
+
+	return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int kirkwood_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-
-	cpuidle_register_driver(&kirkwood_idle_driver);
+	struct cpuidle_driver *driver = &kirkwood_idle_driver;
 
 	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
 	device->state_count = KIRKWOOD_MAX_STATES;
+	driver->state_count = KIRKWOOD_MAX_STATES;
 
 	/* Wait for interrupt state */
-	device->states[0].enter = kirkwood_enter_idle;
-	device->states[0].exit_latency = 1;
-	device->states[0].target_residency = 10000;
-	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[0].name, "WFI");
-	strcpy(device->states[0].desc, "Wait for interrupt");
+	driver->states[0].enter = kirkwood_enter_idle;
+	driver->states[0].exit_latency = 1;
+	driver->states[0].target_residency = 10000;
+	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[0].name, "WFI");
+	strcpy(driver->states[0].desc, "Wait for interrupt");
 
 	/* Wait for interrupt and DDR self refresh state */
-	device->states[1].enter = kirkwood_enter_idle;
-	device->states[1].exit_latency = 10;
-	device->states[1].target_residency = 10000;
-	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[1].name, "DDR SR");
-	strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
+	driver->states[1].enter = kirkwood_enter_idle;
+	driver->states[1].exit_latency = 10;
+	driver->states[1].target_residency = 10000;
+	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[1].name, "DDR SR");
+	strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
 
+	cpuidle_register_driver(&kirkwood_idle_driver);
 	if (cpuidle_register_device(device)) {
 		printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
 		return -EIO;
@@ -88,17 +88,21 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
 /**
  * omap3_enter_idle - Programs OMAP3 to enter the specified state
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
  *
  * Called from the CPUidle framework to program the device to the
  * specified target state selected by the governor.
  */
 static int omap3_enter_idle(struct cpuidle_device *dev,
-			struct cpuidle_state *state)
+			struct cpuidle_driver *drv,
+			int index)
 {
-	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
+	struct omap3_idle_statedata *cx =
+			cpuidle_get_statedata(&dev->states_usage[index]);
 	struct timespec ts_preidle, ts_postidle, ts_idle;
 	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
+	int idle_time;
 
 	/* Used to keep track of the total time in idle */
 	getnstimeofday(&ts_preidle);
@@ -113,7 +117,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 		goto return_sleep_time;
 
 	/* Deny idle for C1 */
-	if (state == &dev->states[0]) {
+	if (index == 0) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
 	}
@@ -122,7 +126,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 	omap_sram_idle();
 
 	/* Re-allow idle for C1 */
-	if (state == &dev->states[0]) {
+	if (index == 0) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
 	}
@@ -134,28 +138,38 @@ return_sleep_time:
 	local_irq_enable();
 	local_fiq_enable();
 
-	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
+	idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
+								USEC_PER_SEC;
+
+	/* Update cpuidle counters */
+	dev->last_residency = idle_time;
+
+	return index;
 }
 
 /**
  * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
- * @state: Currently selected C-state
+ * @drv: cpuidle driver
+ * @index: Index of currently selected c-state
  *
- * If the current state is valid, it is returned back to the caller.
- * Else, this function searches for a lower c-state which is still
- * valid.
+ * If the state corresponding to index is valid, index is returned back
+ * to the caller. Else, this function searches for a lower c-state which is
+ * still valid (as defined in omap3_power_states[]) and returns its index.
 *
 * A state is valid if the 'valid' field is enabled and
 * if it satisfies the enable_off_mode condition.
 */
-static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
-						struct cpuidle_state *curr)
+static int next_valid_state(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+						int index)
 {
-	struct cpuidle_state *next = NULL;
-	struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
+	struct cpuidle_state_usage *curr_usage = &dev->states_usage[index];
+	struct cpuidle_state *curr = &drv->states[index];
+	struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage);
 	u32 mpu_deepest_state = PWRDM_POWER_RET;
 	u32 core_deepest_state = PWRDM_POWER_RET;
+	int next_index = -1;
 
 	if (enable_off_mode) {
 		mpu_deepest_state = PWRDM_POWER_OFF;
@@ -172,20 +186,20 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
 	if ((cx->valid) &&
 	    (cx->mpu_state >= mpu_deepest_state) &&
 	    (cx->core_state >= core_deepest_state)) {
-		return curr;
+		return index;
 	} else {
 		int idx = OMAP3_NUM_STATES - 1;
 
 		/* Reach the current state starting at highest C-state */
 		for (; idx >= 0; idx--) {
-			if (&dev->states[idx] == curr) {
-				next = &dev->states[idx];
+			if (&drv->states[idx] == curr) {
+				next_index = idx;
 				break;
 			}
 		}
 
 		/* Should never hit this condition */
-		WARN_ON(next == NULL);
+		WARN_ON(next_index == -1);
 
 		/*
 		 * Drop to next valid state.
@@ -193,41 +207,44 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
 		 */
 		idx--;
 		for (; idx >= 0; idx--) {
-			cx = cpuidle_get_statedata(&dev->states[idx]);
+			cx = cpuidle_get_statedata(&dev->states_usage[idx]);
 			if ((cx->valid) &&
 			    (cx->mpu_state >= mpu_deepest_state) &&
 			    (cx->core_state >= core_deepest_state)) {
-				next = &dev->states[idx];
+				next_index = idx;
 				break;
 			}
 		}
 		/*
 		 * C1 is always valid.
-		 * So, no need to check for 'next==NULL' outside this loop.
+		 * So, no need to check for 'next_index == -1' outside
+		 * this loop.
 		 */
 	}
 
-	return next;
+	return next_index;
 }
 
 /**
  * omap3_enter_idle_bm - Checks for any bus activity
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @drv: cpuidle driver
+ * @index: array index of target state to be programmed
 *
 * This function checks for any pending activity and then programs
 * the device to the specified or a safer state.
 */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
-				struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+			       int index)
 {
-	struct cpuidle_state *new_state;
+	int new_state_idx;
 	u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
 	struct omap3_idle_statedata *cx;
 	int ret;
 
 	if (!omap3_can_sleep()) {
-		new_state = dev->safe_state;
+		new_state_idx = drv->safe_state_index;
 		goto select_state;
 	}
 
@@ -237,7 +254,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
 	 */
 	cam_state = pwrdm_read_pwrst(cam_pd);
 	if (cam_state == PWRDM_POWER_ON) {
-		new_state = dev->safe_state;
+		new_state_idx = drv->safe_state_index;
 		goto select_state;
 	}
 
@@ -253,7 +270,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
 	 * Prevent PER off if CORE is not in retention or off as this
 	 * would disable PER wakeups completely.
 	 */
-	cx = cpuidle_get_statedata(state);
+	cx = cpuidle_get_statedata(&dev->states_usage[index]);
 	core_next_state = cx->core_state;
 	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
 	if ((per_next_state == PWRDM_POWER_OFF) &&
@@ -264,11 +281,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
 	if (per_next_state != per_saved_state)
 		pwrdm_set_next_pwrst(per_pd, per_next_state);
 
-	new_state = next_valid_state(dev, state);
+	new_state_idx = next_valid_state(dev, drv, index);
 
 select_state:
-	dev->last_state = new_state;
-	ret = omap3_enter_idle(dev, new_state);
+	ret = omap3_enter_idle(dev, drv, new_state_idx);
 
 	/* Restore original PER state if it was modified */
 	if (per_next_state != per_saved_state)
@@ -301,22 +317,31 @@ struct cpuidle_driver omap3_idle_driver = {
 	.owner = THIS_MODULE,
 };
 
-/* Helper to fill the C-state common data and register the driver_data */
-static inline struct omap3_idle_statedata *_fill_cstate(
-					struct cpuidle_device *dev,
+/* Helper to fill the C-state common data*/
+static inline void _fill_cstate(struct cpuidle_driver *drv,
 					int idx, const char *descr)
 {
-	struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
-	struct cpuidle_state *state = &dev->states[idx];
+	struct cpuidle_state *state = &drv->states[idx];
 
 	state->exit_latency = cpuidle_params_table[idx].exit_latency;
 	state->target_residency = cpuidle_params_table[idx].target_residency;
 	state->flags = CPUIDLE_FLAG_TIME_VALID;
 	state->enter = omap3_enter_idle_bm;
-	cx->valid = cpuidle_params_table[idx].valid;
 	sprintf(state->name, "C%d", idx + 1);
 	strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
-	cpuidle_set_statedata(state, cx);
+
+}
+
+/* Helper to register the driver_data */
+static inline struct omap3_idle_statedata *_fill_cstate_usage(
+					struct cpuidle_device *dev,
+					int idx)
+{
+	struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
+
+	cx->valid = cpuidle_params_table[idx].valid;
+	cpuidle_set_statedata(state_usage, cx);
 
 	return cx;
 }
@@ -330,6 +355,7 @@ static inline struct omap3_idle_statedata *_fill_cstate(
 int __init omap3_idle_init(void)
 {
 	struct cpuidle_device *dev;
+	struct cpuidle_driver *drv = &omap3_idle_driver;
 	struct omap3_idle_statedata *cx;
 
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
@@ -337,44 +363,52 @@ int __init omap3_idle_init(void)
 	per_pd = pwrdm_lookup("per_pwrdm");
 	cam_pd = pwrdm_lookup("cam_pwrdm");
 
-	cpuidle_register_driver(&omap3_idle_driver);
+	drv->safe_state_index = -1;
 	dev = &per_cpu(omap3_idle_dev, smp_processor_id());
 
 	/* C1 . MPU WFI + Core active */
-	cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
-	(&dev->states[0])->enter = omap3_enter_idle;
-	dev->safe_state = &dev->states[0];
+	_fill_cstate(drv, 0, "MPU ON + CORE ON");
+	(&drv->states[0])->enter = omap3_enter_idle;
+	drv->safe_state_index = 0;
+	cx = _fill_cstate_usage(dev, 0);
 	cx->valid = 1;	/* C1 is always valid */
 	cx->mpu_state = PWRDM_POWER_ON;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C2 . MPU WFI + Core inactive */
-	cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
+	_fill_cstate(drv, 1, "MPU ON + CORE ON");
+	cx = _fill_cstate_usage(dev, 1);
 	cx->mpu_state = PWRDM_POWER_ON;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C3 . MPU CSWR + Core inactive */
-	cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
+	_fill_cstate(drv, 2, "MPU RET + CORE ON");
+	cx = _fill_cstate_usage(dev, 2);
 	cx->mpu_state = PWRDM_POWER_RET;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C4 . MPU OFF + Core inactive */
-	cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
+	_fill_cstate(drv, 3, "MPU OFF + CORE ON");
+	cx = _fill_cstate_usage(dev, 3);
 	cx->mpu_state = PWRDM_POWER_OFF;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C5 . MPU RET + Core RET */
-	cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
+	_fill_cstate(drv, 4, "MPU RET + CORE RET");
+	cx = _fill_cstate_usage(dev, 4);
 	cx->mpu_state = PWRDM_POWER_RET;
 	cx->core_state = PWRDM_POWER_RET;
 
 	/* C6 . MPU OFF + Core RET */
-	cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
+	_fill_cstate(drv, 5, "MPU OFF + CORE RET");
+	cx = _fill_cstate_usage(dev, 5);
 	cx->mpu_state = PWRDM_POWER_OFF;
 	cx->core_state = PWRDM_POWER_RET;
 
 	/* C7 . MPU OFF + Core OFF */
-	cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
+	_fill_cstate(drv, 6, "MPU OFF + CORE OFF");
+	cx = _fill_cstate_usage(dev, 6);
 	/*
 	 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
 	 * enable OFF mode in a stable form for previous revisions.
@@ -388,6 +422,9 @@ int __init omap3_idle_init(void)
 	cx->mpu_state = PWRDM_POWER_OFF;
 	cx->core_state = PWRDM_POWER_OFF;
 
+	drv->state_count = OMAP3_NUM_STATES;
+	cpuidle_register_driver(&omap3_idle_driver);
+
 	dev->state_count = OMAP3_NUM_STATES;
 	if (cpuidle_register_device(dev)) {
 		printk(KERN_ERR "%s: CPUidle register device failed\n",
@@ -25,11 +25,12 @@ static unsigned long cpuidle_mode[] = {
 };
 
 static int cpuidle_sleep_enter(struct cpuidle_device *dev,
-				struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+				int index)
 {
 	unsigned long allowed_mode = arch_hwblk_sleep_mode();
 	ktime_t before, after;
-	int requested_state = state - &dev->states[0];
+	int requested_state = index;
 	int allowed_state;
 	int k;
 
@@ -46,11 +47,13 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
 	 */
 	k = min_t(int, allowed_state, requested_state);
 
-	dev->last_state = &dev->states[k];
 	before = ktime_get();
 	sh_mobile_call_standby(cpuidle_mode[k]);
 	after = ktime_get();
-	return ktime_to_ns(ktime_sub(after, before)) >> 10;
+
+	dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
+
+	return k;
 }
 
 static struct cpuidle_device cpuidle_dev;
@@ -62,19 +65,19 @@ static struct cpuidle_driver cpuidle_driver = {
 void sh_mobile_setup_cpuidle(void)
 {
 	struct cpuidle_device *dev = &cpuidle_dev;
+	struct cpuidle_driver *drv = &cpuidle_driver;
 	struct cpuidle_state *state;
 	int i;
 
-	cpuidle_register_driver(&cpuidle_driver);
 
 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-		dev->states[i].name[0] = '\0';
-		dev->states[i].desc[0] = '\0';
+		drv->states[i].name[0] = '\0';
+		drv->states[i].desc[0] = '\0';
 	}
 
 	i = CPUIDLE_DRIVER_STATE_START;
 
-	state = &dev->states[i++];
+	state = &drv->states[i++];
 	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
 	strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
 	state->exit_latency = 1;
@@ -84,10 +87,10 @@ void sh_mobile_setup_cpuidle(void)
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;
 
-	dev->safe_state = state;
+	drv->safe_state_index = i-1;
 
 	if (sh_mobile_sleep_supported & SUSP_SH_SF) {
-		state = &dev->states[i++];
+		state = &drv->states[i++];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
 		strncpy(state->desc, "SuperH Sleep Mode [SF]",
 			CPUIDLE_DESC_LEN);
@@ -100,7 +103,7 @@ void sh_mobile_setup_cpuidle(void)
 	}
 
 	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
-		state = &dev->states[i++];
+		state = &drv->states[i++];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
 		strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
 			CPUIDLE_DESC_LEN);
@@ -112,7 +115,10 @@ void sh_mobile_setup_cpuidle(void)
 		state->enter = cpuidle_sleep_enter;
 	}
 
+	drv->state_count = i;
 	dev->state_count = i;
 
+	cpuidle_register_driver(&cpuidle_driver);
+
 	cpuidle_register_device(dev);
 }
@@ -70,7 +70,7 @@ static struct mrst_device mrst_devs[] = {
 /* 24 */ { 0x4110, 0 }, /* Lincroft */
 };
 
-/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */
+/* n.b. We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */
 static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0};
 static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803,
 				   0x0804, 0x0805, 0x080f, 0};
@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
 
 	if (action == CPU_ONLINE && pr) {
 		acpi_processor_ppc_has_changed(pr, 0);
-		acpi_processor_cst_has_changed(pr);
+		acpi_processor_hotplug(pr);
 		acpi_processor_reevaluate_tstate(pr, action);
 		acpi_processor_tstate_has_changed(pr);
 	}
@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
 	acpi_processor_get_throttling_info(pr);
 	acpi_processor_get_limit_info(pr);
 
-
-	if (cpuidle_get_driver() == &acpi_idle_driver)
+	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
 		acpi_processor_power_init(pr, device);
 
 	pr->cdev = thermal_cooling_device_register("Processor", device,
@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void)
 
 	memset(&errata, 0, sizeof(errata));
 
-	if (!cpuidle_register_driver(&acpi_idle_driver)) {
-		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
-			acpi_idle_driver.name);
-	} else {
-		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
-			cpuidle_get_driver()->name);
-	}
-
 	result = acpi_bus_register_driver(&acpi_processor_driver);
 	if (result < 0)
-		goto out_cpuidle;
+		return result;
 
 	acpi_processor_install_hotplug_notify();
 
@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void)
 	acpi_processor_throttling_init();
 
 	return 0;
-
-out_cpuidle:
-	cpuidle_unregister_driver(&acpi_idle_driver);
-
-	return result;
 }
 
 static void __exit acpi_processor_exit(void)
@@ -741,22 +741,25 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 /**
  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing cpuidle state info
+ * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-		struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t kt1, kt2;
 	s64 idle_time;
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
 	local_irq_disable();
 
@@ -764,7 +767,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (acpi_idle_suspend) {
 		local_irq_enable();
 		cpu_relax();
-		return 0;
+		return -EINVAL;
 	}
 
 	lapic_timer_state_broadcast(pr, cx, 1);
@@ -773,37 +776,47 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	kt2 = ktime_get_real();
 	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
+	/* Update device last_residency*/
+	dev->last_residency = (int)idle_time;
+
 	local_irq_enable();
 	cx->usage++;
 	lapic_timer_state_broadcast(pr, cx, 0);
 
-	return idle_time;
+	return index;
 }
 
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver with cpuidle state information
+ * @index: the index of suggested state
 */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-		struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
-
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+		return -EINVAL;
 
 	local_irq_disable();
 
+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EINVAL;
+	}
+
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -815,7 +828,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -837,6 +850,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency*/
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -848,7 +864,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 static int c3_cpu_count;
@@ -857,37 +873,43 @@ static DEFINE_SPINLOCK(c3_lock);
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing state data
+ * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-		struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+	if (acpi_idle_suspend) {
+		cpu_relax();
+		return -EINVAL;
+	}
 
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-		if (dev->safe_state) {
-			dev->last_state = dev->safe_state;
-			return dev->safe_state->enter(dev, dev->safe_state);
+		if (drv->safe_state_index >= 0) {
+			return drv->states[drv->safe_state_index].enter(dev,
+						drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
 			acpi_safe_halt();
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -904,7 +926,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -954,6 +976,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency*/
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -965,7 +990,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
@@ -974,14 +999,16 @@ struct cpuidle_driver acpi_idle_driver = {
 };
 
 /**
- * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
+ * device i.e. per-cpu data
+ *
  * @pr: the ACPI processor
 */
-static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
 {
 	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
-	struct cpuidle_state *state;
+	struct cpuidle_state_usage *state_usage;
 	struct cpuidle_device *dev = &pr->power.dev;
 
 	if (!pr->flags.power_setup_done)
@@ -992,17 +1019,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	}
 
 	dev->cpu = pr->id;
-	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-		dev->states[i].name[0] = '\0';
-		dev->states[i].desc[0] = '\0';
-	}
 
 	if (max_cstate == 0)
 		max_cstate = 1;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
-		state = &dev->states[count];
+		state_usage = &dev->states_usage[count];
 
 		if (!cx->valid)
 			continue;
@@ -1013,8 +1036,64 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 			continue;
 #endif
-		cpuidle_set_statedata(state, cx);
 
+		cpuidle_set_statedata(state_usage, cx);
+
+		count++;
+		if (count == CPUIDLE_STATE_MAX)
+			break;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
+ * global state data i.e. idle routines
+ *
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+{
+	int i, count = CPUIDLE_DRIVER_STATE_START;
+	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
+	struct cpuidle_driver *drv = &acpi_idle_driver;
+
+	if (!pr->flags.power_setup_done)
+		return -EINVAL;
+
+	if (pr->flags.power == 0)
+		return -EINVAL;
+
+	drv->safe_state_index = -1;
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+		drv->states[i].name[0] = '\0';
+		drv->states[i].desc[0] = '\0';
+	}
+
+	if (max_cstate == 0)
+		max_cstate = 1;
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		cx = &pr->power.states[i];
+
+		if (!cx->valid)
+			continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+		    !pr->flags.has_cst &&
+		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+			continue;
+#endif
+
+		state = &drv->states[count];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
@@ -1027,13 +1106,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
 			state->enter = acpi_idle_enter_c1;
-			dev->safe_state = state;
+			drv->safe_state_index = count;
 			break;
 
 			case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
-			dev->safe_state = state;
+			drv->safe_state_index = count;
 			break;
 
 			case ACPI_STATE_C3:
@@ -1049,7 +1128,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			break;
 		}
 
-	dev->state_count = count;
+	drv->state_count = count;
 
 	if (!count)
 		return -EINVAL;
@@ -1057,7 +1136,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+int acpi_processor_hotplug(struct acpi_processor *pr)
 {
 	int ret = 0;
 
@@ -1078,7 +1157,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	cpuidle_disable_device(&pr->power.dev);
 	acpi_processor_get_power_info(pr);
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle(pr);
+		acpi_processor_setup_cpuidle_cx(pr);
 		ret = cpuidle_enable_device(&pr->power.dev);
 	}
 	cpuidle_resume_and_unlock();
@@ -1086,10 +1165,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	return ret;
 }
 
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int cpu;
+	struct acpi_processor *_pr;
+
+	if (disabled_by_idle_boot_param())
+		return 0;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst)
+		return -ENODEV;
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	/*
+	 * FIXME: Design the ACPI notification to make it once per
+	 * system instead of once per-cpu. This condition is a hack
+	 * to make the code that updates C-States be called once.
+	 */
+
+	if (smp_processor_id() == 0 &&
+			cpuidle_get_driver() == &acpi_idle_driver) {
+
+		cpuidle_pause_and_lock();
+		/* Protect against cpu-hotplug */
+		get_online_cpus();
+
+		/* Disable all cpuidle devices */
+		for_each_online_cpu(cpu) {
+			_pr = per_cpu(processors, cpu);
+			if (!_pr || !_pr->flags.power_setup_done)
+				continue;
+			cpuidle_disable_device(&_pr->power.dev);
+		}
+
+		/* Populate Updated C-state information */
+		acpi_processor_setup_cpuidle_states(pr);
+
+		/* Enable all cpuidle devices */
+		for_each_online_cpu(cpu) {
+			_pr = per_cpu(processors, cpu);
+			if (!_pr || !_pr->flags.power_setup_done)
+				continue;
+			acpi_processor_get_power_info(_pr);
+			if (_pr->flags.power) {
+				acpi_processor_setup_cpuidle_cx(_pr);
+				cpuidle_enable_device(&_pr->power.dev);
+			}
+		}
+		put_online_cpus();
+		cpuidle_resume_and_unlock();
+	}
+
+	return 0;
+}
+
+static int acpi_processor_registered;
+
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
 	acpi_status status = 0;
+	int retval;
 	static int first_run;
 
 	if (disabled_by_idle_boot_param())
@@ -1126,9 +1267,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	 * platforms that only support C1.
 	 */
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle(pr);
-		if (cpuidle_register_device(&pr->power.dev))
-			return -EIO;
+		/* Register acpi_idle_driver if not already registered */
+		if (!acpi_processor_registered) {
+			acpi_processor_setup_cpuidle_states(pr);
+			retval = cpuidle_register_driver(&acpi_idle_driver);
+			if (retval)
+				return retval;
+			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+					acpi_idle_driver.name);
+		}
+		/* Register per-cpu cpuidle_device. Cpuidle driver
+		 * must already be registered before registering device
+		 */
+		acpi_processor_setup_cpuidle_cx(pr);
+		retval = cpuidle_register_device(&pr->power.dev);
+		if (retval) {
+			if (acpi_processor_registered == 0)
+				cpuidle_unregister_driver(&acpi_idle_driver);
+			return retval;
+		}
+		acpi_processor_registered++;
 	}
 	return 0;
 }
||||||
@@ -1139,8 +1297,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
|
|||||||
if (disabled_by_idle_boot_param())
|
if (disabled_by_idle_boot_param())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
cpuidle_unregister_device(&pr->power.dev);
|
if (pr->flags.power) {
|
||||||
pr->flags.power_setup_done = 0;
|
cpuidle_unregister_device(&pr->power.dev);
|
||||||
|
acpi_processor_registered--;
|
||||||
|
if (acpi_processor_registered == 0)
|
||||||
|
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||||
|
}
|
||||||
|
|
||||||
|
pr->flags.power_setup_done = 0;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
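The hunks above split the old per-CPU setup into a system-wide acpi_processor_setup_cpuidle_states() and a per-CPU acpi_processor_setup_cpuidle_cx(), so the state table is registered with the cpuidle core once and each CPU only adds a device. A rough sketch of the pattern any converted platform driver now follows (illustrative only; everything outside the cpuidle_* API, including the my_idle_* names, is made up for the example):

#include <linux/cpuidle.h>

static struct cpuidle_driver my_idle_driver = {
	.name = "my_idle",	/* hypothetical driver */
	.owner = THIS_MODULE,
};

static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);

static int __init my_idle_init(void)
{
	int cpu, ret;

	/* ...fill my_idle_driver.states[] and .state_count once... */

	ret = cpuidle_register_driver(&my_idle_driver);	/* once per system */
	if (ret)
		return ret;

	for_each_online_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);

		dev->cpu = cpu;
		dev->state_count = my_idle_driver.state_count;
		ret = cpuidle_register_device(dev);	/* once per CPU */
		if (ret) {
			cpuidle_unregister_driver(&my_idle_driver);
			return ret;
		}
	}
	return 0;
}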
@@ -61,8 +61,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
 int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 	struct cpuidle_state *target_state;
-	int next_state;
+	int next_state, entered_state;

 	if (off)
 		return -ENODEV;
@@ -83,45 +84,36 @@ int cpuidle_idle_call(void)
 	hrtimer_peek_ahead_timers();
 #endif

-	/*
-	 * Call the device's prepare function before calling the
-	 * governor's select function.  ->prepare gives the device's
-	 * cpuidle driver a chance to update any dynamic information
-	 * of its cpuidle states for the current idle period, e.g.
-	 * state availability, latencies, residencies, etc.
-	 */
-	if (dev->prepare)
-		dev->prepare(dev);
-
 	/* ask the governor for the next state */
-	next_state = cpuidle_curr_governor->select(dev);
+	next_state = cpuidle_curr_governor->select(drv, dev);
 	if (need_resched()) {
 		local_irq_enable();
 		return 0;
 	}

-	target_state = &dev->states[next_state];
-
-	/* enter the state and update stats */
-	dev->last_state = target_state;
+	target_state = &drv->states[next_state];

 	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
 	trace_cpu_idle(next_state, dev->cpu);

-	dev->last_residency = target_state->enter(dev, target_state);
+	entered_state = target_state->enter(dev, drv, next_state);

 	trace_power_end(dev->cpu);
 	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

-	if (dev->last_state)
-		target_state = dev->last_state;
-
-	target_state->time += (unsigned long long)dev->last_residency;
-	target_state->usage++;
+	if (entered_state >= 0) {
+		/* Update cpuidle counters */
+		/* This can be moved to within driver enter routine
+		 * but that results in multiple copies of same code.
+		 */
+		dev->states_usage[entered_state].time +=
+				(unsigned long long)dev->last_residency;
+		dev->states_usage[entered_state].usage++;
+	}

 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
-		cpuidle_curr_governor->reflect(dev);
+		cpuidle_curr_governor->reflect(dev, entered_state);

 	return 0;
 }
@@ -172,11 +164,11 @@ void cpuidle_resume_and_unlock(void)
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+static int poll_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t	t1, t2;
 	s64 diff;
-	int ret;

 	t1 = ktime_get();
 	local_irq_enable();
@@ -188,15 +180,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
 	if (diff > INT_MAX)
 		diff = INT_MAX;

-	ret = (int) diff;
-	return ret;
+	dev->last_residency = (int) diff;
+
+	return index;
 }

-static void poll_idle_init(struct cpuidle_device *dev)
+static void poll_idle_init(struct cpuidle_driver *drv)
 {
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
+	struct cpuidle_state *state = &drv->states[0];

 	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
 	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -207,7 +198,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
 	state->enter = poll_idle;
 }
 #else
-static void poll_idle_init(struct cpuidle_device *dev) {}
+static void poll_idle_init(struct cpuidle_driver *drv) {}
 #endif /* CONFIG_ARCH_HAS_CPU_RELAX */

 /**
@@ -234,21 +225,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
 		return ret;
 	}

-	poll_idle_init(dev);
+	poll_idle_init(cpuidle_get_driver());

 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;

 	if (cpuidle_curr_governor->enable &&
-	    (ret = cpuidle_curr_governor->enable(dev)))
+	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
 		goto fail_sysfs;

 	for (i = 0; i < dev->state_count; i++) {
-		dev->states[i].usage = 0;
-		dev->states[i].time = 0;
+		dev->states_usage[i].usage = 0;
+		dev->states_usage[i].time = 0;
 	}
 	dev->last_residency = 0;
-	dev->last_state = NULL;

 	smp_wmb();

@@ -282,7 +272,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 	dev->enabled = 0;

 	if (cpuidle_curr_governor->disable)
-		cpuidle_curr_governor->disable(dev);
+		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

 	cpuidle_remove_state_sysfs(dev);
 	enabled_devices--;
@@ -310,26 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)

 	init_completion(&dev->kobj_unregister);

-	/*
-	 * cpuidle driver should set the dev->power_specified bit
-	 * before registering the device if the driver provides
-	 * power_usage numbers.
-	 *
-	 * For those devices whose ->power_specified is not set,
-	 * we fill in power_usage with decreasing values as the
-	 * cpuidle code has an implicit assumption that state Cn
-	 * uses less power than C(n-1).
-	 *
-	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
-	 * an power value of -1.  So we use -2, -3, etc, for other
-	 * c-states.
-	 */
-	if (!dev->power_specified) {
-		int i;
-		for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
-			dev->states[i].power_usage = -1 - i;
-	}
-
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 	if ((ret = cpuidle_add_sysfs(sys_dev))) {
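With this rework, cpuidle_idle_call() trusts the driver's ->enter hook to fill in dev->last_residency and to return the index of the state it actually entered (a negative value skips the accounting), because the per-state counters are now updated from dev->states_usage[entered_state]. A minimal sketch of an ->enter callback against the new prototype (illustrative only, not part of the patch; the my_enter name is hypothetical):

static int my_enter(struct cpuidle_device *dev,
		    struct cpuidle_driver *drv, int index)
{
	ktime_t before, after;

	before = ktime_get();

	/* ...architecture-specific idle entry for drv->states[index]... */

	after = ktime_get();

	/* the core reads this to credit dev->states_usage[index] */
	dev->last_residency = (int)ktime_to_us(ktime_sub(after, before));

	return index;	/* index actually entered; < 0 would skip accounting */
}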
@@ -17,6 +17,30 @@
 static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);

+static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+	int i;
+	/*
+	 * cpuidle driver should set the drv->power_specified bit
+	 * before registering if the driver provides
+	 * power_usage numbers.
+	 *
+	 * If power_specified is not set,
+	 * we fill in power_usage with decreasing values as the
+	 * cpuidle code has an implicit assumption that state Cn
+	 * uses less power than C(n-1).
+	 *
+	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
+	 * an power value of -1.  So we use -2, -3, etc, for other
+	 * c-states.
+	 */
+	if (!drv->power_specified) {
+		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+			drv->states[i].power_usage = -1 - i;
+	}
+}
+
+
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
 		spin_unlock(&cpuidle_driver_lock);
 		return -EBUSY;
 	}
+	__cpuidle_register_driver(drv);
 	cpuidle_curr_driver = drv;
 	spin_unlock(&cpuidle_driver_lock);
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,

 /**
  * ladder_select_state - selects the next state to enter
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_select_state(struct cpuidle_device *dev)
+static int ladder_select_state(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)

 	last_state = &ldev->states[last_idx];

-	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
-		last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
+	if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
+		last_residency = cpuidle_get_last_residency(dev) - \
+					 drv->states[last_idx].exit_latency;
+	}
 	else
 		last_residency = last_state->threshold.promotion_time + 1;

 	/* consider promotion */
-	if (last_idx < dev->state_count - 1 &&
+	if (last_idx < drv->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <= latency_req) {
+	    drv->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)

 	/* consider demotion */
 	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-	    dev->states[last_idx].exit_latency > latency_req) {
+	    drv->states[last_idx].exit_latency > latency_req) {
 		int i;

 		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
-			if (dev->states[i].exit_latency <= latency_req)
+			if (drv->states[i].exit_latency <= latency_req)
 				break;
 		}
 		ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)

 /**
  * ladder_enable_device - setup for the governor
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_enable_device(struct cpuidle_device *dev)
+static int ladder_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	int i;
 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)

 	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;

-	for (i = 0; i < dev->state_count; i++) {
-		state = &dev->states[i];
+	for (i = 0; i < drv->state_count; i++) {
+		state = &drv->states[i];
 		lstate = &ldev->states[i];

 		lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 		lstate->threshold.promotion_count = PROMOTION_COUNT;
 		lstate->threshold.demotion_count = DEMOTION_COUNT;

-		if (i < dev->state_count - 1)
+		if (i < drv->state_count - 1)
 			lstate->threshold.promotion_time = state->exit_latency;
 		if (i > 0)
 			lstate->threshold.demotion_time = state->exit_latency;
@@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 	return 0;
 }

+/**
+ * ladder_reflect - update the correct last_state_idx
+ * @dev: the CPU
+ * @index: the index of actual state entered
+ */
+static void ladder_reflect(struct cpuidle_device *dev, int index)
+{
+	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+	if (index > 0)
+		ldev->last_state_idx = index;
+}
+
 static struct cpuidle_governor ladder_governor = {
 	.name =		"ladder",
 	.rating =	10,
 	.enable =	ladder_enable_device,
 	.select =	ladder_select_state,
+	.reflect =	ladder_reflect,
 	.owner =	THIS_MODULE,
 };
@@ -182,7 +182,7 @@ static inline int performance_multiplier(void)

 static DEFINE_PER_CPU(struct menu_device, menu_devices);

-static void menu_update(struct cpuidle_device *dev);
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

 /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
 static u64 div_round64(u64 dividend, u32 divisor)
@@ -228,9 +228,10 @@ static void detect_repeating_patterns(struct menu_device *data)

 /**
  * menu_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static int menu_select(struct cpuidle_device *dev)
+static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -240,7 +241,7 @@ static int menu_select(struct cpuidle_device *dev)
 	struct timespec t;

 	if (data->needs_update) {
-		menu_update(dev);
+		menu_update(drv, dev);
 		data->needs_update = 0;
 	}

@@ -285,11 +286,9 @@ static int menu_select(struct cpuidle_device *dev)
 	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
 	 */
-	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
-		struct cpuidle_state *s = &dev->states[i];
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+		struct cpuidle_state *s = &drv->states[i];

-		if (s->flags & CPUIDLE_FLAG_IGNORE)
-			continue;
 		if (s->target_residency > data->predicted_us)
 			continue;
 		if (s->exit_latency > latency_req)
@@ -310,26 +309,30 @@ static int menu_select(struct cpuidle_device *dev)
 /**
  * menu_reflect - records that data structures need update
  * @dev: the CPU
+ * @index: the index of actual entered state
  *
  * NOTE: it's important to be fast here because this operation will add to
  *       the overall exit latency.
  */
-static void menu_reflect(struct cpuidle_device *dev)
+static void menu_reflect(struct cpuidle_device *dev, int index)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
-	data->needs_update = 1;
+	data->last_state_idx = index;
+	if (index >= 0)
+		data->needs_update = 1;
 }

 /**
  * menu_update - attempts to guess what happened after entry
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static void menu_update(struct cpuidle_device *dev)
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
 	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
-	struct cpuidle_state *target = &dev->states[last_idx];
+	struct cpuidle_state *target = &drv->states[last_idx];
 	unsigned int measured_us;
 	u64 new_factor;

@@ -383,9 +386,11 @@ static void menu_update(struct cpuidle_device *dev)

 /**
  * menu_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int menu_enable_device(struct cpuidle_device *dev)
+static int menu_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
@@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = {

 struct cpuidle_state_attr {
 	struct attribute attr;
-	ssize_t (*show)(struct cpuidle_state *, char *);
+	ssize_t (*show)(struct cpuidle_state *, \
+					struct cpuidle_state_usage *, char *);
 	ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
 };

@@ -224,19 +225,22 @@ struct cpuidle_state_attr {
 static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)

 #define define_show_state_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			 struct cpuidle_state_usage *state_usage, char *buf) \
 { \
 	return sprintf(buf, "%u\n", state->_name);\
 }

 #define define_show_state_ull_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
-	return sprintf(buf, "%llu\n", state->_name);\
+	return sprintf(buf, "%llu\n", state_usage->_name);\
 }

 #define define_show_state_str_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
 	if (state->_name[0] == '\0')\
 		return sprintf(buf, "<null>\n");\
@@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = {

 #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
 #define kobj_to_state(k) (kobj_to_state_obj(k)->state)
+#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
 #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
 static ssize_t cpuidle_state_show(struct kobject * kobj,
 	struct attribute * attr ,char * buf)
 {
 	int ret = -EIO;
 	struct cpuidle_state *state = kobj_to_state(kobj);
+	struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
 	struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);

 	if (cattr->show)
-		ret = cattr->show(state, buf);
+		ret = cattr->show(state, state_usage, buf);

 	return ret;
 }
@@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 {
 	int i, ret = -ENOMEM;
 	struct cpuidle_state_kobj *kobj;
+	struct cpuidle_driver *drv = cpuidle_get_driver();

 	/* state statistics */
 	for (i = 0; i < device->state_count; i++) {
 		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
 		if (!kobj)
 			goto error_state;
-		kobj->state = &device->states[i];
+		kobj->state = &drv->states[i];
+		kobj->state_usage = &device->states_usage[i];
 		init_completion(&kobj->kobj_unregister);

 		ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
@@ -81,7 +81,8 @@ static unsigned int mwait_substates;
 static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */

 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+static int intel_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index);

 static struct cpuidle_state *cpuidle_state_table;

@@ -109,7 +110,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C1 */
 		.name = "C1-NHM",
 		.desc = "MWAIT 0x00",
-		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 3,
 		.target_residency = 6,
@@ -117,7 +117,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C2 */
 		.name = "C3-NHM",
 		.desc = "MWAIT 0x10",
-		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 20,
 		.target_residency = 80,
@@ -125,7 +124,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C3 */
 		.name = "C6-NHM",
 		.desc = "MWAIT 0x20",
-		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 200,
 		.target_residency = 800,
@@ -137,7 +135,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C1 */
 		.name = "C1-SNB",
 		.desc = "MWAIT 0x00",
-		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
 		.target_residency = 1,
@@ -145,7 +142,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C2 */
 		.name = "C3-SNB",
 		.desc = "MWAIT 0x10",
-		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
 		.target_residency = 211,
@@ -153,7 +149,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C3 */
 		.name = "C6-SNB",
 		.desc = "MWAIT 0x20",
-		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
 		.target_residency = 345,
@@ -161,7 +156,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C4 */
 		.name = "C7-SNB",
 		.desc = "MWAIT 0x30",
-		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
 		.target_residency = 345,
@@ -173,7 +167,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C1 */
 		.name = "C1-ATM",
 		.desc = "MWAIT 0x00",
-		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
 		.target_residency = 4,
@@ -181,7 +174,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C2 */
 		.name = "C2-ATM",
 		.desc = "MWAIT 0x10",
-		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 20,
 		.target_residency = 80,
@@ -190,7 +182,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C4 */
 		.name = "C4-ATM",
 		.desc = "MWAIT 0x30",
-		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 100,
 		.target_residency = 400,
@@ -199,23 +190,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C6 */
 		.name = "C6-ATM",
 		.desc = "MWAIT 0x52",
-		.driver_data = (void *) 0x52,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 140,
 		.target_residency = 560,
 		.enter = &intel_idle },
 };

+static int get_driver_data(int cstate)
+{
+	int driver_data;
+	switch (cstate) {
+
+	case 1:	/* MWAIT C1 */
+		driver_data = 0x00;
+		break;
+	case 2:	/* MWAIT C2 */
+		driver_data = 0x10;
+		break;
+	case 3:	/* MWAIT C3 */
+		driver_data = 0x20;
+		break;
+	case 4:	/* MWAIT C4 */
+		driver_data = 0x30;
+		break;
+	case 5:	/* MWAIT C5 */
+		driver_data = 0x40;
+		break;
+	case 6:	/* MWAIT C6 */
+		driver_data = 0x52;
+		break;
+	default:
+		driver_data = 0x00;
+	}
+	return driver_data;
+}
+
 /**
  * intel_idle
  * @dev: cpuidle_device
- * @state: cpuidle state
+ * @drv: cpuidle driver
+ * @index: index of cpuidle state
  *
  */
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+static int intel_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
 {
 	unsigned long ecx = 1; /* break on interrupt flag */
-	unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+	struct cpuidle_state *state = &drv->states[index];
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
 	unsigned int cstate;
 	ktime_t kt_before, kt_after;
 	s64 usec_delta;
@@ -256,7 +279,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

-	return usec_delta;
+	/* Update cpuidle counters */
+	dev->last_residency = (int)usec_delta;
+
+	return index;
 }

 static void __setup_broadcast_timer(void *arg)
@@ -396,6 +422,60 @@ static void intel_idle_cpuidle_devices_uninit(void)
 	free_percpu(intel_idle_cpuidle_devices);
 	return;
 }
+/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static int intel_idle_cpuidle_driver_init(void)
+{
+	int cstate;
+	struct cpuidle_driver *drv = &intel_idle_driver;
+
+	drv->state_count = 1;
+
+	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+		int num_substates;
+
+		if (cstate > max_cstate) {
+			printk(PREFIX "max_cstate %d reached\n",
+				max_cstate);
+			break;
+		}
+
+		/* does the state exist in CPUID.MWAIT? */
+		num_substates = (mwait_substates >> ((cstate) * 4))
+					& MWAIT_SUBSTATE_MASK;
+		if (num_substates == 0)
+			continue;
+		/* is the state not enabled? */
+		if (cpuidle_state_table[cstate].enter == NULL) {
+			/* does the driver not know about the state? */
+			if (*cpuidle_state_table[cstate].name == '\0')
+				pr_debug(PREFIX "unaware of model 0x%x"
+					" MWAIT %d please"
+					" contact lenb@kernel.org",
+				boot_cpu_data.x86_model, cstate);
+			continue;
+		}
+
+		if ((cstate > 2) &&
+			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+			mark_tsc_unstable("TSC halts in idle"
+					" states deeper than C2");
+
+		drv->states[drv->state_count] =	/* structure copy */
+			cpuidle_state_table[cstate];
+
+		drv->state_count += 1;
+	}
+
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);
+
+	return 0;
+}
+

 /*
  * intel_idle_cpuidle_devices_init()
  * allocate, initialize, register cpuidle_devices
@@ -430,22 +510,11 @@ static int intel_idle_cpuidle_devices_init(void)
 				continue;
 			/* is the state not enabled? */
 			if (cpuidle_state_table[cstate].enter == NULL) {
-				/* does the driver not know about the state? */
-				if (*cpuidle_state_table[cstate].name == '\0')
-					pr_debug(PREFIX "unaware of model 0x%x"
-						" MWAIT %d please"
-						" contact lenb@kernel.org",
-					boot_cpu_data.x86_model, cstate);
 				continue;
 			}

-			if ((cstate > 2) &&
-				!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-				mark_tsc_unstable("TSC halts in idle"
-					" states deeper than C2");
-
-			dev->states[dev->state_count] =	/* structure copy */
-				cpuidle_state_table[cstate];
+			dev->states_usage[dev->state_count].driver_data =
+				(void *)get_driver_data(cstate);

 			dev->state_count += 1;
 		}
@@ -458,8 +527,6 @@ static int intel_idle_cpuidle_devices_init(void)
 			return -EIO;
 		}
 	}
-	if (auto_demotion_disable_flags)
-		smp_call_function(auto_demotion_disable, NULL, 1);

 	return 0;
 }
@@ -477,6 +544,7 @@ static int __init intel_idle_init(void)
 	if (retval)
 		return retval;

+	intel_idle_cpuidle_driver_init();
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
 		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
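With .driver_data gone from the shared state tables, per-state private data such as the MWAIT hint now lives in each CPU's dev->states_usage[] and is reached through the cpuidle_get_statedata()/cpuidle_set_statedata() helpers shown in the header hunk further below. A condensed sketch of that round trip (illustrative only; my_store_hint and my_mwait_enter are made-up names, not part of the patch):

/* at per-CPU device init: stash the per-state private value */
static void my_store_hint(struct cpuidle_device *dev, int index,
			  unsigned long hint)
{
	/* same effect as: dev->states_usage[index].driver_data = (void *)hint; */
	cpuidle_set_statedata(&dev->states_usage[index], (void *)hint);
}

/* in the ->enter hook: retrieve it for this CPU and state index */
static int my_mwait_enter(struct cpuidle_device *dev,
			  struct cpuidle_driver *drv, int index)
{
	unsigned long hint =
		(unsigned long)cpuidle_get_statedata(&dev->states_usage[index]);

	/* ...enter the hardware state described by 'hint'... */

	dev->last_residency = 0;	/* a real driver reports the measured time here */
	return index;
}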
@@ -329,6 +329,7 @@ extern void acpi_processor_throttling_init(void);
 int acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
+int acpi_processor_hotplug(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device);
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
@@ -22,57 +22,62 @@
 #define CPUIDLE_DESC_LEN	32

 struct cpuidle_device;
+struct cpuidle_driver;


 /****************************
  * CPUIDLE DEVICE INTERFACE *
  ****************************/

+struct cpuidle_state_usage {
+	void		*driver_data;
+
+	unsigned long long	usage;
+	unsigned long long	time; /* in US */
+};
+
 struct cpuidle_state {
 	char		name[CPUIDLE_NAME_LEN];
 	char		desc[CPUIDLE_DESC_LEN];
-	void		*driver_data;

 	unsigned int	flags;
 	unsigned int	exit_latency; /* in US */
 	unsigned int	power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */

-	unsigned long long	usage;
-	unsigned long long	time; /* in US */
-
 	int (*enter)	(struct cpuidle_device *dev,
-			 struct cpuidle_state *state);
+			struct cpuidle_driver *drv,
+			int index);
 };

 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
-#define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */

 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)

 /**
  * cpuidle_get_statedata - retrieves private driver state data
- * @state: the state
+ * @st_usage: the state usage statistics
  */
-static inline void * cpuidle_get_statedata(struct cpuidle_state *state)
+static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage)
 {
-	return state->driver_data;
+	return st_usage->driver_data;
 }

 /**
  * cpuidle_set_statedata - stores private driver state data
- * @state: the state
+ * @st_usage: the state usage statistics
  * @data: the private data
  */
 static inline void
-cpuidle_set_statedata(struct cpuidle_state *state, void *data)
+cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data)
 {
-	state->driver_data = data;
+	st_usage->driver_data = data;
 }

 struct cpuidle_state_kobj {
 	struct cpuidle_state *state;
+	struct cpuidle_state_usage *state_usage;
 	struct completion kobj_unregister;
 	struct kobject kobj;
 };
@@ -80,22 +85,17 @@ struct cpuidle_state_kobj {
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
-	unsigned int		power_specified:1;
 	unsigned int		cpu;

 	int			last_residency;
 	int			state_count;
-	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
+	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
 	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
-	struct cpuidle_state	*last_state;

 	struct list_head 	device_list;
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
 	void			*governor_data;
-	struct cpuidle_state	*safe_state;
-
-	int (*prepare)		(struct cpuidle_device *dev);
 };

 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -119,6 +119,11 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 struct cpuidle_driver {
 	char			name[CPUIDLE_NAME_LEN];
 	struct module 		*owner;
+
+	unsigned int		power_specified:1;
+	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
+	int			state_count;
+	int			safe_state_index;
 };

 #ifdef CONFIG_CPU_IDLE
@@ -165,11 +170,14 @@ struct cpuidle_governor {
 	struct list_head 	governor_list;
 	unsigned int		rating;

-	int  (*enable)		(struct cpuidle_device *dev);
-	void (*disable)		(struct cpuidle_device *dev);
+	int  (*enable)		(struct cpuidle_driver *drv,
+					struct cpuidle_device *dev);
+	void (*disable)		(struct cpuidle_driver *drv,
+					struct cpuidle_device *dev);

-	int  (*select)		(struct cpuidle_device *dev);
-	void (*reflect)		(struct cpuidle_device *dev);
+	int  (*select)		(struct cpuidle_driver *drv,
+					struct cpuidle_device *dev);
+	void (*reflect)		(struct cpuidle_device *dev, int index);

 	struct module 		*owner;
 };
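On the governor side of this header change, enable/disable/select now receive the cpuidle_driver (which owns the state table) alongside the per-CPU device, and reflect receives the index returned by ->enter. A bare-bones governor skeleton against these prototypes (a sketch only, not a useful policy; the dummy_* names are made up):

static int dummy_enable(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return 0;
}

static void dummy_disable(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
}

static int dummy_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	/* state parameters (latency, residency) are read from drv->states[] */
	return CPUIDLE_DRIVER_STATE_START;
}

static void dummy_reflect(struct cpuidle_device *dev, int index)
{
	/* index is what ->enter actually returned; < 0 means entry failed */
}

static struct cpuidle_governor dummy_governor = {
	.name		= "dummy",	/* hypothetical */
	.rating		= 1,
	.enable		= dummy_enable,
	.disable	= dummy_disable,
	.select		= dummy_select,
	.reflect	= dummy_reflect,
	.owner		= THIS_MODULE,
};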
@@ -162,19 +162,21 @@ void print_header(void)

 void dump_cnt(struct counters *cnt)
 {
-	fprintf(stderr, "package: %d ", cnt->pkg);
-	fprintf(stderr, "core:: %d ", cnt->core);
-	fprintf(stderr, "CPU: %d ", cnt->cpu);
-	fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
-	fprintf(stderr, "c3: %016llX\n", cnt->c3);
-	fprintf(stderr, "c6: %016llX\n", cnt->c6);
-	fprintf(stderr, "c7: %016llX\n", cnt->c7);
-	fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
-	fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
-	fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
-	fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
-	fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
-	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
+	if (!cnt)
+		return;
+	if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
+	if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
+	if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
+	if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
+	if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
+	if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
+	if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
+	if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
+	if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
+	if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
+	if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
+	if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
+	if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
 }

 void dump_list(struct counters *cnt)