ACPICA: use new ACPI headers.
Signed-off-by: Len Brown <len.brown@intel.com>
commit cee324b145
parent ceb6c46839
committed by Len Brown
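The hunks below are a mechanical conversion from the legacy acpi_fadt global to the acpi_gbl_FADT table exported by the new ACPICA headers. Most fields keep their names (flags, smi_command, pstate_control, cst_control, duty_offset, duty_width, pm2_control_block, xpm_timer_block, C2latency, C3latency); the SCI field is additionally renamed from sci_int to sci_interrupt.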
@@ -627,15 +627,15 @@ void __init acpi_early_init(void)
                         acpi_sci_flags.trigger = 3;
 
                 /* Set PIC-mode SCI trigger type */
-                acpi_pic_sci_set_trigger(acpi_fadt.sci_int,
+                acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
                                          acpi_sci_flags.trigger);
         } else {
                 extern int acpi_sci_override_gsi;
                 /*
-                 * now that acpi_fadt is initialized,
+                 * now that acpi_gbl_FADT is initialized,
                  * update it with result from INT_SRC_OVR parsing
                  */
-                acpi_fadt.sci_int = acpi_sci_override_gsi;
+                acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
         }
 #endif
@@ -245,7 +245,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
          * FADT. It may not be the same if an interrupt source override exists
          * for the SCI.
          */
-        gsi = acpi_fadt.sci_int;
+        gsi = acpi_gbl_FADT.sci_interrupt;
         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                        gsi);
@@ -513,7 +513,7 @@ int __init acpi_irq_penalty_init(void)
                 }
         }
         /* Add a penalty for the SCI */
-        acpi_irq_penalty[acpi_fadt.sci_int] += PIRQ_PENALTY_PCI_USING;
+        acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
 
         return 0;
 }
@@ -431,7 +431,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
          * Check to see if we have bus mastering arbitration control. This
          * is required for proper C3 usage (to maintain cache coherency).
          */
-        if (acpi_fadt.pm2_control_block && acpi_fadt.pm2_control_length) {
+        if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
                 pr->flags.bm_control = 1;
                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                   "Bus mastering arbitration control present\n"));
@@ -490,8 +490,8 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
                                   object.processor.pblk_length);
         else {
                 pr->throttling.address = object.processor.pblk_address;
-                pr->throttling.duty_offset = acpi_fadt.duty_offset;
-                pr->throttling.duty_width = acpi_fadt.duty_width;
+                pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
+                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
                 pr->pblk = object.processor.pblk_address;
@@ -160,7 +160,7 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
 {
         if (t2 >= t1)
                 return (t2 - t1);
-        else if (!(acpi_fadt.flags & ACPI_FADT_32BIT_TIMER))
+        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
         else
                 return ((0xFFFFFFFF - t1) + t2);
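ticks_elapsed() only changes which global it reads; the wrap-around handling is untouched. As a minimal standalone sketch of that arithmetic (plain C, outside the kernel; pm_timer_is_32bit is a stand-in for testing ACPI_FADT_32BIT_TIMER in acpi_gbl_FADT.flags):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the ACPI_FADT_32BIT_TIMER test on acpi_gbl_FADT.flags. */
    static int pm_timer_is_32bit;

    /* PM-timer delta with wrap-around handling, mirroring ticks_elapsed(). */
    static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2)
    {
            if (t2 >= t1)
                    return t2 - t1;
            else if (!pm_timer_is_32bit)    /* 24-bit counter wrapped */
                    return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
            else                            /* 32-bit counter wrapped */
                    return (0xFFFFFFFF - t1) + t2;
    }

    int main(void)
    {
            /* t1 near the top of the 24-bit range, t2 read after the wrap */
            printf("0x%x\n", (unsigned)ticks_elapsed(0x00FFFFF0, 0x00000010));  /* 0x1f */
            return 0;
    }

The 24-bit versus 32-bit distinction comes from ACPI itself: the PM timer is a free-running 3.579545 MHz counter whose width is advertised by the TMR_VAL_EXT flag that ACPI_FADT_32BIT_TIMER tests.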
@@ -234,7 +234,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
                 /* Dummy wait op - must do something useless after P_LVL2 read
                    because chipsets cannot guarantee that STPCLK# signal
                    gets asserted in time to freeze execution properly. */
-                unused = inl(acpi_fadt.xpm_timer_block.address);
+                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
         }
 }
@@ -334,7 +334,7 @@ static void acpi_processor_idle(void)
          * detection phase, to work cleanly with logical CPU hotplug.
          */
         if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-            !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED))
+            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                 cx = &pr->power.states[ACPI_STATE_C1];
 #endif
@@ -380,11 +380,11 @@ static void acpi_processor_idle(void)
 
         case ACPI_STATE_C2:
                 /* Get start time (ticks) */
-                t1 = inl(acpi_fadt.xpm_timer_block.address);
+                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                 /* Invoke C2 */
                 acpi_cstate_enter(cx);
                 /* Get end time (ticks) */
-                t2 = inl(acpi_fadt.xpm_timer_block.address);
+                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
 #ifdef CONFIG_GENERIC_TIME
                 /* TSC halts in C2, so notify users */
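Both the C2 path above and the C3 path below bracket the actual C-state entry with two reads of the PM timer; the difference of the two readings is the residency in timer ticks. A rough sketch of converting such a delta to wall time, assuming the fixed 3.579545 MHz PM-timer rate (pm_ticks_to_us is an illustrative helper, not a kernel function):

    #include <stdint.h>
    #include <stdio.h>

    #define PM_TIMER_HZ 3579545u    /* ACPI PM timer frequency */

    /* Illustrative helper: PM-timer ticks -> microseconds. */
    static uint64_t pm_ticks_to_us(uint32_t ticks)
    {
            return ((uint64_t)ticks * 1000000u) / PM_TIMER_HZ;
    }

    int main(void)
    {
            /* A delta of 3580 ticks corresponds to roughly 1000 us of residency. */
            printf("%llu us\n", (unsigned long long)pm_ticks_to_us(3580));
            return 0;
    }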
@@ -415,11 +415,11 @@ static void acpi_processor_idle(void)
                 }
 
                 /* Get start time (ticks) */
-                t1 = inl(acpi_fadt.xpm_timer_block.address);
+                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                 /* Invoke C3 */
                 acpi_cstate_enter(cx);
                 /* Get end time (ticks) */
-                t2 = inl(acpi_fadt.xpm_timer_block.address);
+                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                 if (pr->flags.bm_check) {
                         /* Enable bus master arbitration */
                         atomic_dec(&c3_cpu_count);
@@ -451,7 +451,7 @@ static void acpi_processor_idle(void)
 #ifdef CONFIG_HOTPLUG_CPU
         /* Don't do promotion/demotion */
         if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-            !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
+            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
                 next_state = cx;
                 goto end;
         }
@@ -622,7 +622,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
          * an SMP system.
          */
         if ((num_online_cpus() > 1) &&
-            !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED))
+            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                 return -ENODEV;
 #endif
@@ -631,8 +631,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
         pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 
         /* determine latencies from FADT */
-        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.C2latency;
-        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.C3latency;
+        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
+        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
 
         ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                           "lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -878,7 +878,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                  * WBINVD should be set in fadt, for C3 state to be
                  * supported on when bm_check is not required.
                  */
-                if (!(acpi_fadt.flags & ACPI_FADT_WBINVD)) {
+                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                         ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                           "Cache invalidation should work properly"
                                           " for C3 to be enabled on SMP systems\n"));
@@ -1158,9 +1158,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
         if (!pr)
                 return -EINVAL;
 
-        if (acpi_fadt.cst_control && !nocst) {
+        if (acpi_gbl_FADT.cst_control && !nocst) {
                 status =
-                    acpi_os_write_port(acpi_fadt.smi_command, acpi_fadt.cst_control, 8);
+                    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
                 if (ACPI_FAILURE(status)) {
                         ACPI_EXCEPTION((AE_INFO, status,
                                         "Notifying BIOS of _CST ability failed"));
@@ -353,7 +353,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
                 is_done = -EIO;
 
         /* Can't write pstate_control to smi_command if either value is zero */
-        if ((!acpi_fadt.smi_command) || (!acpi_fadt.pstate_control)) {
+        if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
                 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
                 module_put(calling_module);
                 return 0;
@@ -361,15 +361,15 @@ int acpi_processor_notify_smm(struct module *calling_module)
 
         ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                           "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
-                          acpi_fadt.pstate_control, acpi_fadt.smi_command));
+                          acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
 
-        status = acpi_os_write_port(acpi_fadt.smi_command,
-                                    (u32) acpi_fadt.pstate_control, 8);
+        status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+                                    (u32) acpi_gbl_FADT.pstate_control, 8);
         if (ACPI_FAILURE(status)) {
                 ACPI_EXCEPTION((AE_INFO, status,
                                 "Failed to write pstate_control [0x%x] to "
-                                "smi_command [0x%x]", acpi_fadt.pstate_control,
-                                acpi_fadt.smi_command));
+                                "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
+                                acpi_gbl_FADT.smi_command));
                 module_put(calling_module);
                 return status;
         }
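The pstate_control write above and the cst_control write in the processor_idle hunk follow the same FADT-driven handshake: only if the firmware supplies both a non-zero smi_command port and a non-zero command value does the OS write that byte to the port to claim control of the feature. A minimal standalone sketch of the shape of that logic (the toy struct, write_port8, and the example port/value are stand-ins for the real FADT, acpi_os_write_port, and firmware-provided numbers):

    #include <stdint.h>

    /* Toy stand-ins for the two FADT fields used by the handshake. */
    struct fadt_smm {
            uint32_t smi_command;      /* I/O port for SMI command writes   */
            uint8_t  pstate_control;   /* value that claims P-state control */
    };

    /* Placeholder for acpi_os_write_port(); returns 0 on success. */
    static int write_port8(uint32_t port, uint8_t value)
    {
            (void)port;
            (void)value;
            return 0;
    }

    /* Returns 0 if control was claimed, -1 if the FADT describes no SMM hook. */
    static int notify_smm(const struct fadt_smm *fadt)
    {
            if (!fadt->smi_command || !fadt->pstate_control)
                    return -1;
            return write_port8(fadt->smi_command, fadt->pstate_control);
    }

    int main(void)
    {
            /* Invented example values, for illustration only. */
            struct fadt_smm fadt = { .smi_command = 0xb2, .pstate_control = 0x80 };
            return notify_smm(&fadt);
    }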
@@ -125,7 +125,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                 /* Used to clear all duty_value bits */
                 duty_mask = pr->throttling.state_count - 1;
 
-                duty_mask <<= acpi_fadt.duty_offset;
+                duty_mask <<= acpi_gbl_FADT.duty_offset;
                 duty_mask = ~duty_mask;
         }
@@ -208,7 +208,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
                 return 0;
         }
 
-        pr->throttling.state_count = 1 << acpi_fadt.duty_width;
+        pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
 
         /*
          * Compute state values. Note that throttling displays a linear power/
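The two throttling hunks use duty_width and duty_offset exactly as the FADT defines them: duty_width is the number of duty-cycle bits in the throttling register, so there are 1 << duty_width throttling states, and duty_offset says where that bit field starts, which is what the duty_mask shift in acpi_processor_set_throttling computes. A small standalone worked example (the FADT values here are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Invented example values for the two FADT fields. */
            uint32_t duty_width  = 3;   /* 3 duty-cycle bits ... */
            uint32_t duty_offset = 1;   /* ... starting at bit 1 */

            uint32_t state_count = 1u << duty_width;   /* 8 throttling states   */
            uint32_t duty_mask = state_count - 1;      /* 0x7                   */
            duty_mask <<= duty_offset;                 /* 0xe: the bits to clear */

            printf("states=%u mask=0x%x\n", (unsigned)state_count, (unsigned)duty_mask);
            return 0;
    }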
@@ -1333,7 +1333,7 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
         /*
          * Enumerate all fixed-feature devices.
          */
-        if ((acpi_fadt.flags & ACPI_FADT_POWER_BUTTON) == 0) {
+        if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
                 result = acpi_add_single_object(&device, acpi_root,
                                                 NULL,
                                                 ACPI_BUS_TYPE_POWER_BUTTON);
@@ -1341,7 +1341,7 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
                         result = acpi_start_single_object(device);
         }
 
-        if ((acpi_fadt.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
+        if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
                 result = acpi_add_single_object(&device, acpi_root,
                                                 NULL,
                                                 ACPI_BUS_TYPE_SLEEP_BUTTON);