Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] update sn2_defconfig
  [IA64] Fix kernel hangup in kdump on INIT
  [IA64] Fix kernel panic in kdump on INIT
  [IA64] Remove vector from ia64_machine_kexec()
  [IA64] Fix race when multiple cpus go through MCA
  [IA64] Remove needless delay in MCA rendezvous
  [IA64] add driver for ACPI methods to call native firmware
  [IA64] abstract SAL_CALL wrapper to allow other firmware entry points
  [IA64] perfmon: Remove exit_pfm_fs()
  [IA64] tree-wide: Misc __cpu{initdata, init, exit} annotations
@@ -118,11 +118,6 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
-	if (!ia64_kimage) {
-		printk(KERN_NOTICE "machine_kdump_on_init(): "
-				"kdump not configured\n");
-		return;
-	}
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);

@@ -156,6 +151,14 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	if (!kdump_on_init)
 		return NOTIFY_DONE;

+	if (!ia64_kimage) {
+		if (val == DIE_INIT_MONARCH_LEAVE)
+			ia64_mca_printk(KERN_NOTICE
+					"%s: kdump not configured\n",
+					__FUNCTION__);
+		return NOTIFY_DONE;
+	}
+
 	if (val != DIE_INIT_MONARCH_LEAVE &&
 	    val != DIE_INIT_SLAVE_LEAVE &&
 	    val != DIE_INIT_MONARCH_PROCESS &&

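For readers less familiar with the notifier pattern used in the hunk above, here is a minimal user-space analogue (not the kernel code; crash_kernel and kdump_notifier are names invented for this sketch): the callback simply declines the event with NOTIFY_DONE when no crash kernel has been loaded, which mirrors the new ia64_kimage check.

#include <stdio.h>

enum { NOTIFY_DONE, NOTIFY_STOP };

struct kimage;                          /* stand-in for a loaded crash kernel */
static struct kimage *crash_kernel;     /* NULL means "kdump not configured" */

static int kdump_notifier(unsigned long event, void *data)
{
	(void)event;
	(void)data;
	if (!crash_kernel) {
		fprintf(stderr, "kdump_notifier: kdump not configured\n");
		return NOTIFY_DONE;     /* let the remaining notifiers run */
	}
	/* ... would hand control to the crash kernel here ... */
	return NOTIFY_STOP;
}

int main(void)
{
	/* crash_kernel is NULL, so the event is simply declined */
	return kdump_notifier(0, NULL) == NOTIFY_DONE ? 0 : 1;
}
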
@@ -79,7 +79,6 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	relocate_new_kernel_t rnk;
 	void *pal_addr = efi_get_pal_addr();
 	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
-	unsigned long vector;
 	int ii;

 	BUG_ON(!image);

@@ -107,11 +106,8 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	/* unmask TPR and clear any pending interrupts */
 	ia64_setreg(_IA64_REG_CR_TPR, 0);
 	ia64_srlz_d();
-	vector = ia64_get_ivr();
-	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
 		ia64_eoi();
-		vector = ia64_get_ivr();
-	}
 	platform_kernel_launch_event();
 	rnk = (relocate_new_kernel_t)&code_addr;
 	(*rnk)(image->head, image->start, ia64_boot_param,

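The loop rewrite above only changes form, not behaviour: reading the next pending vector inside the while condition is equivalent to the old read-test-reread shape. A tiny user-space sketch of the same transformation (next_pending(), acknowledge() and SPURIOUS are invented stand-ins for ia64_get_ivr(), ia64_eoi() and IA64_SPURIOUS_INT_VECTOR):

#include <stdio.h>

#define SPURIOUS (-1)                   /* stand-in for IA64_SPURIOUS_INT_VECTOR */

static int pending[] = { 3, 7, 12, SPURIOUS };
static int idx;

static int next_pending(void)           /* models ia64_get_ivr() */
{
	return pending[idx++];
}

static void acknowledge(void)           /* models ia64_eoi() */
{
	printf("acked one interrupt\n");
}

int main(void)
{
	/* old:  v = next_pending();
	 *       while (v != SPURIOUS) { acknowledge(); v = next_pending(); }
	 * new shape, same effect: */
	while (next_pending() != SPURIOUS)
		acknowledge();
	return 0;
}
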
@@ -701,8 +701,7 @@ ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 /*
  *	ia64_mca_wakeup
  *
- *	Send an inter-cpu interrupt to wake-up a particular cpu
- *	and mark that cpu to be out of rendez.
+ *	Send an inter-cpu interrupt to wake-up a particular cpu.
  *
  *	Inputs  :	cpuid
  *	Outputs :	None

@@ -711,14 +710,12 @@ static void
 ia64_mca_wakeup(int cpu)
 {
 	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
-	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
 }

 /*
  *	ia64_mca_wakeup_all
  *
- *	Wakeup all the cpus which have rendez'ed previously.
+ *	Wakeup all the slave cpus which have rendez'ed previously.
 *
 *	Inputs  :	None
 *	Outputs :	None

@@ -741,7 +738,10 @@ ia64_mca_wakeup_all(void)
 *
 *	This is handler used to put slave processors into spinloop
 *	while the monarch processor does the mca handling and later
- *	wake each slave up once the monarch is done.
+ *	wake each slave up once the monarch is done.  The state
+ *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
+ *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
+ *	the cpu has come out of OS rendezvous.
 *
 *	Inputs  :	None
 *	Outputs :	None

@@ -778,6 +778,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);

+	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
 	local_irq_restore(flags);
 	return IRQ_HANDLED;

@@ -1135,30 +1136,27 @@ no_mod:
 static void
 ia64_wait_for_slaves(int monarch, const char *type)
 {
-	int c, wait = 0, missing = 0;
-	for_each_online_cpu(c) {
-		if (c == monarch)
-			continue;
-		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
-			udelay(1000);		/* short wait first */
-			wait = 1;
-			break;
-		}
-	}
-	if (!wait)
-		goto all_in;
-	for_each_online_cpu(c) {
-		if (c == monarch)
-			continue;
-		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
-			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
-			if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
-				missing = 1;
-			break;
-		}
-	}
-	if (!missing)
-		goto all_in;
+	int c, i , wait;
+
+	/*
+	 * wait 5 seconds total for slaves (arbitrary)
+	 */
+	for (i = 0; i < 5000; i++) {
+		wait = 0;
+		for_each_online_cpu(c) {
+			if (c == monarch)
+				continue;
+			if (ia64_mc_info.imi_rendez_checkin[c]
+					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+				udelay(1000);		/* short wait */
+				wait = 1;
+				break;
+			}
+		}
+		if (!wait)
+			goto all_in;
+	}
+
 	/*
 	 * Maybe slave(s) dead.  Print buffered messages immediately.
 	 */

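The new ia64_wait_for_slaves() above replaces the one-shot 5-second udelay() fallback with 5000 short 1 ms polls, so the monarch leaves the loop as soon as the last slave has checked in. A user-space sketch of that bounded-polling pattern, assuming a per-cpu check-in array and usleep() in place of the real MCA rendezvous state and udelay():

#include <stdio.h>
#include <unistd.h>

#define NCPUS 4
#define NOTDONE 0
#define DONE    1

static int checkin[NCPUS] = { DONE, DONE, NOTDONE, DONE };

int main(void)
{
	int monarch = 0, c, i, wait;

	for (i = 0; i < 5000; i++) {		/* ~5 seconds total, arbitrary */
		wait = 0;
		for (c = 0; c < NCPUS; c++) {
			if (c == monarch)
				continue;
			if (checkin[c] == NOTDONE) {
				usleep(1000);	/* short wait, then re-scan */
				wait = 1;
				break;
			}
		}
		if (!wait) {
			puts("all slaves checked in");
			return 0;
		}
		if (i == 10)
			checkin[2] = DONE;	/* simulate a late slave */
	}
	puts("gave up waiting; some slave may be dead");
	return 1;
}
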
@@ -1224,26 +1222,27 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);

+	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
 		ia64_wait_for_slaves(cpu, "MCA");
+
+		/* Wakeup all the processors which are spinning in the
+		 * rendezvous loop.  They will leave SAL, then spin in the OS
+		 * with interrupts disabled until this monarch cpu leaves the
+		 * MCA handler.  That gets control back to the OS so we can
+		 * backtrace the other cpus, backtrace when spinning in SAL
+		 * does not work.
+		 */
+		ia64_mca_wakeup_all();
+		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
+				== NOTIFY_STOP)
+			ia64_mca_spin(__FUNCTION__);
 	} else {
-		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	}

-	/* Wakeup all the processors which are spinning in the rendezvous loop.
-	 * They will leave SAL, then spin in the OS with interrupts disabled
-	 * until this monarch cpu leaves the MCA handler.  That gets control
-	 * back to the OS so we can backtrace the other cpus, backtrace when
-	 * spinning in SAL does not work.
-	 */
-	ia64_mca_wakeup_all();
-	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
-
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

@@ -1277,21 +1276,22 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		/* wake up the next monarch cpu,
 		 * and put this cpu in the rendez loop.
 		 */
-		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 		for_each_online_cpu(i) {
 			if (cpu_isset(i, mca_cpu)) {
 				monarch_cpu = i;
 				cpu_clear(i, mca_cpu);	/* wake next cpu */
 				while (monarch_cpu != -1)
 					cpu_relax();	/* spin until last cpu leaves */
-				ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 				set_curr_task(cpu, previous_current);
+				ia64_mc_info.imi_rendez_checkin[cpu]
+					= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 				return;
 			}
 		}
 	}
 	set_curr_task(cpu, previous_current);
-	monarch_cpu = -1;
+	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
 }

 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);

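Both handler hunks above revolve around the same hand-off: slave cpus (and queued monarchs) spin with cpu_relax() until the finishing monarch resets monarch_cpu to -1, which "frees the slaves and previous monarchs". A much-simplified user-space sketch of that spin-until-released pattern, using C11 atomics and pthreads rather than the real per-cpu MCA state (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int monarch_cpu;		/* -1 means "no monarch, everyone is free" */

static void *slave(void *arg)
{
	int cpu = (int)(long)arg;

	while (atomic_load(&monarch_cpu) != -1)
		;				/* cpu_relax()-style busy wait */
	printf("cpu %d released\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	atomic_store(&monarch_cpu, 0);		/* cpu 0 acts as the monarch */
	for (long i = 1; i <= 3; i++)
		pthread_create(&t[i - 1], NULL, slave, (void *)i);

	usleep(10000);				/* monarch "handles the MCA" */
	atomic_store(&monarch_cpu, -1);		/* frees the slaves */

	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
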
@@ -118,7 +118,5 @@ struct mca_table_entry {

 extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
 extern int mca_recover_range(unsigned long);
-extern void ia64_mca_printk(const char * fmt, ...)
-	 __attribute__ ((format (printf, 1, 2)));
 extern void ia64_mlogbuf_dump(void);

@@ -907,7 +907,7 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
 	return len;
 }

-static void
+static void __cpuinit
 create_palinfo_proc_entries(unsigned int cpu)
 {
 #	define CPUSTR	"cpu%d"

@@ -968,7 +968,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }

-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int hotcpu = (unsigned long)hcpu;

@@ -986,7 +986,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }

-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier __cpuinitdata =
 {
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,

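The palinfo changes above, and the salinfo and cpu-cache ones that follow, all apply the same pattern from the "__cpu{initdata, init, exit} annotations" commit: code and data reachable only from CPU bring-up are marked so the kernel can discard them after boot when CPU hotplug support is disabled. Roughly, the shape being introduced looks like this (a kernel-style fragment for illustration only, not a complete translation unit; the example_* names are made up):

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
		/* set up per-cpu state for the cpu identified by hcpu */
		break;
	case CPU_DEAD:
		/* tear that state down again */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
	.notifier_call = example_cpu_callback,
	.priority = 0,
};
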
@@ -1538,13 +1538,6 @@ init_pfm_fs(void)
 	return err;
 }

-static void __exit
-exit_pfm_fs(void)
-{
-	unregister_filesystem(&pfm_fs_type);
-	mntput(pfmfs_mnt);
-}
-
 static ssize_t
 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 {

@@ -574,7 +574,7 @@ static const struct file_operations salinfo_data_fops = {
 	.write = salinfo_log_write,
 };

-static int __devinit
+static int __cpuinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
 	unsigned int i, cpu = (unsigned long)hcpu;

@@ -615,7 +615,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 	return NOTIFY_OK;
 }

-static struct notifier_block salinfo_cpu_notifier =
+static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
 {
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,

@@ -118,11 +118,11 @@ struct cpu_cache_info {
 	struct kobject kobj;
 };

-static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
+static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])

 #ifdef CONFIG_SMP
-static void cache_shared_cpu_map_setup( unsigned int cpu,
+static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	pal_cache_shared_info_t csi;

@@ -157,7 +157,7 @@ static void cache_shared_cpu_map_setup( unsigned int cpu,
 				&csi) == PAL_STATUS_SUCCESS);
 }
 #else
-static void cache_shared_cpu_map_setup(unsigned int cpu,
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	cpu_set(cpu, this_leaf->shared_cpu_map);

@@ -428,13 +428,13 @@ static struct notifier_block __cpuinitdata cache_cpu_notifier =
 	.notifier_call = cache_cpu_callback
 };

-static int __cpuinit cache_sysfs_init(void)
+static int __init cache_sysfs_init(void)
 {
 	int i;

 	for_each_online_cpu(i) {
-		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
-				(void *)(long)i);
+		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+		cache_add_dev(sys_dev);
 	}

 	register_hotcpu_notifier(&cache_cpu_notifier);

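The cache_sysfs_init() change above stops funnelling already-online cpus through the hotplug callback and instead adds their sysfs entries directly, leaving the registered notifier to handle only cpus that come up later. A small user-space analogue of that init-time pattern (add_device() and the handler pointer are invented for the sketch):

#include <stdio.h>

static void add_device(int cpu)			/* models cache_add_dev() */
{
	printf("added cache sysfs entries for cpu %d\n", cpu);
}

static void (*hotplug_handler)(int cpu);	/* models register_hotcpu_notifier() */

static void on_cpu_online(int cpu)		/* later hotplug events land here */
{
	add_device(cpu);
}

int main(void)
{
	/* cpus already online at init time are handled directly... */
	for (int cpu = 0; cpu < 2; cpu++)
		add_device(cpu);

	/* ...and only new arrivals go through the registered handler */
	hotplug_handler = on_cpu_online;
	hotplug_handler(2);			/* simulate a cpu coming online later */
	return 0;
}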