[IA64] spelling fixes: arch/ia64/
Spelling and apostrophe fixes in arch/ia64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Tony Luck <tony.luck@intel.com>
@@ -791,7 +791,7 @@ static __init int setup_additional_cpus(char *s)
 early_param("additional_cpus", setup_additional_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_map should be static, it cannot change as CPUs
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
@@ -163,7 +163,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
                 return NOTIFY_DONE;
 
         nd = (struct ia64_mca_notify_die *)args->err;
-        /* Reason code 1 means machine check rendezous*/
+        /* Reason code 1 means machine check rendezvous*/
         if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
             nd->sos->rv_rc == 1)
                 return NOTIFY_DONE;
@@ -4,7 +4,7 @@
  * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  *
  * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
+ * asking for different IRQs should be done through these routines
  * instead of just grabbing them. Thus setups with different IRQ numbers
  * shouldn't result in any weird surprises, and installing new handlers
  * should be easier.
@@ -12,7 +12,7 @@
  * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
  *
  * 4/14/2004: Added code to handle cpu migration and do safe irq
- *            migration without lossing interrupts for iosapic
+ *            migration without losing interrupts for iosapic
  *            architecture.
  */
 
@@ -190,7 +190,7 @@ void fixup_irqs(void)
         }
 
         /*
-         * Phase 1: Locate irq's bound to this cpu and
+         * Phase 1: Locate IRQs bound to this cpu and
          * relocate them for cpu removal.
          */
         migrate_irqs();
@@ -23,7 +23,7 @@ lsapic_noop_startup (unsigned int irq)
 static void
 lsapic_noop (unsigned int irq)
 {
-        /* nuthing to do... */
+        /* nothing to do... */
 }
 
 static int lsapic_retrigger(unsigned int irq)
@@ -151,12 +151,12 @@ static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
 
         cmp_inst.l = kprobe_inst;
         if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
-                /* Integere compare - Register Register (A6 type)*/
+                /* Integer compare - Register Register (A6 type)*/
                 if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
                                 &&(cmp_inst.f.c == 1))
                         ctype_unc = 1;
         } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
-                /* Integere compare - Immediate Register (A8 type)*/
+                /* Integer compare - Immediate Register (A8 type)*/
                 if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
                         ctype_unc = 1;
         }
@@ -954,7 +954,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         /*
          * Callee owns the argument space and could overwrite it, eg
          * tail call optimization. So to be absolutely safe
-         * we save the argument space before transfering the control
+         * we save the argument space before transferring the control
          * to instrumented jprobe function which runs in
          * the process context
          */
@@ -438,7 +438,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
  * @peidx: pointer of index of processor error section
  *
  * Return value:
- *      target address on Success / 0 on Failue
+ *      target address on Success / 0 on Failure
  */
 static u64
 get_target_identifier(peidx_table_t *peidx)
@@ -701,7 +701,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
                 return fatal_mca("External bus check fatal status");
 
         /*
-         * This is a local MCA and estimated as a recoverble error.
+         * This is a local MCA and estimated as a recoverable error.
          */
         if (platform)
                 return recover_from_platform_error(slidx, peidx, pbci, sos);
@@ -861,7 +861,7 @@ apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 /*
  * Modules contain a single unwind table which covers both the core and the init text
  * sections but since the two are not contiguous, we need to split this table up such that
- * we can register (and unregister) each "segment" seperately. Fortunately, this sounds
+ * we can register (and unregister) each "segment" separately. Fortunately, this sounds
  * more complicated than it really is.
  */
 static void
@@ -1318,7 +1318,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
         unsigned long flags;
         /*
-         * validy checks on cpu_mask have been done upstream
+         * validity checks on cpu_mask have been done upstream
          */
         LOCK_PFS(flags);
 
@@ -1384,7 +1384,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 {
         unsigned long flags;
         /*
-         * validy checks on cpu_mask have been done upstream
+         * validity checks on cpu_mask have been done upstream
          */
         LOCK_PFS(flags);
 
@@ -1835,7 +1835,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
         /*
          * remove our file from the async queue, if we use this mode.
          * This can be done without the context being protected. We come
-         * here when the context has become unreacheable by other tasks.
+         * here when the context has become unreachable by other tasks.
          *
          * We may still have active monitoring at this point and we may
          * end up in pfm_overflow_handler(). However, fasync_helper()
@@ -2132,7 +2132,7 @@ doit:
         filp->private_data = NULL;
 
         /*
-         * if we free on the spot, the context is now completely unreacheable
+         * if we free on the spot, the context is now completely unreachable
          * from the callers side. The monitored task side is also cut, so we
          * can freely cut.
          *
@@ -2562,7 +2562,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
         ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
 
         /*
-         * bitmask of all PMDs that are accesible to this context
+         * bitmask of all PMDs that are accessible to this context
          */
         ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
 
@@ -3395,7 +3395,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
                 /*
                  * we can only read the register that we use. That includes
-                 * the one we explicitely initialize AND the one we want included
+                 * the one we explicitly initialize AND the one we want included
                  * in the sampling buffer (smpl_regs).
                  *
                  * Having this restriction allows optimization in the ctxsw routine
@@ -3715,7 +3715,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
          * if non-blocking, then we ensure that the task will go into
          * pfm_handle_work() before returning to user mode.
          *
-         * We cannot explicitely reset another task, it MUST always
+         * We cannot explicitly reset another task, it MUST always
          * be done by the task itself. This works for system wide because
          * the tool that is controlling the session is logically doing
          * "self-monitoring".
@@ -4644,7 +4644,7 @@ pfm_exit_thread(struct task_struct *task)
         switch(state) {
                 case PFM_CTX_UNLOADED:
                         /*
-                         * only comes to thios function if pfm_context is not NULL, i.e., cannot
+                         * only comes to this function if pfm_context is not NULL, i.e., cannot
                          * be in unloaded state
                          */
                         printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
@@ -5247,7 +5247,7 @@ pfm_end_notify_user(pfm_context_t *ctx)
 
 /*
  * main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code
  */
 static void
 pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
@@ -181,7 +181,7 @@ static pmu_config_t pmu_conf_mck={
         .pmc_desc = pfm_mck_pmc_desc,
         .num_ibrs = 8,
         .num_dbrs = 8,
-        .use_rr_dbregs = 1 /* debug register are use for range retrictions */
+        .use_rr_dbregs = 1 /* debug register are use for range restrictions */
 };
 
 
@@ -134,7 +134,7 @@ set_smp_redirect (int flag)
          * interrupt redirection. The reason is this would require that
          * All interrupts be stopped and hard bind the irq to a cpu.
          * Later when the interrupt is fired we need to set the redir hint
-         * on again in the vector. This is combersome for something that the
+         * on again in the vector. This is cumbersome for something that the
          * user mode irq balancer will solve anyways.
          */
         no_int_routing=1;
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(data_saved_lock);
 /** salinfo_platform_oemdata - optional callback to decode oemdata from an error
  * record.
  * @sect_header: pointer to the start of the section to decode.
- * @oemdata: returns vmalloc area containing the decded output.
+ * @oemdata: returns vmalloc area containing the decoded output.
  * @oemdata_size: returns length of decoded output (strlen).
  *
  * Description: If user space asks for oem data to be decoded by the kernel
@@ -576,7 +576,7 @@ setup_arch (char **cmdline_p)
 }
 
 /*
- * Display cpu info for all cpu's.
+ * Display cpu info for all CPUs.
  */
 static int
 show_cpuinfo (struct seq_file *m, void *v)
@@ -761,7 +761,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
         c->cpu = smp_processor_id();
 
         /* below default values will be overwritten by identify_siblings()
-         * for Multi-Threading/Multi-Core capable cpu's
+         * for Multi-Threading/Multi-Core capable CPUs
          */
         c->threads_per_core = c->cores_per_socket = c->num_log = 1;
         c->socket_id = -1;
@@ -947,7 +947,7 @@ cpu_init (void)
         ia32_cpu_init();
 #endif
 
-        /* Clear ITC to eliminiate sched_clock() overflows in human time. */
+        /* Clear ITC to eliminate sched_clock() overflows in human time. */
         ia64_set_itc(0);
 
         /* disable all local interrupt sources: */
@@ -186,7 +186,7 @@ handle_IPI (int irq, void *dev_id)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_single (int dest_cpu, int op)
@@ -196,7 +196,7 @@ send_IPI_single (int dest_cpu, int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_allbutself (int op)
@@ -210,7 +210,7 @@ send_IPI_allbutself (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_all (int op)
@@ -223,7 +223,7 @@ send_IPI_all (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_self (int op)
@@ -252,7 +252,7 @@ kdump_smp_send_init(void)
 }
 #endif
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 void
 smp_send_reschedule (int cpu)
@@ -261,7 +261,7 @@ smp_send_reschedule (int cpu)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static void
 smp_send_local_flush_tlb (int cpu)
@@ -694,7 +694,7 @@ int migrate_platform_irqs(unsigned int cpu)
                 set_cpei_target_cpu(new_cpei_cpu);
                 desc = irq_desc + ia64_cpe_irq;
                 /*
-                 * Switch for now, immediatly, we need to do fake intr
+                 * Switch for now, immediately, we need to do fake intr
                  * as other interrupts, but need to study CPEI behaviour with
                  * polling before making changes.
                  */
@@ -840,7 +840,7 @@ __cpu_up (unsigned int cpu)
 }
 
 /*
- * Assume that CPU's have been discovered by some platform-dependent interface. For
+ * Assume that CPUs have been discovered by some platform-dependent interface. For
  * SoftSDV/Lion, that would be ACPI.
  *
  * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
@@ -854,7 +854,7 @@ init_smp_config(void)
         } *ap_startup;
         long sal_ret;
 
-        /* Tell SAL where to drop the AP's. */
+        /* Tell SAL where to drop the APs. */
         ap_startup = (struct fptr *) start_ap;
         sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
                        ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
@@ -216,7 +216,7 @@ ia64_init_itm (void)
 #ifdef CONFIG_SMP
         /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
          * Jitter compensation requires a cmpxchg which may limit
-         * the scalibility of the syscalls for retrieving time.
+         * the scalability of the syscalls for retrieving time.
          * The ITC synchronization is usually successful to within a few
          * ITC ticks but this is not a sure thing. If you need to improve
          * timer performance in SMP situations then boot the kernel with the
@@ -304,7 +304,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
          * Lower 4 bits are used as a count. Upper bits are a sequence
          * number that is updated when count is reset. The cmpxchg will
          * fail is seqno has changed. This minimizes mutiple cpus
-         * reseting the count.
+         * resetting the count.
          */
         if (current_jiffies > last.time)
                 (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
@@ -2,7 +2,7 @@
  * Copyright (C) 1999-2004 Hewlett-Packard Co
  *      David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
- *      - Change pt_regs_off() to make it less dependant on pt_regs structure.
+ *      - Change pt_regs_off() to make it less dependent on pt_regs structure.
  */
 /*
  * This file implements call frame unwind support for the Linux