Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
  [IA64] swiotlb abstraction (e.g. for Xen)
  [IA64] swiotlb cleanup
  [IA64] make swiotlb use bus_to_virt/virt_to_bus
  [IA64] swiotlb bug fixes
  [IA64] Hook up getcpu system call for IA64
  [IA64] clean up sparsemem memory_present call
  [IA64] show_mem() for IA64 sparsemem NUMA
  [IA64] missing exports hwsw_sync_...
  [IA64] virt_to_page() can be called with NULL arg
  [IA64] alignment bug in ldscript
  [IA64] register memory ranges in a consistent manner
  [IA64] Enable SWIOTLB only when needed
  [IA64-SGI] Check for TIO errors on shub2 Altix
  [IA64] remove bogus prototype ia64_esi_init()
  [IA64] Clear IRQ affinity when unregistered
  [IA64] fix ACPI Kconfig issues
  [IA64] Fix NULL-pointer dereference in ia64_machine_kexec()
  [IA64] find thread for user rbs address
  [IA64] use snprintf() on features field of /proc/cpuinfo
  [IA64] enable singlestep on system call
  ...
@@ -11,6 +11,8 @@ menu "Processor type and features"
 
 config IA64
 	bool
+	select PCI if (!IA64_HP_SIM)
+	select ACPI if (!IA64_HP_SIM)
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -28,7 +30,6 @@ config MMU
 
 config SWIOTLB
 	bool
-	default y
 
 config RWSEM_XCHGADD_ALGORITHM
 	bool
@@ -84,10 +85,9 @@ choice
 
 config IA64_GENERIC
 	bool "generic"
-	select ACPI
-	select PCI
 	select NUMA
 	select ACPI_NUMA
+	select SWIOTLB
 	help
 	  This selects the system type of your hardware. A "generic" kernel
 	  will run on any supported IA-64 system.  However, if you configure
@@ -104,6 +104,7 @@ config IA64_GENERIC
 
 config IA64_DIG
 	bool "DIG-compliant"
+	select SWIOTLB
 
 config IA64_HP_ZX1
 	bool "HP-zx1/sx1000"
@@ -113,6 +114,7 @@ config IA64_HP_ZX1
 
 config IA64_HP_ZX1_SWIOTLB
 	bool "HP-zx1/sx1000 with software I/O TLB"
+	select SWIOTLB
 	help
 	  Build a kernel that runs on HP zx1 and sx1000 systems even when they
 	  have broken PCI devices which cannot DMA to full 32 bits.  Apart
@@ -131,6 +133,7 @@ config IA64_SGI_SN2
 
 config IA64_HP_SIM
 	bool "Ski-simulator"
+	select SWIOTLB
 
 endchoice
 
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
+EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_single_for_device);
+EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_sg_for_device);
@@ -79,6 +79,7 @@ crash_save_this_cpu()
 	final_note(buf);
 }
 
+#ifdef CONFIG_SMP
 static int
 kdump_wait_cpu_freeze(void)
 {
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
 	}
 	return 1;
 }
+#endif
 
 void
 machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	if (!ia64_kimage) {
+		printk(KERN_NOTICE "machine_kdump_on_init(): "
+			"kdump not configured\n");
+		return;
+	}
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
 	atomic_inc(&kdump_cpu_freezed);
 	kdump_status[cpuid] = 1;
 	mb();
-	if (cpuid == 0) {
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpuid != 0)
+		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+#endif
 	for (;;)
 		cpu_relax();
-	} else
-		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
 }
 
 static int
@@ -9,7 +9,8 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
-#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
 
 /**
  * copy_oldmem_page - copy one page from "oldmem"
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
 #endif
 		return __va(md->phys_addr);
 	}
-	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
 	       __FUNCTION__);
 	return NULL;
 }
@@ -1610,5 +1610,7 @@ sys_call_table:
 	data8 sys_sync_file_range		// 1300
 	data8 sys_tee
 	data8 sys_vmsplice
+	data8 sys_ni_syscall			// reserved for move_pages
+	data8 sys_getcpu
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
 	/* Clear the interrupt controller descriptor */
 	idesc->chip = &no_irq_type;
 
+#ifdef CONFIG_SMP
+	/* Clear affinity */
+	cpus_setall(idesc->affinity);
+#endif
+
 	/* Clear the interrupt information */
 	memset(&iosapic_intr_info[vector], 0,
 	       sizeof(struct iosapic_intr_info));
@@ -14,6 +14,7 @@
 #include <linux/kexec.h>
 #include <linux/cpu.h>
 #include <linux/irq.h>
+#include <linux/efi.h>
 #include <asm/mmu_context.h>
 #include <asm/setup.h>
 #include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
 {
 }
 
-void machine_shutdown(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (cpu != smp_processor_id())
-			cpu_down(cpu);
-	}
-	kexec_disable_iosapic();
-}
-
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
 */
-extern void *efi_get_pal_addr(void);
 static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 {
 	struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	unsigned long vector;
 	int ii;
 
+	BUG_ON(!image);
 	if (image->type == KEXEC_TYPE_CRASH) {
 		crash_save_this_cpu();
 		current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 
 void machine_kexec(struct kimage *image)
 {
+	BUG_ON(!image);
 	unw_init_running(ia64_machine_kexec, image);
 	for(;;);
 }
@@ -34,6 +34,7 @@
 #include <asm/ia32.h>
 #include <asm/irq.h>
 #include <asm/kdebug.h>
+#include <asm/kexec.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
 		ia64_pal_halt(min_power_state);
 }
 
+void machine_shutdown(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			cpu_down(cpu);
+	}
+#endif
+#ifdef CONFIG_KEXEC
+	kexec_disable_iosapic();
+#endif
+}
+
 void
 machine_restart (char *restart_cmd)
 {
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 	 */
 	list_for_each_safe(this, next, &current->children) {
 		p = list_entry(this, struct task_struct, sibling);
-		if (p->mm != mm)
+		if (p->tgid != child->tgid)
 			continue;
 		if (thread_matches(p, addr)) {
 			child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
 	/* make sure the single step/taken-branch trap bits are not set: */
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 	child_psr->ss = 0;
 	child_psr->tb = 0;
 }
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 		 * Make sure the single step/taken-branch trap bits
 		 * are not set:
 		 */
+		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 		ia64_psr(pt)->ss = 0;
 		ia64_psr(pt)->tb = 0;
 
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 			goto out_tsk;
 
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		set_tsk_thread_flag(child, TIF_SINGLESTEP);
 		if (request == PTRACE_SINGLESTEP) {
 			ia64_psr(pt)->ss = 1;
 		} else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 }
 
 
-void
+static void
 syscall_trace (void)
 {
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return;
-	if (!(current->ptrace & PT_PTRACED))
-		return;
 	/*
 	 * The 0x80 provides a way for the tracing parent to
 	 * distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		audit_syscall_exit(success, result);
 	}
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
+	if ((test_thread_flag(TIF_SYSCALL_TRACE)
+	    || test_thread_flag(TIF_SINGLESTEP))
 	    && (current->ptrace & PT_PTRACED))
 		syscall_trace();
 }
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
 		{ 1UL << 1, "spontaneous deferral"},
 		{ 1UL << 2, "16-byte atomic ops" }
 	};
-	char features[128], *cp, sep;
+	char features[128], *cp, *sep;
 	struct cpuinfo_ia64 *c = v;
 	unsigned long mask;
 	unsigned long proc_freq;
-	int i;
+	int i, size;
 
 	mask = c->features;
 
 	/* build the feature string: */
-	memcpy(features, " standard", 10);
+	memcpy(features, "standard", 9);
 	cp = features;
-	sep = 0;
-	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+	size = sizeof(features);
+	sep = "";
+	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
 		if (mask & feature_bits[i].mask) {
-			if (sep)
-				*cp++ = sep;
-			sep = ',';
-			*cp++ = ' ';
-			strcpy(cp, feature_bits[i].feature_name);
-			cp += strlen(feature_bits[i].feature_name);
+			cp += snprintf(cp, size, "%s%s", sep,
+				       feature_bits[i].feature_name),
+			sep = ", ";
 			mask &= ~feature_bits[i].mask;
+			size = sizeof(features) - (cp - features);
 		}
 	}
-	if (mask) {
-		/* print unknown features as a hex value: */
-		if (sep)
-			*cp++ = sep;
-		sprintf(cp, " 0x%lx", mask);
+	if (mask && size > 1) {
+		/* print unknown features as a hex value */
+		snprintf(cp, size, "%s0x%lx", sep, mask);
 	}
 
 	proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   "model name : %s\n"
 		   "revision   : %u\n"
 		   "archrev    : %u\n"
-		   "features   :%s\n"	/* don't change this---it _is_ right! */
+		   "features   : %s\n"
 		   "cpu number : %lu\n"
 		   "cpu regs   : %u\n"
 		   "cpu MHz    : %lu.%06lu\n"
@@ -157,6 +157,7 @@ SECTIONS
 	  }
 #endif
 
+	. = ALIGN(8);
 	__con_initcall_start = .;
 	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
 		{ *(.con_initcall.init) }
@@ -30,47 +30,69 @@ static unsigned long max_gap;
 #endif
 
 /**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
  *
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
 */
-void
-show_mem (void)
+void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	int i, total_reserved = 0;
+	int total_shared = 0, total_cached = 0;
+	unsigned long total_present = 0;
+	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
 
 	printk(KERN_INFO "Free swap:       %6ldkB\n",
 	       nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	for (i = 0; i < max_mapnr; i++) {
-		if (!pfn_valid(i)) {
+	printk(KERN_INFO "Node memory in pages:\n");
+	for_each_online_pgdat(pgdat) {
+		unsigned long present;
+		unsigned long flags;
+		int shared = 0, cached = 0, reserved = 0;
+
+		pgdat_resize_lock(pgdat, &flags);
+		present = pgdat->node_present_pages;
+		for(i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page;
+			if (pfn_valid(pgdat->node_start_pfn + i))
+				page = pfn_to_page(pgdat->node_start_pfn + i);
+			else {
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 				if (max_gap < LARGE_GAP)
 					continue;
-			i = vmemmap_find_next_valid_pfn(0, i) - 1;
 #endif
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
 				continue;
 			}
-		total++;
-		if (PageReserved(mem_map+i))
+			if (PageReserved(page))
 				reserved++;
-		else if (PageSwapCache(mem_map+i))
+			else if (PageSwapCache(page))
 				cached++;
-		else if (page_count(mem_map + i))
-			shared += page_count(mem_map + i) - 1;
+			else if (page_count(page))
+				shared += page_count(page)-1;
 		}
-	printk(KERN_INFO "%d pages of RAM\n", total);
-	printk(KERN_INFO "%d reserved pages\n", reserved);
-	printk(KERN_INFO "%d pages shared\n", shared);
-	printk(KERN_INFO "%d pages swap cached\n", cached);
-	printk(KERN_INFO "%ld pages in page table cache\n",
+		pgdat_resize_unlock(pgdat, &flags);
+		total_present += present;
+		total_reserved += reserved;
+		total_cached += cached;
+		total_shared += shared;
+		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+		       present, reserved, shared, cached);
+	}
+	printk(KERN_INFO "%ld pages of RAM\n", total_present);
+	printk(KERN_INFO "%d reserved pages\n", total_reserved);
+	printk(KERN_INFO "%d pages shared\n", total_shared);
+	printk(KERN_INFO "%d pages swap cached\n", total_cached);
+	printk(KERN_INFO "Total of %ld pages in page table cache\n",
 	       pgtable_quicklist_total_size());
+	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
 
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
@@ -177,7 +199,7 @@ find_memory (void)
 
 #ifdef CONFIG_CRASH_DUMP
 	/* If we are doing a crash dump, we still need to know the real mem
-	 * size before original memory map is * reset. */
+	 * size before original memory map is reset. */
 	saved_max_pfn = max_pfn;
 #endif
 }
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
 	return;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
-	void *arg)
-{
-	int nid;
-
-	start = __pa(start) >> PAGE_SHIFT;
-	end = __pa(end) >> PAGE_SHIFT;
-	nid = early_pfn_to_nid(start);
-	memory_present(nid, start, end);
-
-	return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
-	efi_memmap_walk(register_sparse_mem, NULL);
-	sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -473,6 +442,9 @@ void __init find_memory(void)
 		node_clear(node, memory_less_mask);
 		mem_data[node].min_pfn = ~0UL;
 	}
+
+	efi_memmap_walk(register_active_ranges, NULL);
+
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
 	max_pfn = max_low_pfn;
 
 	find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+	/* If we are doing a crash dump, we still need to know the real mem
+	 * size before original memory map is reset. */
+	saved_max_pfn = max_pfn;
+#endif
 }
 
 #ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int node)
 {
 	unsigned long end = start + len;
 
-	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	arch_sparse_init();
-
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/a.out.h>
 #include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
 */
 
 #include <linux/types.h>
@@ -38,10 +38,18 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
 			(u64) nasid, 0, 0, 0, 0, 0, 0);
 
 		if ((int)ret_stuff.v0)
-			panic("hubii_eint_handler(): Fatal TIO Error");
+			panic("%s: Fatal %s Error", __FUNCTION__,
+				((nasid & 1) ? "TIO" : "HUBII"));
 
 		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
 			(void)hubiio_crb_error_handler(hubdev_info);
+	} else
+		if (nasid & 1) {	/* TIO errors */
+			SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+				(u64) nasid, 0, 0, 0, 0, 0, 0);
+
+			if ((int)ret_stuff.v0)
+				panic("%s: Fatal TIO Error", __FUNCTION__);
 	} else
 		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
 
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = NULL,
 };
 
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
 
 #define free_dma(x)
 
+void dma_mark_clean(void *addr, size_t size);
+
 #endif /* _ASM_IA64_DMA_H */
@@ -19,7 +19,6 @@ enum esi_proc_type {
 	ESI_PROC_REENTRANT	/* MP-safe and reentrant */
 };
 
-extern int ia64_esi_init (void);
 extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
 extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
 			 enum esi_proc_type,
@@ -51,12 +51,13 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
 
 #define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
 
+extern int register_active_ranges(u64 start, u64 end, void *arg);
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
   extern unsigned long vmalloc_end;
   extern struct page *vmem_map;
   extern int find_largest_hole (u64 start, u64 end, void *arg);
-  extern int register_active_ranges (u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
   extern int vmemmap_find_next_valid_pfn(int, int);
 #else
@@ -137,7 +137,8 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long addr)
 {
-	return virt_to_page(pgtable_quicklist_alloc());
+	void *pg = pgtable_quicklist_alloc();
+	return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
include/asm-ia64/swiotlb.h (new file, 9 lines)
@@ -0,0 +1,9 @@
+#ifndef _ASM_SWIOTLB_H
+#define _ASM_SWIOTLB_H 1
+
+#include <asm/machvec.h>
+
+#define SWIOTLB_ARCH_NEED_LATE_INIT
+#define SWIOTLB_ARCH_NEED_ALLOC
+
+#endif /* _ASM_SWIOTLB_H */
@@ -84,6 +84,7 @@ struct thread_info {
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
 #define TIF_SYSCALL_TRACE	3	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
+#define TIF_SINGLESTEP		5	/* restore singlestep on return to user mode */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
@@ -92,7 +93,8 @@ struct thread_info {
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -291,11 +291,13 @@
 #define __NR_sync_file_range		1300
 #define __NR_tee			1301
 #define __NR_vmsplice			1302
+/* 1303 reserved for move_pages */
+#define __NR_getcpu			1304
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls			279 /* length of syscall table */
+#define NR_syscalls			281 /* length of syscall table */
 
 #define __ARCH_WANT_SYS_RT_SIGACTION
 
@@ -1,6 +1,5 @@
 #ifndef _ASM_SWIOTLB_H
-#define _ASM_SWTIOLB_H 1
+#define _ASM_SWIOTLB_H 1
 
-
 #include <asm/dma-mapping.h>
 
@@ -45,6 +44,7 @@ extern void swiotlb_init(void);
 extern int swiotlb_force;
 
 #ifdef CONFIG_SWIOTLB
+#define SWIOTLB_ARCH_NEED_ALLOC
 extern int swiotlb;
 #else
 #define swiotlb 0
@@ -52,4 +52,6 @@ extern int swiotlb;
 
 extern void pci_swiotlb_init(void);
 
-#endif /* _ASM_SWTIOLB_H */
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#endif /* _ASM_SWIOTLB_H */
lib/swiotlb.c (282 lines changed)
@@ -1,7 +1,7 @@
 /*
  * Dynamic DMA mapping support.
  *
- * This implementation is for IA-64 and EM64T platforms that do not support
+ * This implementation is a fallback for platforms that do not support
  * I/O TLBs (aka DMA address translation hardware).
  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/scatterlist.h>
+#include <asm/swiotlb.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -35,8 +36,10 @@
 #define OFFSET(val,align) ((unsigned long)	\
 	( (val) & ( (align) - 1)))
 
+#ifndef SG_ENT_VIRT_ADDRESS
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
+#endif
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
 */
-static unsigned char **io_tlb_orig_addr;
+#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
+typedef char *io_tlb_addr_t;
+#define swiotlb_orig_addr_null(buffer) (!(buffer))
+#define ptr_to_io_tlb_addr(ptr) (ptr)
+#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
+#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
+#endif
+static io_tlb_addr_t *io_tlb_orig_addr;
 
 /*
 * Protect the above data structures in the map and unmap calls
 */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
+#ifdef SWIOTLB_EXTRA_VARIABLES
+SWIOTLB_EXTRA_VARIABLES;
+#endif
+
+#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
 		swiotlb_force = 1;
 	return 1;
 }
+#endif
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
+#ifndef swiotlb_adjust_size
+#define swiotlb_adjust_size(size) ((void)0)
+#endif
+
+#ifndef swiotlb_adjust_seg
+#define swiotlb_adjust_seg(start, size) ((void)0)
+#endif
+
+#ifndef swiotlb_print_info
+#define swiotlb_print_info(bytes)					\
+	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
+	       "0x%lx\n", bytes >> 20,					\
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
+#endif
+
 /*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
-void
+void __init
 swiotlb_init_with_default_size(size_t default_size)
 {
-	unsigned long i;
+	unsigned long i, bytes;
 
 	if (!io_tlb_nslabs) {
 		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
+	swiotlb_adjust_size(io_tlb_nslabs);
+	swiotlb_adjust_size(io_tlb_overflow);
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/*
 	 * Get IO TLB memory from the low pages
	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	io_tlb_end = io_tlb_start + bytes;
 
 	/*
	 * Allocate and initialize the free list array.  This array is used
@@ -153,25 +188,36 @@ swiotlb_init_with_default_size (size_t default_size)
 	 * between io_tlb_start and io_tlb_end.
 	 */
 	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-	for (i = 0; i < io_tlb_nslabs; i++)
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		if ( !(i % IO_TLB_SEGSIZE) )
+			swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
+					   IO_TLB_SEGSIZE << IO_TLB_SHIFT);
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	}
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
 
 	/*
	 * Get the overflow emergency buffer
	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
-}
+	if (!io_tlb_overflow_buffer)
+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+	swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
 
-void
+	swiotlb_print_info(bytes);
+}
+#ifndef __swiotlb_init_with_default_size
+#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
+#endif
+
+void __init
 swiotlb_init(void)
 {
-	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
+	__swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
 
+#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
 /*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
@@ -180,7 +226,7 @@ swiotlb_init (void)
 int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
-	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
 	unsigned int order;
 
 	if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	/*
	 * Get IO TLB memory from the low pages
	 */
-	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
 	io_tlb_nslabs = SLABS_PER_PAGE << order;
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	if (!io_tlb_start)
 		goto cleanup1;
 
-	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+	if (order != get_order(bytes)) {
 		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
 		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
 		io_tlb_nslabs = SLABS_PER_PAGE << order;
+		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	}
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
-	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_end = io_tlb_start + bytes;
+	memset(io_tlb_start, 0, bytes);
 
 	/*
	 * Allocate and initialize the free list array.  This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 
-	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
-	                           get_order(io_tlb_nslabs * sizeof(char *)));
+	io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
+	                           get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
 	if (!io_tlb_orig_addr)
 		goto cleanup3;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
 
 	/*
	 * Get the overflow emergency buffer
@@ -242,29 +290,29 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
-	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	swiotlb_print_info(bytes);
 
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
-	                                                      sizeof(char *)));
+	free_pages((unsigned long)io_tlb_orig_addr,
+		   get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
 	io_tlb_orig_addr = NULL;
 cleanup3:
-	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
-	                                                 sizeof(int)));
+	free_pages((unsigned long)io_tlb_list,
+		   get_order(io_tlb_nslabs * sizeof(int)));
 	io_tlb_list = NULL;
-	io_tlb_end = NULL;
cleanup2:
+	io_tlb_end = NULL;
 	free_pages((unsigned long)io_tlb_start, order);
 	io_tlb_start = NULL;
cleanup1:
 	io_tlb_nslabs = req_nslabs;
 	return -ENOMEM;
 }
+#endif
 
+#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
+static inline int range_needs_mapping(const void *ptr, size_t size)
+{
+	return swiotlb_force;
+}
+
+static inline int order_needs_mapping(unsigned int order)
+{
+	return 0;
+}
+#endif
+
+static void
+__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
+{
+#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
+	if (dir == DMA_TO_DEVICE)
+		memcpy(dma_addr, buffer, size);
+	else
+		memcpy(buffer, dma_addr, size);
+#else
+	__swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
+#endif
+}
+
 /*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
 static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 */
 	io_tlb_orig_addr[index] = buffer;
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		memcpy(dma_addr, buffer, size);
+		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 
 	return dma_addr;
 }
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	char *buffer = io_tlb_orig_addr[index];
+	io_tlb_addr_t buffer = io_tlb_orig_addr[index];
 
 	/*
	 * First, sync the memory before unmapping the entry
	 */
-	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if (!swiotlb_orig_addr_null(buffer)
+	    && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		/*
		 * bounce... copy the data back into the original buffer * and
		 * delete the bounce buffer.
		 */
-		memcpy(buffer, dma_addr, size);
+		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 
 	/*
	 * Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	    int dir, int target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	char *buffer = io_tlb_orig_addr[index];
+	io_tlb_addr_t buffer = io_tlb_orig_addr[index];
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			memcpy(buffer, dma_addr, size);
+			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			memcpy(dma_addr, buffer, size);
+			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	}
 }
 
+#ifdef SWIOTLB_ARCH_NEED_ALLOC
+
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       dma_addr_t *dma_handle, gfp_t flags)
 {
-	unsigned long dev_addr;
+	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
 
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	 */
 	flags |= GFP_DMA;
 
-	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+	if (!order_needs_mapping(order))
+		ret = (void *)__get_free_pages(flags, order);
+	else
+		ret = NULL;
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
 		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
-		ret = phys_to_virt(handle);
+		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
-	dev_addr = virt_to_phys(ret);
+	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (address_needs_mapping(hwdev, dev_addr)) {
-		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
-		       (unsigned long long)*hwdev->dma_mask, dev_addr);
+		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dev_addr);
 		panic("swiotlb_alloc_coherent: allocated memory is out of "
 		      "range for device");
 	}
 	*dma_handle = dev_addr;
 	return ret;
 }
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_free_coherent);
+
+#endif
 
 static void
 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
-	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_phys(ptr);
+	dma_addr_t dev_addr = virt_to_bus(ptr);
	void *map;

	BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
-	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+	if (!range_needs_mapping(ptr, size)
+	    && !address_needs_mapping(hwdev, dev_addr))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
-	map = map_single(hwdev, ptr, size, dir);
+	map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

-	dev_addr = virt_to_phys(map);
+	dev_addr = virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
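Two hooks replace the old test here. The global swiotlb_force check becomes range_needs_mapping(ptr, size), so an architecture can force bouncing for particular buffers (e.g. guest pages that are not machine-contiguous under a hypervisor) instead of all-or-nothing. And map_single() now receives ptr_to_io_tlb_addr(ptr) rather than the raw pointer, letting the architecture translate into whatever address form the bounce-pool bookkeeping uses. Behavior-preserving defaults, inferred from the code being replaced (assumptions, not shown in this hunk):

	/* Inferred defaults; the real definitions are elsewhere in the
	 * patched file.  With these, the new condition reduces exactly
	 * to the old "!address_needs_mapping(...) && !swiotlb_force". */
	static inline int range_needs_mapping(void *ptr, size_t size)
	{
		return swiotlb_force;
	}
	#define ptr_to_io_tlb_addr(ptr)	(ptr)
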
@@ -557,25 +641,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
	return dev_addr;
 }

-/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
-	unsigned long pg_addr, end;
-
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
-}
-
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
  * match what was provided for in a previous swiotlb_map_single call. All
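The deleted mark_clean() was IA64 policy living in generic code: IA64 DMA is i-cache coherent, so any page completely overwritten by DMA can have PG_arch_1 set, letting lazy_mmu_prot_update() skip the i-cache flush if the page is later mapped executable. The callers below now use dma_mark_clean() instead, which each architecture defines for itself; on architectures without this optimization the natural definition is a no-op, e.g.:

	/* Illustrative no-op for architectures that do not track
	 * DMA-cleaned pages; IA64 would supply an implementation
	 * equivalent to the mark_clean() removed above. */
	#define dma_mark_clean(addr, size)	do { } while (0)
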
@@ -588,13 +653,13 @@ void
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }

 /*
@@ -611,13 +676,13 @@ static inline void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }

 void
@@ -642,13 +707,13 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr) + offset;
+	char *dma_addr = bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }

 void
@@ -687,18 +752,16 @@ int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
	       int dir)
 {
-	void *addr;
-	unsigned long dev_addr;
+	dma_addr_t dev_addr;
	int i;

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_phys(addr);
-		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-			void *map = map_single(hwdev, addr, sg->length, dir);
-			sg->dma_address = virt_to_bus(map);
+		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
+		if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
+		    || address_needs_mapping(hwdev, dev_addr)) {
+			void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
				sg[0].dma_length = 0;
				return 0;
			}
+			sg->dma_address = virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }

 /*
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address,
+			sync_single(hwdev, bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
+		else if (dir == DMA_FROM_DEVICE)
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }

 void
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }

+#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
+
+dma_addr_t
+swiotlb_map_page(struct device *hwdev, struct page *page,
+		 unsigned long offset, size_t size,
+		 enum dma_data_direction direction)
+{
+	dma_addr_t dev_addr;
+	char *map;
+
+	dev_addr = page_to_bus(page) + offset;
+	if (address_needs_mapping(hwdev, dev_addr)) {
+		map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
+		if (!map) {
+			swiotlb_full(hwdev, size, direction, 1);
+			map = io_tlb_overflow_buffer;
+		}
+		dev_addr = virt_to_bus(map);
+	}
+
+	return dev_addr;
+}
+
+void
+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+		   size_t size, enum dma_data_direction direction)
+{
+	char *dma_addr = bus_to_virt(dev_addr);
+
+	BUG_ON(direction == DMA_NONE);
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		unmap_single(hwdev, dma_addr, size, direction);
+	else if (direction == DMA_FROM_DEVICE)
+		dma_mark_clean(dma_addr, size);
+}
+
+#endif
+
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }

 /*
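swiotlb_map_page()/swiotlb_unmap_page() mirror the single-buffer pair but start from a struct page plus offset rather than a kernel virtual address, which is what an architecture needs when DMA buffers may live in memory without a permanent kernel mapping (highmem). They are compiled only when the architecture defines SWIOTLB_ARCH_NEED_MAP_PAGE. A hypothetical caller (not part of this patch) would be an arch dma_map_page() that simply forwards:

	dma_addr_t dma_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir)
	{
		return swiotlb_map_page(dev, page, offset, size, dir);
	}
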
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
  * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
+#ifndef __swiotlb_dma_supported
+#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
+#endif
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return (virt_to_phys (io_tlb_end) - 1) <= mask;
+	return __swiotlb_dma_supported(hwdev, mask);
 }

 EXPORT_SYMBOL(swiotlb_init);
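The DMA-mask check also becomes overridable: unless the architecture supplies its own __swiotlb_dma_supported(), the default tests whether the last byte of the bounce pool is bus-addressable under the device's mask. Note the default converts io_tlb_end - 1 rather than subtracting one after the conversion as the old code did; the two are only equivalent when the virtual-to-bus mapping is linear. An override could, for instance, hardwire a platform limit (the value below is purely illustrative):

	/* Illustrative arch override -- defined before this file is built. */
	#define __swiotlb_dma_supported(hwdev, mask)	((mask) >= 0x7fffffffUL)
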
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);