Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
This commit is contained in:
23
arch/sparc/mm/Makefile
Normal file
23
arch/sparc/mm/Makefile
Normal file
@@ -0,0 +1,23 @@
|
||||
# $Id: Makefile,v 1.38 2000/12/15 00:41:22 davem Exp $
# Makefile for the linux Sparc-specific parts of the memory manager.
#

# Assemble .S files in strict ANSI mode.
EXTRA_AFLAGS := -ansi

# Core objects built for every sparc32 configuration.
obj-y := fault.o init.o loadmmu.o generic.o extable.o btfixup.o

# Sun4 machines get the SRMMU stub; everything else gets the real
# SRMMU core plus the per-CPU-module support files.
ifeq ($(CONFIG_SUN4),y)
obj-y += nosrmmu.o
else
obj-y += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
endif

# Optional highmem support.
ifdef CONFIG_HIGHMEM
obj-y += highmem.o
endif

# On SMP kernels the sun4c code is replaced by a stub
# (NOTE(review): inferred from the no*.o naming -- confirm).
ifdef CONFIG_SMP
obj-y += nosun4c.o
else
obj-y += sun4c.o
endif
|
336
arch/sparc/mm/btfixup.c
Normal file
336
arch/sparc/mm/btfixup.c
Normal file
@@ -0,0 +1,336 @@
|
||||
/* $Id: btfixup.c,v 1.10 2000/05/09 17:40:13 davem Exp $
|
||||
* btfixup.c: Boot time code fixup and relocator, so that
|
||||
* we can get rid of most indirect calls to achieve single
|
||||
* image sun4c and srmmu kernel.
|
||||
*
|
||||
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/btfixup.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
/* Both call-site optimizations are enabled by default; btfixup() below
 * compiles the corresponding patching paths only when these are set.
 */
#define BTFIXUP_OPTIMIZE_NOP
#define BTFIXUP_OPTIMIZE_OTHER

/* Name of the detected SRMMU implementation, set elsewhere. */
extern char *srmmu_name;

/* Boot banner fragments, printed once from btfixup(). */
static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
#ifdef CONFIG_SUN4
static char str_sun4c[] __initdata = "sun4\n";
#else
static char str_sun4c[] __initdata = "sun4c\n";
#endif
static char str_srmmu[] __initdata = "srmmu[%s]/";
static char str_iommu[] __initdata = "iommu\n";
static char str_iounit[] __initdata = "io-unit\n";

/* Guards the one-time banner printout in btfixup(). */
static int visited __initdata = 0;

/* Linker-provided section boundaries used to validate patch targets. */
extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];

/* Fatal-error format strings for the validation paths in btfixup();
 * each is followed by prom_halt().
 */
static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
|
||||
|
||||
#ifdef BTFIXUP_OPTIMIZE_OTHER
/* Store a patched instruction word at @addr.
 *
 * @addr:     text location being patched
 * @q1:       fixup-table slot that may hold an instruction the call
 *            optimizer moved out of the call site
 * @fmangled: non-zero when btfixup() found the call site already
 *            rewritten, i.e. the live copy of the original insn is
 *            tracked via @q1 rather than sitting at @addr
 * @value:    new instruction word
 *
 * For a mangled site the original insn is either noped out at @addr
 * (then only the saved copy *q is updated) or was moved into addr[-1]
 * (then both the moved copy and the saved copy are updated).  Any
 * other state means the patch table and the text disagree, which is
 * unrecoverable -- halt via the PROM.
 */
static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	if (!fmangled)
		*addr = value;
	else {
		unsigned int *q = (unsigned int *)q1;
		if (*addr == 0x01000000) {
			/* Noped */
			*q = value;
		} else if (addr[-1] == *q) {
			/* Moved */
			addr[-1] = value;
			*q = value;
		} else {
			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
			prom_halt();
		}
	}
}
#else
/* Without the optimizer, the call site is never mangled: store directly. */
static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	*addr = value;
}
#endif
|
||||
|
||||
/* Apply every boot-time fixup recorded in the ___btfixup section.
 *
 * Each table entry carries a one-character type tag in its first byte
 * ('f' call, 'b' blackbox, 's' simm13, 'h' sethi, 'a' half, 'i' int),
 * the replacement value in p[1]/p[2], and a list of text addresses to
 * patch (layout defined in asm/btfixup.h -- not visible here).  Bit 0
 * of p[0] marks an entry as pending and is cleared once processed.
 * All validation failures halt the machine through the PROM: running
 * a half-patched kernel is not an option.
 */
void __init btfixup(void)
{
	unsigned int *p, *q;
	int type, count;
	unsigned insn;
	unsigned *addr;
	int fmangled = 0;
	void (*flush_cacheall)(void);

	/* Print the "patching kernel for ..." banner exactly once. */
	if (!visited) {
		visited++;
		printk(version);
		if (ARCH_SUN4C_SUN4)
			printk(str_sun4c);
		else {
			printk(str_srmmu, srmmu_name);
			if (sparc_cpu_model == sun4d)
				printk(str_iounit);
			else
				printk(str_iommu);
		}
	}
	for (p = ___btfixup_start; p < ___btfixup_end; ) {
		count = p[2];
		q = p + 3;
		/* Validate the entry's replacement value per type before
		 * touching any text.
		 */
		switch (type = *(unsigned char *)p) {
		case 'f':
			/* 'f' entries have an extra word; value must be a
			 * word-aligned kernel text address (or the entry is
			 * both unprocessed and zero-valued).
			 */
			count = p[3];
			q = p + 4;
			if (((p[0] & 1) || p[1])
			    && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
				prom_printf(wrong_f, p, p[1]);
				prom_halt();
			}
			break;
		case 'b':
			/* Blackbox handler must live in init text. */
			if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
				prom_printf(wrong_b, p, p[1]);
				prom_halt();
			}
			break;
		case 's':
			/* Value must fit in a signed 13-bit immediate. */
			if (p[1] + 0x1000 >= 0x2000) {
				prom_printf(wrong_s, p, p[1]);
				prom_halt();
			}
			break;
		case 'h':
			/* Value must have the low 10 bits clear (SETHI imm). */
			if (p[1] & 0x3ff) {
				prom_printf(wrong_h, p, p[1]);
				prom_halt();
			}
			break;
		case 'a':
			/* Must be encodable either as simm13 or as SETHI. */
			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
				prom_printf(wrong_a, p, p[1]);
				prom_halt();
			}
			break;
		}
		if (p[0] & 1) {
			/* Pending entry: clear the flag and patch each
			 * recorded site.
			 */
			p[0] &= ~1;
			while (count) {
				fmangled = 0;
				addr = (unsigned *)*q;
				if (addr < _stext || addr >= _end) {
					prom_printf(wrong, type, p);
					prom_halt();
				}
				insn = *addr;
#ifdef BTFIXUP_OPTIMIZE_OTHER
				/* If this site was mangled on an earlier pass,
				 * the original insn is saved in q[1] (0/1 are
				 * "nothing saved" sentinels).
				 */
				if (type != 'f' && q[1]) {
					insn = *(unsigned int *)q[1];
					if (!insn || insn == 1)
						insn = *addr;
					else
						fmangled = 1;
				}
#endif
				switch (type) {
				case 'f':	/* CALL */
					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
						/* Ksymtab slot: store the value verbatim. */
						*addr = p[1];
						break;
					} else if (!q[1]) {
						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
							*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
					bad_f:
							prom_printf(insn_f, p, addr, insn, addr[1]);
							prom_halt();
						}
					} else if (q[1] != 1)
						addr[1] = q[1];
					if (p[2] == BTFIXUPCALL_NORM) {
					norm_f:
						/* Plain PC-relative CALL to the target. */
						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
						q[1] = 0;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_NOP
					goto norm_f;
#else
					/* Inspect the delay-slot insn to decide whether the
					 * call can be optimized away entirely.
					 */
					if (!(addr[1] & 0x80000000)) {
						if ((addr[1] & 0xc1c00000) != 0x01000000)	/* !SETHI */
							goto bad_f;	/* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
					} else {
						if ((addr[1] & 0x01800000) == 0x01800000) {
							if ((addr[1] & 0x01f80000) == 0x01e80000) {
								/* RESTORE */
								goto norm_f; /* It is dangerous to patch that */
							}
							goto bad_f;
						}
						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
							/* ADD %O7, XX, %o7 */
							int displac = (addr[1] << 19);

							displac = (displac >> 21) + 2;
							*addr = (0x10800000) + (displac & 0x3fffff);
							q[1] = addr[1];
							addr[1] = p[2];
							break;
						}
						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
						if ((addr[1] & 0x3e000000) == 0x1e000000)
							goto norm_f; /* rd is %o7. We'd better take care. */
					}
					if (p[2] == BTFIXUPCALL_NOP) {
						/* Call removed: nop the site, remember it. */
						*addr = 0x01000000;
						q[1] = 1;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_OTHER
					goto norm_f;
#else
					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
						q[1] = addr[1];
						*addr = p[2];
						break;
					}
					if ((addr[1] & 0xc0000000) != 0xc0000000) {
						/* Not a memory operation */
						if ((addr[1] & 0x30000000) == 0x10000000) {
							/* Ok, non-memory op with rd %oX */
							if ((addr[1] & 0x3e000000) == 0x1c000000)
								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
							if ((addr[1] & 0x3e000000) > 0x12000000 ||
							    ((addr[1] & 0x3e000000) == 0x12000000 &&
							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
								/* Nobody uses the result. We can nop it out. */
								*addr = p[2];
								q[1] = addr[1];
								addr[1] = 0x01000000;
								break;
							}
							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
								/* MOV %reg, %Ox */
								if ((addr[1] & 0x3e000000) == 0x10000000 &&
								    (p[2] & 0x7c000) == 0x20000) {
									/* Ok, it is call xx; mov reg, %o0 and call optimizes
									   to doing something on %o0. Patch the patch. */
									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
								if ((addr[1] & 0x3e000000) == 0x12000000 &&
								    p[2] == BTFIXUPCALL_STO1O0) {
									*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
							}
						}
					}
					/* Fallback: swap the replacement into the call slot
					 * and push the old delay-slot insn up front.
					 */
					*addr = addr[1];
					q[1] = addr[1];
					addr[1] = p[2];
					break;
#endif /* BTFIXUP_OPTIMIZE_OTHER */
#endif /* BTFIXUP_OPTIMIZE_NOP */
				case 'b':	/* BLACKBOX */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_b, p, addr, insn);
						prom_halt();
					} else {
						/* Entry supplies its own patch routine. */
						void (*do_fixup)(unsigned *);

						do_fixup = (void (*)(unsigned *))p[1];
						do_fixup(addr);
					}
					break;
				case 's':	/* SIMM13 */
					/* Has to be or %g0, i, xx */
					if ((insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_s, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
					break;
				case 'h':	/* SETHI */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_h, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					break;
				case 'a':	/* HALF */
					/* Has to be sethi i, xx or or %g0, i, xx */
					if ((insn & 0xc1c00000) != 0x01000000 &&
					    (insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_a, p, addr, insn);
						prom_halt();
					}
					/* Re-encode as OR-immediate or SETHI depending on
					 * whether the low 10 bits of the value are used.
					 */
					if (p[1] & 0x3ff)
						set_addr(addr, q[1], fmangled,
							 (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
					else
						set_addr(addr, q[1], fmangled,
							 (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
					break;
				case 'i':	/* INT */
					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					else if ((insn & 0x80002000) == 0x80002000 &&
						 (insn & 0x01800000) != 0x01800000) /* %LO */
						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
					else {
						prom_printf(insn_i, p, addr, insn);
						prom_halt();
					}
					break;
				}
				/* Each site record is two words: address + saved insn. */
				count -= 2;
				q += 2;
			}
		} else
			/* Already processed: skip over the site list. */
			p = q + count;
	}
	/* Flush the caches so the patched text is what actually executes. */
#ifdef CONFIG_SMP
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
#else
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
#endif
	if (!flush_cacheall) {
		prom_printf(fca_und);
		prom_halt();
	}
	(*flush_cacheall)();
}
|
77
arch/sparc/mm/extable.c
Normal file
77
arch/sparc/mm/extable.c
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
* linux/arch/sparc/mm/extable.c
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* Generic kernel hook to sort an exception table at load time.
 * Intentionally a no-op on sparc: the table mixes one-slot entries
 * with two-slot range entries (see search_extable() below), which a
 * blind element-wise sort would tear apart, so the table is left in
 * link order.  NOTE(review): rationale inferred from the range
 * encoding -- confirm against the generic extable code.
 */
void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
}
|
||||
|
||||
/* Caller knows they are in a range if ret->fixup == 0 */
|
||||
const struct exception_table_entry *
|
||||
search_extable(const struct exception_table_entry *start,
|
||||
const struct exception_table_entry *last,
|
||||
unsigned long value)
|
||||
{
|
||||
const struct exception_table_entry *walk;
|
||||
|
||||
/* Single insn entries are encoded as:
|
||||
* word 1: insn address
|
||||
* word 2: fixup code address
|
||||
*
|
||||
* Range entries are encoded as:
|
||||
* word 1: first insn address
|
||||
* word 2: 0
|
||||
* word 3: last insn address + 4 bytes
|
||||
* word 4: fixup code address
|
||||
*
|
||||
* See asm/uaccess.h for more details.
|
||||
*/
|
||||
|
||||
/* 1. Try to find an exact match. */
|
||||
for (walk = start; walk <= last; walk++) {
|
||||
if (walk->fixup == 0) {
|
||||
/* A range entry, skip both parts. */
|
||||
walk++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (walk->insn == value)
|
||||
return walk;
|
||||
}
|
||||
|
||||
/* 2. Try to find a range match. */
|
||||
for (walk = start; walk <= (last - 1); walk++) {
|
||||
if (walk->fixup)
|
||||
continue;
|
||||
|
||||
if (walk[0].insn <= value && walk[1].insn > value)
|
||||
return walk;
|
||||
|
||||
walk++;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Special extable search, which handles ranges. Returns fixup */
|
||||
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
|
||||
{
|
||||
const struct exception_table_entry *entry;
|
||||
|
||||
entry = search_exception_tables(addr);
|
||||
if (!entry)
|
||||
return 0;
|
||||
|
||||
/* Inside range? Fix g2 and return correct fixup */
|
||||
if (!entry->fixup) {
|
||||
*g2 = (addr - entry->insn) / 4;
|
||||
return (entry + 1)->fixup;
|
||||
}
|
||||
|
||||
return entry->fixup;
|
||||
}
|
596
arch/sparc/mm/fault.c
Normal file
596
arch/sparc/mm/fault.c
Normal file
@@ -0,0 +1,596 @@
|
||||
/* $Id: fault.c,v 1.122 2001/11/17 07:19:26 davem Exp $
|
||||
* fault.c: Page fault handlers for the Sparc.
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <asm/head.h>
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/memreg.h>
|
||||
#include <asm/openprom.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/kdebug.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* Element count of a true array (do not use on pointers). */
#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))

extern int prom_node_root;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;		/* sentinel segmap value; see do_sun4c_fault() */

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
|
||||
|
||||
/* Nice, simple, prom library does all the sweating for us. ;) */
/* Walk the PROM's v0 available-memory list, recording each bank into
 * sp_banks[] and returning the total number of bytes found.  The list
 * is terminated in sp_banks[] by a { 0xdeadbeef, 0 } sentinel entry.
 * If the machine reports more banks than SPARC_PHYS_BANKS, the excess
 * is dropped with a warning (the bank array is clamped, not grown).
 */
int prom_probe_memory (void)
{
	register struct linux_mlist_v0 *mlist;
	register unsigned long bytes, base_paddr, tally;
	register int i;

	i = 0;
	mlist= *prom_meminfo()->v0_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = (unsigned long) mlist->start_adr;

	/* First bank comes straight from the list head. */
	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		if (i > SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	/* Terminating sentinel entry. */
	i++;
	sp_banks[i].base_addr = 0xdeadbeef;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for(i=0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;

	return tally;
}
|
||||
|
||||
/* Traverse the memory lists in the prom to see how much physical we
 * have.
 */
unsigned long
probe_memory(void)
{
	/* Oh man, much nicer, keep the dirt in promlib. */
	return prom_probe_memory();
}
|
||||
|
||||
extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
/* Dump the synchronous/asynchronous error registers and fault
 * addresses handed in by the trap entry code, show the register
 * state, and drop into the PROM -- this fault is not recoverable.
 */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	/* Drain pending stores before reading the error state. */
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk(" Synchronous Vaddr %08lx\n", svaddr);
	printk(" Asynchronous Error %08lx\n", aerr);
	printk(" Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}
|
||||
|
||||
static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

/* Report a kernel-mode fault we could not fix up and die.
 * Faulting addresses inside the first page are reported as NULL
 * pointer dereferences; everything else as a bad paging request.
 * Does not return (die_if_kernel() on a kernel-mode regs set).
 */
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		struct pt_regs *regs)
{
	if((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	/* tsk->mm may be NULL (kernel thread); fall back to active_mm. */
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
		    (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
|
||||
|
||||
/* Classify a fault taken inside a uaccess helper by consulting the
 * exception tables for @ret_pc, deciding from the faulting insn at
 * @pc whether the fixup applies.  Returns the fixup class (1, 2, 3)
 * when the caller's fixup should run; otherwise oopses via
 * unhandled_fault() and never returns the trailing 0.
 * NOTE(review): the meaning of classes 1/2/3 mirrors the reserved
 * fixup values checked in do_sparc_fault() -- confirm against
 * asm/uaccess.h.
 */
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		/* Bit 21 distinguishes store from load here. */
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	};

	/* No fixup applies: build a minimal regs frame (current %psr,
	 * faulting pc) and oops.
	 */
	memset(&regs, 0, sizeof (regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
|
||||
|
||||
extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

/* Compute the faulting address to report in siginfo.  A text fault
 * faults on the instruction address itself; for data faults, fetch
 * the faulting instruction (directly in kernel mode, via __get_user()
 * for user mode) and let safe_compute_effective_address() decode its
 * operand address.
 */
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		/* Best effort: the __get_user() result is not checked. */
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}
|
||||
|
||||
/* Main sparc32 page-fault handler: resolve the fault against the
 * task's VMAs, or fall through to vmalloc-area synchronization,
 * exception-table fixup, signal delivery, or an oops, in that order
 * of preference.
 *
 * @regs:       trap-time register state
 * @text_fault: non-zero for instruction-fetch faults (address is
 *              taken from regs->pc in that case)
 * @write:      non-zero for write accesses
 * @address:    faulting virtual address (data faults)
 */
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	siginfo_t info;
	int from_user = !(regs->psr & PSR_PS);

	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (!ARCH_SUN4C_SUN4 && address >= TASK_SIZE)
		goto vmalloc_fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	/* Below the nearest VMA: only valid if it's a growable stack. */
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_MINOR:
	default:
		current->min_flt++;
		break;
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if(from_user) {
#if 0
		printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
		       tsk->comm, tsk->pid, address, regs->pc);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code set above to make clear whether
		   this was a SEGV_MAPERR or SEGV_ACCERR fault. */
		info.si_addr = (void __user *)compute_si_addr(regs, text_fault);
		info.si_trapno = 0;
		force_sig_info (SIGSEGV, &info, tsk);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user && (fixup = search_extables_range(regs->pc, &g2))) {
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			/* The memset/csum helpers additionally expect the
			 * fault address and pc in %i4/%i5.
			 */
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault (address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (from_user)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) compute_si_addr(regs, text_fault);
	info.si_trapno = 0;
	force_sig_info (SIGBUS, &info, tsk);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
|
||||
|
||||
/* sun4c fast-path fault handler: try to service the fault by merely
 * refreshing accessed/modified bits in an already-valid pte and
 * reloading it into the MMU, falling back to the full
 * do_sparc_fault() path when that is not possible.
 */
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long,pte_t);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		/* User-mode read fault: peek at the faulting insn -- if
		 * it matches this encoding, treat the access as a write.
		 * NOTE(review): mask presumably matches atomic
		 * load-store/swap style insns -- confirm against the
		 * SPARC V8 opcode tables.
		 */
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
	    if (write) {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			/* Writable and present: just set the soft bits and
			 * push the pte back into the hardware if its segment
			 * is currently mapped.
			 */
			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_MODIFIED |
				      _SUN4C_PAGE_VALID |
				      _SUN4C_PAGE_DIRTY);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    } else {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			/* Readable and present: same dance, minus the
			 * modified/dirty bits.
			 */
			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_VALID);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    }
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
					*ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
|
||||
|
||||
/* This always deals with user addresses. */
/* Resolve a fault on a user address on behalf of the register-window
 * handlers below: walk the VMAs of the current task, grow the stack
 * if needed, and call handle_mm_fault().  Failures are reported to
 * the task with SIGSEGV/SIGBUS rather than an oops -- the caller is
 * always working on user stack memory.
 *
 * @address: user virtual address to fault in
 * @write:   non-zero when write access is required
 */
inline void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	siginfo_t info;

	info.si_code = SEGV_MAPERR;

#if 0
	printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
	       tsk->pid, write, address);
#endif
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	/* Below the nearest VMA: only a growable stack may expand. */
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
good_area:
	info.si_code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
#if 0
	printk("Window whee %s [%d]: segfaults at %08lx\n",
	       tsk->comm, tsk->pid, address);
#endif
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	/* info.si_code set above to make clear whether
	   this was a SEGV_MAPERR or SEGV_ACCERR fault. */
	info.si_addr = (void __user *) address;
	info.si_trapno = 0;
	force_sig_info (SIGSEGV, &info, tsk);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	info.si_trapno = 0;
	force_sig_info (SIGBUS, &info, tsk);
}
|
||||
|
||||
void window_overflow_fault(void)
|
||||
{
|
||||
unsigned long sp;
|
||||
|
||||
sp = current_thread_info()->rwbuf_stkptrs[0];
|
||||
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
|
||||
force_user_fault(sp + 0x38, 1);
|
||||
force_user_fault(sp, 1);
|
||||
}
|
||||
|
||||
void window_underflow_fault(unsigned long sp)
|
||||
{
|
||||
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
|
||||
force_user_fault(sp + 0x38, 0);
|
||||
force_user_fault(sp, 0);
|
||||
}
|
||||
|
||||
void window_ret_fault(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long sp;
|
||||
|
||||
sp = regs->u_regs[UREG_FP];
|
||||
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
|
||||
force_user_fault(sp + 0x38, 0);
|
||||
force_user_fault(sp, 0);
|
||||
}
|
154
arch/sparc/mm/generic.c
Normal file
154
arch/sparc/mm/generic.c
Normal file
@@ -0,0 +1,154 @@
|
||||
/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
|
||||
* generic.c: Generic Sparc mm routines that are not dependent upon
|
||||
* MMU type but are Sparc specific.
|
||||
*
|
||||
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
/*
 * forget_pte() - sanity-check a pte we are about to overwrite.
 *
 * In the current implementation an io_remap target pte must be none;
 * anything else indicates a stale mapping and is a hard bug.  The
 * disabled branch is the old 2.4 behaviour, which instead released
 * the old page / swap entry; kept for reference.
 */
static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long pfn = pte_pfn(page);
		struct page *ptpage;
		if (!pfn_valid(pfn))
			return;
		ptpage = pfn_to_page(pfn);
		if (PageReserved(ptpage))
			return;
		page_cache_release(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
#else
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
#endif
}
|
||||
|
||||
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
/*
 * Fill one pte page: map [offset, offset+size) of IO space @space at
 * virtual [address, address+size), clamped to the enclosing pmd.
 * Each old pte is cleared and checked via forget_pte() before the new
 * IO pte (mk_pte_io) is installed.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Reduce address to its offset within this pmd's span. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(mm, address, pte);
		set_pte(pte, mk_pte_io(offset, prot, space));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}
|
||||
|
||||
/*
 * Walk/allocate the pte pages under one pgd entry and hand each pmd
 * span to io_remap_pte_range().  @offset is rebased so that
 * (address + offset) yields the IO-space offset for any address in
 * the range.  Returns 0 or -ENOMEM if a pte page cannot be allocated.
 */
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Reduce address to its offset within this pgd's span. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
|
||||
|
||||
/*
 * io_remap_page_range() - map [offset, offset+size) of IO space
 * @space into user range [from, from+size) of @vma.
 *
 * The caller-supplied @prot is ignored: pg_iobits (IO page
 * protection, no mem_map accounting) is always used.  Caches are
 * flushed for the range before the page tables are touched and the
 * TLB is flushed afterwards.  Returns 0 or -ENOMEM.
 */
int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		/* Allocate in vma->vm_mm, not current->mm: the vma being
		 * mapped need not belong to the current task.
		 */
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
|
||||
|
||||
/*
 * io_remap_pfn_range() - pfn-based variant of io_remap_page_range().
 *
 * The IO space and byte offset are encoded in @pfn and extracted with
 * GET_IOSPACE()/GET_PFN().  As above, @prot is ignored in favour of
 * pg_iobits.  Returns 0 or -ENOMEM.
 */
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		/* Allocate in vma->vm_mm, not current->mm: the vma being
		 * mapped need not belong to the current task.
		 */
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
|
120
arch/sparc/mm/highmem.c
Normal file
120
arch/sparc/mm/highmem.c
Normal file
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
* highmem.c: virtual kernel memory mappings for high memory
|
||||
*
|
||||
* Provides kernel-static versions of atomic kmap functions originally
|
||||
* found as inlines in include/asm-sparc/highmem.h. These became
|
||||
* needed as kmap_atomic() and kunmap_atomic() started getting
|
||||
* called from within modules.
|
||||
* -- Tomas Szepe <szepe@pinerecords.com>, September 2002
|
||||
*
|
||||
* But kmap_atomic() and kunmap_atomic() cannot be inlined in
|
||||
* modules because they are loaded with btfixup-ped functions.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
|
||||
* gives a more generic (and caching) interface. But kmap_atomic can
|
||||
* be used in IRQ contexts, so in some (very limited) cases we need it.
|
||||
*
|
||||
* XXX This is an old text. Actually, it's good to use atomic kmaps,
|
||||
* provided you remember that they are atomic and not try to sleep
|
||||
* with a kmap taken, much like a spinlock. Non-atomic kmaps are
|
||||
* shared by CPUs, and so precious, and establishing them requires IPI.
|
||||
* Atomic kmaps are lightweight and we may have NCPUS more of them.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/fixmap.h>
|
||||
|
||||
/*
 * kmap_atomic() - map a (possibly highmem) page into a per-CPU fixmap
 * slot chosen by @type.  Bumps the preempt count even for lowmem
 * pages so the mapping stays pinned to this CPU; every call must be
 * paired with kunmap_atomic().  Returns the kernel virtual address.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	/* One fixmap slot per (km_type, cpu) pair. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
	/* Single-line flush would suffice; whole-cache flush used for now. */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be free - catches missing kunmap_atomic(). */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
|
||||
|
||||
/*
 * kunmap_atomic() - undo kmap_atomic().
 *
 * NOTE: the #ifdef CONFIG_DEBUG_HIGHMEM below covers almost the whole
 * body (its #endif is just before the final dec_preempt_count): in
 * non-debug builds this function only drops the preempt count and the
 * fixmap pte is left in place to be recycled by the next kmap_atomic.
 * In debug builds the pte is validated and cleared so stale users Oops.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	/* Lowmem address: kmap_atomic() returned page_address(),
	 * nothing was mapped - just drop the preempt count.
	 */
	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	/* Address must be exactly the slot kmap_atomic() chose. */
	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	dec_preempt_count();
	preempt_check_resched();
}
|
||||
|
||||
/* We may be fed a pagetable here by ptep_to_xxx and others. */
/*
 * kmap_atomic_to_page() - struct page backing a kernel virtual address.
 *
 * Three address regions are handled:
 *  - below SRMMU_NOCACHE_VADDR: ordinary lowmem, use virt_to_page();
 *  - below PKMAP_BASE: the SRMMU nocache pool, translate via
 *    __nocache_pa();
 *  - otherwise it must be a fixmap kmap slot, looked up through the
 *    cached kmap_pte array.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
|
413
arch/sparc/mm/hypersparc.S
Normal file
413
arch/sparc/mm/hypersparc.S
Normal file
@@ -0,0 +1,413 @@
|
||||
/* $Id: hypersparc.S,v 1.18 2001/12/21 04:56:15 davem Exp $
|
||||
* hypersparc.S: High speed Hypersparc mmu/cache operations.
|
||||
*
|
||||
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
|
||||
*/
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/psr.h>
|
||||
#include <asm/asm_offsets.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtsrmmu.h>
|
||||
#include <linux/config.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
	.text
	.align	4

	.globl	hypersparc_flush_cache_all, hypersparc_flush_cache_mm
	.globl	hypersparc_flush_cache_range, hypersparc_flush_cache_page
	.globl	hypersparc_flush_page_to_ram
	.globl	hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
	.globl	hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
	.globl	hypersparc_flush_tlb_range, hypersparc_flush_tlb_page

	/* Flush the whole cache one line at a time via the context
	 * flush ASI, counting down from vac_cache_size by
	 * vac_line_size; the retl delay slot flushes the entire
	 * I-cache (ASI_M_FLUSH_IWHOLE).
	 */
hypersparc_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
	sethi	%hi(vac_cache_size), %g4
	ld	[%g4 + %lo(vac_cache_size)], %g5
	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %g2
1:
	subcc	%g5, %g2, %g5			! hyper_flush_unconditional_combined
	bne	1b
	sta	%g0, [%g5] ASI_M_FLUSH_CTX
	retl
	sta	%g0, [%g0] ASI_M_FLUSH_IWHOLE	! hyper_flush_whole_icache
|
||||
|
||||
	/* We expand the window flush to get maximum performance. */
	/* %o0 = mm.  On UP, skip entirely if the mm has no MMU
	 * context (context == -1).  The loop is unrolled 8x: seven
	 * pre-computed line-size multiples in %o1,%g1-%g5,%o4 plus
	 * the stride (8 lines) in %o5, flushing user-space lines
	 * from the top of the cache down to zero.
	 */
hypersparc_flush_cache_mm:
#ifndef CONFIG_SMP
	ld	[%o0 + AOFF_mm_context], %g1
	cmp	%g1, -1
	be	hypersparc_flush_cache_mm_out
#endif
	WINDOW_FLUSH(%g4, %g5)

	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o1
	sethi	%hi(vac_cache_size), %g2
	ld	[%g2 + %lo(vac_cache_size)], %o0
	add	%o1, %o1, %g1		! 2 * line_size
	add	%o1, %g1, %g2		! 3 * line_size
	add	%o1, %g2, %g3		! 4 * line_size
	add	%o1, %g3, %g4		! 5 * line_size
	add	%o1, %g4, %g5		! 6 * line_size
	add	%o1, %g5, %o4		! 7 * line_size
	add	%o1, %o4, %o5		! 8 * line_size = loop stride

	/* BLAMMO! */
1:
	subcc	%o0, %o5, %o0				! hyper_flush_cache_user
	sta	%g0, [%o0 + %g0] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %o1] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g1] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g4] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g5] ASI_M_FLUSH_USER
	bne	1b
	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_USER
hypersparc_flush_cache_mm_out:
	retl
	nop
|
||||
|
||||
	/* The things we do for performance... */
	/* %o0 = vma (vm_mm loaded from offset 0), %o1 = start,
	 * %o2 = end.  If the page-rounded range exceeds 4x the cache
	 * size, fall through to a whole-user-space flush; otherwise
	 * branch to 0: and flush page by page under the target mm's
	 * MMU context, probing each page first so only resident pages
	 * are flushed.
	 */
hypersparc_flush_cache_range:
	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
#ifndef CONFIG_SMP
	ld	[%o0 + AOFF_mm_context], %g1
	cmp	%g1, -1
	be	hypersparc_flush_cache_range_out
#endif
	WINDOW_FLUSH(%g4, %g5)

	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o4
	sethi	%hi(vac_cache_size), %g2
	ld	[%g2 + %lo(vac_cache_size)], %o3

	/* Here comes the fun part... */
	/* Round range to pages, compute line-size multiples, and
	 * compare range length (%g4) against cache_size * 4 (%g5).
	 */
	add	%o2, (PAGE_SIZE - 1), %o2
	andn	%o1, (PAGE_SIZE - 1), %o1
	add	%o4, %o4, %o5
	andn	%o2, (PAGE_SIZE - 1), %o2
	add	%o4, %o5, %g1
	sub	%o2, %o1, %g4
	add	%o4, %g1, %g2
	sll	%o3, 2, %g5
	add	%o4, %g2, %g3
	cmp	%g4, %g5
	add	%o4, %g3, %g4
	blu	0f
	add	%o4, %g4, %g5
	add	%o4, %g5, %g7

	/* Flush entire user space, believe it or not this is quicker
	 * than page at a time flushings for range > (cache_size<<2).
	 */
1:
	subcc	%o3, %g7, %o3
	sta	%g0, [%o3 + %g0] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %o4] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %o5] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g1] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g2] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g3] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g4] ASI_M_FLUSH_USER
	bne	1b
	sta	%g0, [%o3 + %g5] ASI_M_FLUSH_USER
	retl
	nop

	/* Below our threshold, flush one page at a time. */
	/* Switch the MMU to the target context (old context saved in
	 * %o3), then walk pages from end to start.  The FLUSH_PROBE
	 * load skips non-resident pages; 2: is the unrolled per-page
	 * line flush.  Afterwards read the fault status register to
	 * clear any probe-induced fault and restore the old context.
	 */
0:
	ld	[%o0 + AOFF_mm_context], %o0
	mov	SRMMU_CTX_REG, %g7
	lda	[%g7] ASI_M_MMUREGS, %o3
	sta	%o0, [%g7] ASI_M_MMUREGS
	add	%o2, -PAGE_SIZE, %o0
1:
	or	%o0, 0x400, %g7
	lda	[%g7] ASI_M_FLUSH_PROBE, %g7
	orcc	%g7, 0, %g0
	be,a	3f
	mov	%o0, %o2
	add	%o4, %g5, %g7
2:
	sub	%o2, %g7, %o2
	sta	%g0, [%o2 + %g0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %g1] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %g2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %g3] ASI_M_FLUSH_PAGE
	andcc	%o2, 0xffc, %g0
	sta	%g0, [%o2 + %g4] ASI_M_FLUSH_PAGE
	bne	2b
	sta	%g0, [%o2 + %g5] ASI_M_FLUSH_PAGE
3:
	cmp	%o2, %o1
	bne	1b
	add	%o2, -PAGE_SIZE, %o0
	mov	SRMMU_FAULT_STATUS, %g5
	lda	[%g5] ASI_M_MMUREGS, %g0
	mov	SRMMU_CTX_REG, %g7
	sta	%o3, [%g7] ASI_M_MMUREGS
hypersparc_flush_cache_range_out:
	retl
	nop
|
||||
|
||||
	/* HyperSparc requires a valid mapping where we are about to flush
	 * in order to check for a physical tag match during the flush.
	 */
	/* Verified, my ass... */
	/* %o0 = vma (vm_mm loaded from offset 0), %o1 = page address.
	 * Switch to the mm's context (old context saved in %o2),
	 * probe the page and only flush its lines if resident, then
	 * clear fault status and restore the old context.
	 */
hypersparc_flush_cache_page:
	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
	ld	[%o0 + AOFF_mm_context], %g2
#ifndef CONFIG_SMP
	cmp	%g2, -1
	be	hypersparc_flush_cache_page_out
#endif
	WINDOW_FLUSH(%g4, %g5)

	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o4
	mov	SRMMU_CTX_REG, %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	lda	[%o3] ASI_M_MMUREGS, %o2
	sta	%g2, [%o3] ASI_M_MMUREGS
	or	%o1, 0x400, %o5
	lda	[%o5] ASI_M_FLUSH_PROBE, %g1
	orcc	%g0, %g1, %g0			! page resident?
	be	2f
	add	%o4, %o4, %o5
	sub	%o1, -PAGE_SIZE, %o1		! start past the page, loop down
	add	%o4, %o5, %g1
	add	%o4, %g1, %g2
	add	%o4, %g2, %g3
	add	%o4, %g3, %g4
	add	%o4, %g4, %g5
	add	%o4, %g5, %g7

	/* BLAMMO! */
1:
	sub	%o1, %g7, %o1
	sta	%g0, [%o1 + %g0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g1] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
	andcc	%o1, 0xffc, %g0
	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
	bne	1b
	sta	%g0, [%o1 + %g5] ASI_M_FLUSH_PAGE
2:
	mov	SRMMU_FAULT_STATUS, %g7
	mov	SRMMU_CTX_REG, %g4
	lda	[%g7] ASI_M_MMUREGS, %g0	! clear probe fault status
	sta	%o2, [%g4] ASI_M_MMUREGS	! restore old context
hypersparc_flush_cache_page_out:
	retl
	nop
|
||||
|
||||
	/* %o1 = address of the (two-word) signal trampoline; a pair of
	 * flush instructions synchronizes the I-cache for it.
	 */
hypersparc_flush_sig_insns:
	flush	%o1
	retl
	flush	%o1 + 4

	/* HyperSparc is copy-back. */
	/* %o0 = kernel virtual page address.  Probe first and skip
	 * the flush when the page is not resident; otherwise run the
	 * unrolled per-line page flush, then read the fault status
	 * register (in the retl delay slot) to clear any probe fault.
	 */
hypersparc_flush_page_to_ram:
	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o4
	andn	%o0, (PAGE_SIZE - 1), %o0
	add	%o4, %o4, %o5
	or	%o0, 0x400, %g7
	lda	[%g7] ASI_M_FLUSH_PROBE, %g5
	add	%o4, %o5, %g1
	orcc	%g5, 0, %g0
	be	2f
	add	%o4, %g1, %g2
	add	%o4, %g2, %g3
	sub	%o0, -PAGE_SIZE, %o0		! loop down from page end
	add	%o4, %g3, %g4
	add	%o4, %g4, %g5
	add	%o4, %g5, %g7

	/* BLAMMO! */
1:
	sub	%o0, %g7, %o0
	sta	%g0, [%o0 + %g0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %g1] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_PAGE
	andcc	%o0, 0xffc, %g0
	sta	%g0, [%o0 + %g4] ASI_M_FLUSH_PAGE
	bne	1b
	sta	%g0, [%o0 + %g5] ASI_M_FLUSH_PAGE
2:
	mov	SRMMU_FAULT_STATUS, %g1
	retl
	lda	[%g1] ASI_M_MMUREGS, %g0

	/* HyperSparc is IO cache coherent. */
hypersparc_flush_page_for_dma:
	retl
	nop
|
||||
|
||||
	/* It was noted that at boot time a TLB flush all in a delay slot
	 * can deliver an illegal instruction to the processor if the timing
	 * is just right...
	 */
	/* Probe-type 0x400 flushes the entire TLB (hence not placed in
	 * the retl delay slot, per the note above).
	 */
hypersparc_flush_tlb_all:
	mov	0x400, %g1
	sta	%g0, [%g1] ASI_M_FLUSH_PROBE
	retl
	nop

	/* %o0 = mm.  Temporarily switch to the mm's context (old
	 * context saved in %g5, restored in the retl delay slot) and
	 * issue a context-level TLB flush (probe type 0x300).  On UP,
	 * skip if the mm has no context.
	 */
hypersparc_flush_tlb_mm:
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o1
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o1, -1
	be	hypersparc_flush_tlb_mm_out
#endif
	mov	0x300, %g2
	sta	%o1, [%g1] ASI_M_MMUREGS
	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_mm_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS

	/* %o0 = vma (vm_mm at offset 0), %o1 = start, %o2 = end.
	 * Under the mm's context, issue region-level flushes (probe
	 * type 0x200) one SRMMU pgdir span at a time until past end.
	 */
hypersparc_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	hypersparc_flush_tlb_range_out
#endif
	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta	%o3, [%g1] ASI_M_MMUREGS
	and	%o1, %o4, %o1
	add	%o1, 0x200, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
1:
	sub	%o1, %o4, %o1		! %o4 is a negative mask: steps forward
	cmp	%o1, %o2
	blu,a	1b
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_range_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS

	/* %o0 = vma (vm_mm at offset 0), %o1 = address.  Page-level
	 * TLB flush (probe type 0 - page-aligned address) under the
	 * mm's context; old context restored in the retl delay slot.
	 */
hypersparc_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	hypersparc_flush_tlb_page_out
#endif
	lda	[%g1] ASI_M_MMUREGS, %g5
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_page_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS
|
||||
|
||||
	__INIT

	/* High speed page clear/copy. */
	/* %o0 = page address.  Uses the HyperSparc block-fill ASI to
	 * zero a page: 8 x 32-byte block fills per iteration, 16
	 * iterations (16 * 256 bytes = one 4K page).
	 */
hypersparc_bzero_1page:
/* NOTE: This routine has to be shorter than 40insns --jj */
	clr	%g1
	mov	32, %g2
	mov	64, %g3
	mov	96, %g4
	mov	128, %g5
	mov	160, %g7
	mov	192, %o2
	mov	224, %o3
	mov	16, %o1			! iteration count
1:
	stda	%g0, [%o0 + %g0] ASI_M_BFILL
	stda	%g0, [%o0 + %g2] ASI_M_BFILL
	stda	%g0, [%o0 + %g3] ASI_M_BFILL
	stda	%g0, [%o0 + %g4] ASI_M_BFILL
	stda	%g0, [%o0 + %g5] ASI_M_BFILL
	stda	%g0, [%o0 + %g7] ASI_M_BFILL
	stda	%g0, [%o0 + %o2] ASI_M_BFILL
	stda	%g0, [%o0 + %o3] ASI_M_BFILL
	subcc	%o1, 1, %o1
	bne	1b
	add	%o0, 256, %o0

	retl
	nop

	/* %o0 = dst page, %o1 = src page.  Block-copy ASI moves 32
	 * bytes per sta; 8 copies per iteration, 16 iterations for
	 * a 4K page.  %o2 holds src - dst so [%o0 + %o2] is the
	 * source address for each block.
	 */
hypersparc_copy_1page:
/* NOTE: This routine has to be shorter than 70insns --jj */
	sub	%o1, %o0, %o2		! difference
	mov	16, %g1
1:
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	subcc	%g1, 1, %g1
	bne	1b
	add	%o0, 32, %o0

	retl
	nop
|
||||
|
||||
	.globl	hypersparc_setup_blockops
	/* Boot-time patching: copy the word stream of
	 * hypersparc_bzero_1page over the generic bzero_1page, and
	 * hypersparc_copy_1page over __copy_1page (each copy loop
	 * runs until %o1 reaches the next routine's start address),
	 * then flush the whole I-cache so the patched code is seen.
	 */
hypersparc_setup_blockops:
	sethi	%hi(bzero_1page), %o0
	or	%o0, %lo(bzero_1page), %o0
	sethi	%hi(hypersparc_bzero_1page), %o1
	or	%o1, %lo(hypersparc_bzero_1page), %o1
	sethi	%hi(hypersparc_copy_1page), %o2
	or	%o2, %lo(hypersparc_copy_1page), %o2
	ld	[%o1], %o4
1:
	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	ld	[%o1], %o4
	sethi	%hi(__copy_1page), %o0
	or	%o0, %lo(__copy_1page), %o0
	sethi	%hi(hypersparc_setup_blockops), %o2
	or	%o2, %lo(hypersparc_setup_blockops), %o2
	ld	[%o1], %o4
1:
	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	ld	[%o1], %o4
	sta	%g0, [%g0] ASI_M_FLUSH_IWHOLE
	retl
	nop
|
515
arch/sparc/mm/init.c
Normal file
515
arch/sparc/mm/init.c
Normal file
@@ -0,0 +1,515 @@
|
||||
/* $Id: init.c,v 1.103 2001/11/19 19:03:08 davem Exp $
|
||||
* linux/arch/sparc/mm/init.c
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
* Copyright (C) 2000 Anton Blanchard (anton@samba.org)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/bootmem.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/vac-ops.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/vaddrs.h>
|
||||
#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
|
||||
#include <asm/tlb.h>
|
||||
|
||||
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
|
||||
|
||||
unsigned long *sparc_valid_addr_bitmap;
|
||||
|
||||
unsigned long phys_base;
|
||||
unsigned long pfn_base;
|
||||
|
||||
unsigned long page_kernel;
|
||||
|
||||
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
|
||||
unsigned long sparc_unmapped_base;
|
||||
|
||||
struct pgtable_cache_struct pgt_quicklists;
|
||||
|
||||
/* References to section boundaries */
|
||||
extern char __init_begin, __init_end, _start, _end, etext , edata;
|
||||
|
||||
/* Initial ramdisk setup */
|
||||
extern unsigned int sparc_ramdisk_image;
|
||||
extern unsigned int sparc_ramdisk_size;
|
||||
|
||||
unsigned long highstart_pfn, highend_pfn;
|
||||
|
||||
pte_t *kmap_pte;
|
||||
pgprot_t kmap_prot;
|
||||
|
||||
/* Look up the kernel pte that maps a fixmap virtual address. */
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

/*
 * kmap_init() - cache the pte for the first kmap fixmap slot and the
 * pgprot used for atomic kmaps (SRMMU valid + privileged + cached).
 * kmap_atomic()/kunmap_atomic() in highmem.c index off kmap_pte.
 */
void __init kmap_init(void)
{
	/* cache the first kmap pte */
	kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
|
||||
|
||||
/*
 * show_mem() - print a summary of memory state (free areas, swap,
 * total/free pages) to the kernel log.  The page-table cache
 * statistics are disabled because the counters they reference are
 * not defined in this tree.
 */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", totalram_pages);
	printk("%d free pages\n", nr_free_pages());
#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
	printk("%ld pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP
	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
		printk("%ld entries in page dir cache\n",pgd_cache_size);
#endif
#endif
}
|
||||
|
||||
/*
 * sparc_context_init() - boot-time setup of the MMU context lists.
 * @numctx: number of hardware contexts supported.
 *
 * Allocates the ctx_list_pool from bootmem, numbers each entry, and
 * places every context on the free list (ctx_free); ctx_used starts
 * empty.
 */
void __init sparc_context_init(int numctx)
{
	int ctx;

	ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);

	for(ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;
	}
	/* Both lists start as empty circular lists. */
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for(ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}
|
||||
|
||||
extern unsigned long cmdline_memory_size;
|
||||
unsigned long last_valid_pfn;
|
||||
|
||||
/*
 * calc_highpages() - count the pages above max_low_pfn.
 *
 * Walks the sp_banks physical memory table and sums, per bank, the
 * pfns that lie above the lowmem limit; the result is the number of
 * HIGHMEM pages.
 */
unsigned long calc_highpages(void)
{
	int i;
	int nr = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

		/* Bank entirely in lowmem: contributes nothing. */
		if (end_pfn <= max_low_pfn)
			continue;

		/* Clip the low part of a bank that straddles the limit. */
		if (start_pfn < max_low_pfn)
			start_pfn = max_low_pfn;

		nr += end_pfn - start_pfn;
	}

	return nr;
}
|
||||
|
||||
/*
 * calc_max_low_pfn() - compute the highest directly-mappable pfn.
 *
 * The hard ceiling is pfn_base + SRMMU_MAXMEM pages.  Walks sp_banks
 * (assumed sorted by base address - TODO confirm) and, if a hole in
 * physical memory ends below the ceiling, lowers the result to the
 * end of the last bank before the hole.
 */
unsigned long calc_max_low_pfn(void)
{
	int i;
	unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
	unsigned long curr_pfn, last_pfn;

	last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
	for (i = 1; sp_banks[i].num_bytes != 0; i++) {
		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;

		if (curr_pfn >= tmp) {
			/* This bank starts beyond the ceiling; lowmem ends
			 * at the previous bank if that ended even earlier.
			 */
			if (last_pfn < tmp)
				tmp = last_pfn;
			break;
		}

		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
	}

	return tmp;
}
|
||||
|
||||
/*
 * bootmem_init() - set up the boot-time memory allocator.
 * @pages_avail: out-parameter, number of page frames handed to the
 *               bootmem allocator (minus later reservations).
 *
 * Walks sp_banks to find the end of physical memory (honouring a
 * mem= command line limit), decides the lowmem/highmem split,
 * relocates the bootmap past a boot-loader-provided initrd if needed,
 * registers all usable banks with the bootmem allocator, and reserves
 * the initrd, the kernel image, and the bootmap itself.
 * Returns max_pfn.
 */
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				/* mem= limit hit inside this bank: trim the
				 * bank and terminate the table here.
				 */
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	/* Start with page aligned address of last symbol in kernel
	 * image.
	 */
	start_pfn  = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	max_pfn = end_of_phys_memory >> PAGE_SHIFT;

	max_low_pfn = max_pfn;
	highstart_pfn = highend_pfn = max_pfn;

	/* Memory beyond what SRMMU can map directly becomes HIGHMEM. */
	if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
		highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
		max_low_pfn = calc_max_low_pfn();
		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		    calc_highpages() >> (20 - PAGE_SHIFT));
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image) {
		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
			sparc_ramdisk_image -= KERNBASE;
		initrd_start = sparc_ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
		                 	 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		/* If the initrd sits where the bootmap would go, move the
		 * bootmap to just past the initrd.
		 */
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
					 max_low_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	*pages_avail = 0;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long curr_pfn, last_pfn;

		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		if (curr_pfn >= max_low_pfn)
			break;

		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = (last_pfn - curr_pfn) << PAGE_SHIFT;
		*pages_avail += last_pfn - curr_pfn;

		free_bootmem(sp_banks[i].base_addr, size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		/* Reserve the initrd image area. */
		size = initrd_end - initrd_start;
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		/* Switch initrd_start/end to virtual addresses. */
		initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
		initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
	size = (start_pfn << PAGE_SHIFT) - phys_base;
	reserve_bootmem(phys_base, size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return max_pfn;
}
|
||||
|
||||
/*
|
||||
* check_pgt_cache
|
||||
*
|
||||
* This is called at the end of unmapping of VMA (zap_page_range),
|
||||
* to rescan the page cache for architecture specific things,
|
||||
* presumably something like sun4/sun4c PMEGs. Most architectures
|
||||
* define check_pgt_cache empty.
|
||||
*
|
||||
* We simply copy the 2.4 implementation for now.
|
||||
*/
|
||||
/* Low/high water marks for the page table cache (see comment above). */
int pgt_cache_water[2] = { 25, 50 };

/* Trim the page table cache back toward the low water mark once it
 * has grown past the high water mark. */
void check_pgt_cache(void)
{
	do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
|
||||
|
||||
/*
|
||||
* paging_init() sets up the page tables: We call the MMU specific
|
||||
* init routine based upon the Sun model type on the Sparc.
|
||||
*
|
||||
*/
|
||||
extern void sun4c_paging_init(void);
|
||||
extern void srmmu_paging_init(void);
|
||||
extern void device_scan(void);
|
||||
|
||||
void __init paging_init(void)
|
||||
{
|
||||
switch(sparc_cpu_model) {
|
||||
case sun4c:
|
||||
case sun4e:
|
||||
case sun4:
|
||||
sun4c_paging_init();
|
||||
sparc_unmapped_base = 0xe0000000;
|
||||
BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
|
||||
break;
|
||||
case sun4m:
|
||||
case sun4d:
|
||||
srmmu_paging_init();
|
||||
sparc_unmapped_base = 0x50000000;
|
||||
BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
|
||||
break;
|
||||
default:
|
||||
prom_printf("paging_init: Cannot init paging on this Sparc\n");
|
||||
prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
|
||||
prom_printf("paging_init: Halting...\n");
|
||||
prom_halt();
|
||||
};
|
||||
|
||||
/* Initialize the protection map with non-constant, MMU dependent values. */
|
||||
protection_map[0] = PAGE_NONE;
|
||||
protection_map[1] = PAGE_READONLY;
|
||||
protection_map[2] = PAGE_COPY;
|
||||
protection_map[3] = PAGE_COPY;
|
||||
protection_map[4] = PAGE_READONLY;
|
||||
protection_map[5] = PAGE_READONLY;
|
||||
protection_map[6] = PAGE_COPY;
|
||||
protection_map[7] = PAGE_COPY;
|
||||
protection_map[8] = PAGE_NONE;
|
||||
protection_map[9] = PAGE_READONLY;
|
||||
protection_map[10] = PAGE_SHARED;
|
||||
protection_map[11] = PAGE_SHARED;
|
||||
protection_map[12] = PAGE_READONLY;
|
||||
protection_map[13] = PAGE_READONLY;
|
||||
protection_map[14] = PAGE_SHARED;
|
||||
protection_map[15] = PAGE_SHARED;
|
||||
btfixup();
|
||||
device_scan();
|
||||
}
|
||||
|
||||
/* Cache alias table; defined here for use elsewhere — not referenced
 * in this file. */
struct cache_palias *sparc_aliases;

/* Set one bit per megabyte (addr >> 20) of every physical memory bank
 * in sparc_valid_addr_bitmap, so later passes (see mem_init) can skip
 * holes in the physical address space. */
static void __init taint_real_pages(void)
{
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++) {
		unsigned long start, end;

		start = sp_banks[i].base_addr;
		end = start + sp_banks[i].num_bytes;

		/* Stepping by PAGE_SIZE re-sets the same megabyte bit
		 * many times; harmless, just simple. */
		while (start < end) {
			set_bit(start >> 20, sparc_valid_addr_bitmap);
			start += PAGE_SIZE;
		}
	}
}
|
||||
|
||||
/* Release the highmem pfns [start_pfn, end_pfn) to the page
 * allocator: clear the reserved bit, mark each page highmem, give it
 * a single reference and free it, counting it in totalhigh_pages. */
void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long tmp;

#ifdef CONFIG_DEBUG_HIGHMEM
	printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
#endif

	for (tmp = start_pfn; tmp < end_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		set_page_count(page, 1);	/* reference consumed by __free_page */
		__free_page(page);
		totalhigh_pages++;
	}
}
|
||||
|
||||
/* Late boot memory bring-up: sanity-check the pkmap/fixmap layout,
 * allocate and fill the valid-address bitmap, hand bootmem over to
 * the page allocator, free the highmem parts of each bank and print
 * the "Memory: ..." banner. */
void __init mem_init(void)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	int reservedpages = 0;
	int i;

	/* The persistent-kmap window must not run into the fixmap. */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		prom_printf("BUG: fixmap and pkmap areas overlap\n");
		prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
			    PKMAP_BASE,
			    (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
			    FIXADDR_START);
		prom_printf("Please mail sparclinux@vger.kernel.org.\n");
		prom_halt();
	}

	/* Saves us work later. */
	memset((void *)&empty_zero_page, 0, PAGE_SIZE);

	/* One bit per megabyte of physical address space, rounded up
	 * to whole 32-bit words (the extra +1), allocated in bytes
	 * (i << 2). */
	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
	i += 1;
	sparc_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);

	if (sparc_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc_valid_addr_bitmap, 0, i << 2);

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages = free_all_bootmem();

	/* Free the highmem portion of every physical bank. */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

		num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;

		if (end_pfn <= highstart_pfn)
			continue;

		if (start_pfn < highstart_pfn)
			start_pfn = highstart_pfn;

		map_high_region(start_pfn, end_pfn);
	}

	totalram_pages += totalhigh_pages;

	/* Byte sizes of the text/data/init sections, then converted
	 * to page counts. */
	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	/* Ignore memory holes for the purpose of counting reserved pages */
	for (i=0; i < max_low_pfn; i++)
		if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
		    && PageReserved(pfn_to_page(i)))
			reservedpages++;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT - 10),
	       codepages << (PAGE_SHIFT-10),
	       reservedpages << (PAGE_SHIFT - 10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       totalhigh_pages << (PAGE_SHIFT-10));
}
|
||||
|
||||
void free_initmem (void)
|
||||
{
|
||||
unsigned long addr;
|
||||
|
||||
addr = (unsigned long)(&__init_begin);
|
||||
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
|
||||
struct page *p;
|
||||
|
||||
p = virt_to_page(addr);
|
||||
|
||||
ClearPageReserved(p);
|
||||
set_page_count(p, 1);
|
||||
__free_page(p);
|
||||
totalram_pages++;
|
||||
num_physpages++;
|
||||
}
|
||||
printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
if (start < end)
|
||||
printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
|
||||
for (; start < end; start += PAGE_SIZE) {
|
||||
struct page *p = virt_to_page(start);
|
||||
|
||||
ClearPageReserved(p);
|
||||
set_page_count(p, 1);
|
||||
__free_page(p);
|
||||
num_physpages++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Write a page's cache contents back to memory; pages with no kernel
 * virtual mapping (page_address() == NULL) are skipped. */
void sparc_flush_page_to_ram(struct page *page)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	if (!kaddr)
		return;
	__flush_page_to_ram(kaddr);
}
|
318
arch/sparc/mm/io-unit.c
Normal file
318
arch/sparc/mm/io-unit.c
Normal file
@@ -0,0 +1,318 @@
|
||||
/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
|
||||
* io-unit.c: IO-UNIT specific routines for memory management.
|
||||
*
|
||||
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sbus.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/io-unit.h>
|
||||
#include <asm/mxcc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/dma.h>
|
||||
|
||||
/* #define IOUNIT_DEBUG */
|
||||
#ifdef IOUNIT_DEBUG
|
||||
#define IOD(x) printk(x)
|
||||
#else
|
||||
#define IOD(x) do { } while (0)
|
||||
#endif
|
||||
|
||||
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
|
||||
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
|
||||
|
||||
void __init
|
||||
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
|
||||
{
|
||||
iopte_t *xpt, *xptend;
|
||||
struct iounit_struct *iounit;
|
||||
struct linux_prom_registers iommu_promregs[PROMREG_MAX];
|
||||
struct resource r;
|
||||
|
||||
iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
|
||||
|
||||
memset(iounit, 0, sizeof(*iounit));
|
||||
iounit->limit[0] = IOUNIT_BMAP1_START;
|
||||
iounit->limit[1] = IOUNIT_BMAP2_START;
|
||||
iounit->limit[2] = IOUNIT_BMAPM_START;
|
||||
iounit->limit[3] = IOUNIT_BMAPM_END;
|
||||
iounit->rotor[1] = IOUNIT_BMAP2_START;
|
||||
iounit->rotor[2] = IOUNIT_BMAPM_START;
|
||||
|
||||
xpt = NULL;
|
||||
if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
|
||||
sizeof(iommu_promregs)) != -1) {
|
||||
prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
|
||||
memset(&r, 0, sizeof(r));
|
||||
r.flags = iommu_promregs[2].which_io;
|
||||
r.start = iommu_promregs[2].phys_addr;
|
||||
xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
|
||||
}
|
||||
if(!xpt) panic("Cannot map External Page Table.");
|
||||
|
||||
sbus->iommu = (struct iommu_struct *)iounit;
|
||||
iounit->page_table = xpt;
|
||||
|
||||
for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
|
||||
xpt < xptend;)
|
||||
iopte_val(*xpt++) = 0;
|
||||
}
|
||||
|
||||
/* One has to hold iounit->lock to call this */
/* Allocate consecutive XPT slots covering [vaddr, vaddr+size) and
 * return the DVMA address.  The low nibbles of 'i' act as a small
 * stack of bitmap area numbers (1..3) giving the order in which the
 * three areas are tried for this request size. */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Pop the next area; scan from its rotor to its limit for a
	 * run of npages zero bits, wrapping to the area start once. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;	/* area exhausted: pop the next candidate */
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	/* IOPTEs hold phys >> 4 (see MKIOPTE), so +0x100 advances the
	 * mapping by exactly one 4K page per slot. */
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
|
||||
|
||||
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
|
||||
{
|
||||
unsigned long ret, flags;
|
||||
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
|
||||
|
||||
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
|
||||
sg[sz].dvma_length = sg[sz].length;
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
/* Release the XPT slots of a single mapping made by
 * iounit_get_scsi_one(). */
static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
	unsigned long flags;
	unsigned long first, npages, slot;

	spin_lock_irqsave(&iounit->lock, flags);
	npages = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	first = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)first, (long)npages+first));
	for (slot = first; slot < first + npages; slot++)
		clear_bit(slot, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}
|
||||
|
||||
static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned long vaddr, len;
|
||||
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
|
||||
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
|
||||
for (len += vaddr; vaddr < len; vaddr++)
|
||||
clear_bit(vaddr, iounit->bmap);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
/* Map a consistent-DMA buffer: for each page of [addr, addr+len)
 * install a kernel pte for the backing page at 'va' and mirror the
 * mapping into the XPT of every IO-UNIT on every sbus.
 * NOTE(review): ptep from pte_offset_map() is never pte_unmap()ed —
 * looks like an atomic-kmap leak under highmem; confirm. */
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* XPT slot index for this DVMA address. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
|
||||
|
||||
/* Tear down a consistent-DMA mapping; intentionally left
 * unimplemented (see the XXX below). */
static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
|
||||
|
||||
/* XXX We do not pass sbus device here, bad. */
/* Translate a DVMA address back to its struct page by reading the
 * XPT entry (IOUNIT iopte holds phys >> 4, hence PAGE_SHIFT-4). */
static struct page *iounit_translate_dvma(unsigned long addr)
{
	struct sbus_bus *sbus = sbus_root;	/* They are all the same */
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
	int i;
	iopte_t *iopte;

	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
	iopte = (iopte_t *)(iounit->page_table + i);
	return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
}
|
||||
#endif
|
||||
|
||||
/* FIXME: Write this.  For now a no-op that hands the buffer back
 * unchanged. */
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}
|
||||
|
||||
/* FIXME: Write this.  Nothing to undo for iounit_lockarea() yet. */
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
}
|
||||
|
||||
/* Register the IO-UNIT implementations of the mmu_* DMA operations
 * with the boot-time fixup machinery. */
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}
|
||||
|
||||
/* Reserve npages of XPT slots for a consistent-DMA area and return
 * the corresponding DVMA base address.  Same rotor/area-stack search
 * as iounit_get_area(), but no ioptes are written yet — the caller
 * fills slots one page at a time via iounit_map_dma_page(). */
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	i = 0x0213;	/* area search order: 3, then 1, then 2 */
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;	/* area exhausted: try the next one */
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}
|
||||
|
||||
/* Point the XPT slot for DVMA address 'vaddr' (reserved earlier via
 * iounit_map_dma_init) at the page containing 'addr'; returns the
 * DVMA address with the sub-page offset applied. */
__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
}
|
475
arch/sparc/mm/iommu.c
Normal file
475
arch/sparc/mm/iommu.c
Normal file
@@ -0,0 +1,475 @@
|
||||
/*
|
||||
* iommu.c: IOMMU specific routines for memory management.
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
|
||||
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sbus.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/mxcc.h>
|
||||
#include <asm/mbus.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/bitext.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/dma.h>
|
||||
|
||||
/*
|
||||
* This can be sized dynamically, but we will do this
|
||||
* only when we have a guidance about actual I/O pressures.
|
||||
*/
|
||||
#define IOMMU_RNGE IOMMU_RNGE_256MB
|
||||
#define IOMMU_START 0xF0000000
|
||||
#define IOMMU_WINSIZE (256*1024*1024U)
|
||||
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
|
||||
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
|
||||
|
||||
/* srmmu.c */
|
||||
extern int viking_mxcc_present;
|
||||
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
|
||||
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
|
||||
extern int flush_page_for_dma_global;
|
||||
static int viking_flush;
|
||||
/* viking.S */
|
||||
extern void viking_flush_page(unsigned long page);
|
||||
extern void viking_mxcc_flush_page(unsigned long page);
|
||||
|
||||
/*
|
||||
* Values precomputed according to CPU type.
|
||||
*/
|
||||
static unsigned int ioperm_noc; /* Consistent mapping iopte flags */
|
||||
static pgprot_t dvma_prot; /* Consistent mapping pte flags */
|
||||
|
||||
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
|
||||
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
|
||||
|
||||
/* Probe and set up one SRMMU IOMMU: map its registers from the PROM
 * "reg" property, enable a 256MB DVMA window, allocate/clear the
 * IOMMU page table and set up its allocation bitmap (colored on
 * HyperSparc for cache coherency). */
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
	unsigned int impl, vers;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}
	iommu->regs = NULL;
	if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			     sizeof(iommu_promregs)) != -1) {
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[0].which_io;
		r.start = iommu_promregs[0].phys_addr;
		iommu->regs = (struct iommu_regs *)
			sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
	}
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	/* Implementation/version fields from the control register. */
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	/* Program the DVMA range and enable the IOMMU. */
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	sbus->iommu = iommu;
}
|
||||
|
||||
/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	/* Page-align the span of iopte entries to be flushed. */
	start = (unsigned long)iopte & PAGE_MASK;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	/* Use the cpu-specific page flush selected at boot: MXCC,
	 * Viking, or the generic write-back. */
	if (viking_mxcc_present) {
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while(start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while(start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
|
||||
|
||||
/* Allocate npages consecutive ioptes (colored by the page's pfn),
 * point them at the pages starting at 'page', invalidate the touched
 * IOMMU tlb entries and return the first bus address. */
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	/* Make sure the ioptes themselves reach memory. */
	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
|
||||
|
||||
static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
|
||||
struct sbus_bus *sbus)
|
||||
{
|
||||
unsigned long off;
|
||||
int npages;
|
||||
struct page *page;
|
||||
u32 busa;
|
||||
|
||||
off = (unsigned long)vaddr & ~PAGE_MASK;
|
||||
npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
|
||||
busa = iommu_get_one(page, npages, sbus);
|
||||
return busa + off;
|
||||
}
|
||||
|
||||
/* Variant for IO-coherent cpus: no cache flush before mapping. */
static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	return iommu_get_scsi_one(vaddr, len, sbus);
}
|
||||
|
||||
/* Variant for cpus whose flush_page_for_dma flushes globally: one
 * flush_page_for_dma(0) covers everything, then map. */
static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(vaddr, len, sbus);
}
|
||||
|
||||
static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
|
||||
{
|
||||
unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
|
||||
|
||||
while(page < ((unsigned long)(vaddr + len))) {
|
||||
flush_page_for_dma(page);
|
||||
page += PAGE_SIZE;
|
||||
}
|
||||
return iommu_get_scsi_one(vaddr, len, sbus);
|
||||
}
|
||||
|
||||
static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
|
||||
{
|
||||
int n;
|
||||
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
|
||||
sg->dvma_length = (__u32) sg->length;
|
||||
sg++;
|
||||
}
|
||||
}
|
||||
|
||||
static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
|
||||
{
|
||||
int n;
|
||||
|
||||
flush_page_for_dma(0);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
|
||||
sg->dvma_length = (__u32) sg->length;
|
||||
sg++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Map a scatterlist, flushing each mapped page first; unmapped
 * highmem entries (page_address == NULL) are not flushed. */
static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg->page)) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}
|
||||
|
||||
/* Release npages ioptes starting at bus address busa: zero each
 * entry, invalidate its IOMMU tlb slot and free the bitmap range. */
static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	int i;

	/* Bus address below the DVMA window is a caller bug. */
	if (busa < iommu->start)
		BUG();
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}
|
||||
|
||||
/* Undo iommu_get_scsi_one(): release the ioptes covering the
 * buffer's pages. */
static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long off = vaddr & ~PAGE_MASK;
	int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;

	iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}
|
||||
|
||||
static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
|
||||
{
|
||||
int n;
|
||||
|
||||
while(sz != 0) {
|
||||
--sz;
|
||||
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
|
||||
sg->dvma_address = 0x21212121;
|
||||
sg++;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
/* Map a consistent-DMA area: allocate ioptes colored by physical
 * address, remap each backing page at 'va' non-cacheable in the
 * kernel page tables, flush, and return the bus address in *pba.
 * NOTE(review): ptep from pte_offset_map() is never pte_unmap()ed —
 * looks like an atomic-kmap leak under highmem; confirm. */
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
	unsigned long page, end;
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* All three arguments must be page aligned. */
	if ((va & ~PAGE_MASK) != 0) BUG();
	if ((addr & ~PAGE_MASK) != 0) BUG();
	if ((len & ~PAGE_MASK) != 0) BUG();

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
	    addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			/* Push the page's current cache contents out
			 * before it is remapped non-cacheable. */
			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
|
||||
|
||||
static void iommu_unmap_dma_area(unsigned long busa, int len)
|
||||
{
|
||||
struct iommu_struct *iommu = sbus_root->iommu;
|
||||
iopte_t *iopte = iommu->page_table;
|
||||
unsigned long end;
|
||||
int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
|
||||
|
||||
if ((busa & ~PAGE_MASK) != 0) BUG();
|
||||
if ((len & ~PAGE_MASK) != 0) BUG();
|
||||
|
||||
iopte += ioptex;
|
||||
end = busa + len;
|
||||
while (busa < end) {
|
||||
iopte_val(*iopte++) = 0;
|
||||
busa += PAGE_SIZE;
|
||||
}
|
||||
flush_tlb_all();
|
||||
iommu_invalidate(iommu->regs);
|
||||
bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static struct page *iommu_translate_dvma(unsigned long busa)
|
||||
{
|
||||
struct iommu_struct *iommu = sbus_root->iommu;
|
||||
iopte_t *iopte = iommu->page_table;
|
||||
|
||||
iopte += ((busa - iommu->start) >> PAGE_SHIFT);
|
||||
return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* mmu_lockarea hook: the IOMMU needs no bounce buffer, so the caller's
 * virtual address is usable for DMA as-is.  Installed with
 * BTFIXUPCALL_RETO0 (identity return) in ld_mmu_iommu() below.
 */
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}
|
||||
|
||||
/* Counterpart of iommu_lockarea(): nothing was locked, nothing to undo.
 * Installed as BTFIXUPCALL_NOP in ld_mmu_iommu().
 */
static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}
|
||||
|
||||
/* Install the IOMMU-based DMA method vector via boot-time fixups.
 * Selects the SCSI map/unmap helpers according to how this CPU must
 * flush pages before DMA (not at all / globally / per page), and uses
 * cacheable DVMA protections only on chips whose caches are coherent
 * enough (Viking with MXCC, HyperSparc).
 */
void __init ld_mmu_iommu(void)
{
	/* True when the DMA-flush hook is the per-page Viking flush. */
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip: no flushing needed before DMA. */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter of what page is it */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		/* Per-page flush variant. */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		/* DMA-coherent caches: mark DVMA pages cacheable. */
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
|
46
arch/sparc/mm/loadmmu.c
Normal file
46
arch/sparc/mm/loadmmu.c
Normal file
@@ -0,0 +1,46 @@
|
||||
/* $Id: loadmmu.c,v 1.56 2000/02/08 20:24:21 davem Exp $
|
||||
* loadmmu.c: This code loads up all the mm function pointers once the
|
||||
* machine type has been determined. It also sets the static
|
||||
* mmu values such as PAGE_NONE, etc.
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/a.out.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/oplib.h>
|
||||
|
||||
/* MMU context bookkeeping shared by the sun4c/srmmu code.
 * NOTE(review): allocation/use of these lists lives in the per-MMU
 * implementations, not visible here — presumably ctx_list_pool is the
 * backing array and ctx_free/ctx_used are list heads; verify there.
 */
struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

/* Page protection bits used for I/O mappings; set up by the MMU loader. */
unsigned int pg_iobits;

extern void ld_mmu_sun4c(void);
extern void ld_mmu_srmmu(void);
|
||||
|
||||
/* Pick and install the MMU method vector for the detected CPU model,
 * then apply the collected boot-time fixups.  Halts at the PROM prompt
 * for unsupported models.  Called once during early boot.
 */
void __init load_mmu(void)
{
	if (sparc_cpu_model == sun4c || sparc_cpu_model == sun4) {
		ld_mmu_sun4c();
	} else if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) {
		ld_mmu_srmmu();
	} else {
		prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
		prom_halt();
	}
	btfixup();
}
|
59
arch/sparc/mm/nosrmmu.c
Normal file
59
arch/sparc/mm/nosrmmu.c
Normal file
@@ -0,0 +1,59 @@
|
||||
/* $Id: nosrmmu.c,v 1.5 1999/11/19 04:11:54 davem Exp $
|
||||
* nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
|
||||
* so that it does not need srmmu and avoid ifdefs.
|
||||
*
|
||||
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/mbus.h>
|
||||
#include <asm/sbus.h>
|
||||
|
||||
/* Panic message used when any SRMMU path is entered on a sun4-only kernel. */
static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n";

/* Dummy definitions so generic code links without a real SRMMU. */
enum mbus_module srmmu_modtype;
void *srmmu_nocache_pool;

int vac_cache_size = 0;	/* sun4 has no virtually-addressed cache here */
|
||||
|
||||
/* Print the "wrong machine" message and halt back to the PROM.
 * Reached only if an SRMMU entry point is called on a sun4-only build.
 */
static void __init should_not_happen(void)
{
	prom_printf(shouldnothappen);
	prom_halt();
}
|
||||
|
||||
/* SRMMU entry points must never run on a sun4-only kernel: halt. */
void __init srmmu_frob_mem_map(unsigned long start_mem)
{
	should_not_happen();
}

/* Halts via should_not_happen(); the return statement is unreachable
 * and exists only to satisfy the compiler. */
unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
{
	should_not_happen();
	return 0;
}

/* Selected only for sun4m/sun4d models, which a sun4 kernel rejects. */
void __init ld_mmu_srmmu(void)
{
	should_not_happen();
}

/* No-op stub: sun4 builds have no SRMMU I/O mappings. */
void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
}

/* No-op stub, pairs with srmmu_mapioaddr() above. */
void srmmu_unmapioaddr(unsigned long virt_addr)
{
}

/* IO-unit DVMA stubs: sun4 builds never use the IO-unit, return 0. */
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	return 0;
}

__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	return 0;
}
|
77
arch/sparc/mm/nosun4c.c
Normal file
77
arch/sparc/mm/nosun4c.c
Normal file
@@ -0,0 +1,77 @@
|
||||
/* $Id: nosun4c.c,v 1.3 2000/02/14 04:52:36 jj Exp $
|
||||
* nosun4c.c: This file is a bunch of dummies for SMP compiles,
|
||||
* so that it does not need sun4c and avoid ifdefs.
|
||||
*
|
||||
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
/* Panic message used when a sun4c path is entered on an SMP kernel. */
static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";

/* Dummies */
/* Layout-compatible placeholder so references to the sun4c MMU ring
 * link; the fields are never interpreted on SMP builds. */
struct sun4c_mmu_ring {
	unsigned long xxx1[3];
	unsigned char xxx2[2];
	int xxx3;
};
struct sun4c_mmu_ring sun4c_kernel_ring;
struct sun4c_mmu_ring sun4c_kfree_ring;
unsigned long sun4c_kernel_faults;
unsigned long *sun4c_memerr_reg;
|
||||
|
||||
/* Print the "SMP needs sun4m/sun4d" message and halt back to the PROM.
 * Reached only if a sun4c entry point is called on an SMP build.
 */
static void __init should_not_happen(void)
{
	prom_printf(shouldnothappen);
	prom_halt();
}
|
||||
|
||||
/* Halts via should_not_happen(); the return is unreachable and exists
 * only to satisfy the compiler. */
unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
{
	should_not_happen();
	return 0;
}

/* Selected only for sun4/sun4c models, which an SMP kernel rejects. */
void __init ld_mmu_sun4c(void)
{
	should_not_happen();
}

/* No-op stub: SMP builds never create sun4c I/O mappings. */
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
}

/* No-op stub, pairs with sun4c_mapioaddr() above. */
void sun4c_unmapioaddr(unsigned long virt_addr)
{
}

/* No-op stub: nothing to drain without sun4c hardware. */
void sun4c_complete_all_stores(void)
{
}

/* Dummy page-table walkers; never called on SMP, so NULL is safe. */
pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
{
	return NULL;
}

pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	return NULL;
}

/* No-op stub for the sun4c MMU-cache update hook. */
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}

/* Probe hooks: reaching these means the wrong machine type; halt. */
void __init sun4c_probe_vac(void)
{
	should_not_happen();
}

void __init sun4c_probe_memerr_reg(void)
{
	should_not_happen();
}
|
2274
arch/sparc/mm/srmmu.c
Normal file
2274
arch/sparc/mm/srmmu.c
Normal file
File diff suppressed because it is too large
Load Diff
2276
arch/sparc/mm/sun4c.c
Normal file
2276
arch/sparc/mm/sun4c.c
Normal file
File diff suppressed because it is too large
Load Diff
256
arch/sparc/mm/swift.S
Normal file
256
arch/sparc/mm/swift.S
Normal file
@@ -0,0 +1,256 @@
|
||||
/* $Id: swift.S,v 1.9 2002/01/08 11:11:59 davem Exp $
|
||||
* swift.S: MicroSparc-II mmu/cache operations.
|
||||
*
|
||||
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <asm/psr.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtsrmmu.h>
|
||||
#include <asm/asm_offsets.h>
|
||||
|
||||
.text
|
||||
.align 4
|
||||
|
||||
#if 1 /* XXX screw this, I can't get the VAC flushes working
|
||||
* XXX reliably... -DaveM
|
||||
*/
|
||||
	/* All Swift (MicroSparc-II) cache flush entry points collapse to
	 * one routine: clear every D-cache and I-cache tag (0x2000 bytes
	 * of tag space, 0x10 per step), since per-page VAC flushing was
	 * found unreliable (see the #if comment above).
	 */
	.globl	swift_flush_cache_all, swift_flush_cache_mm
	.globl	swift_flush_cache_range, swift_flush_cache_page
	.globl	swift_flush_page_for_dma
	.globl	swift_flush_page_to_ram

swift_flush_cache_all:
swift_flush_cache_mm:
swift_flush_cache_range:
swift_flush_cache_page:
swift_flush_page_for_dma:
swift_flush_page_to_ram:
	sethi	%hi(0x2000), %o0
1:	subcc	%o0, 0x10, %o0
	add	%o0, %o0, %o1
	sta	%g0, [%o0] ASI_M_DATAC_TAG
	bne	1b
	sta	%g0, [%o1] ASI_M_TXTC_TAG	! delay slot: clear I-cache tag too
	retl
	nop
|
||||
#else
|
||||
|
||||
	/* NOTE(review): everything below sits in the "#else" of the
	 * "#if 1" above, i.e. it is currently compiled OUT.  Kept as the
	 * reference per-context/per-page VAC flush implementation.
	 */

	/* Flush entire cache: clear all I- and D-cache tags. */
	.globl	swift_flush_cache_all
swift_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)

	/* Just clear out all the tags. */
	sethi	%hi(16 * 1024), %o0
1:	subcc	%o0, 16, %o0
	sta	%g0, [%o0] ASI_M_TXTC_TAG
	bne	1b
	sta	%g0, [%o0] ASI_M_DATAC_TAG	! delay slot

	retl
	nop

	/* Flush one address space: switch to its context with traps off,
	 * flush by context ASI, then restore the previous context. */
	.globl	swift_flush_cache_mm
swift_flush_cache_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1				! no hardware context -> nothing cached
	be	swift_flush_cache_mm_out
	WINDOW_FLUSH(%g4, %g5)			! delay slot
	rd	%psr, %g1
	andn	%g1, PSR_ET, %g3
	wr	%g3, 0x0, %psr			! disable traps while context reg is borrowed
	nop
	nop
	mov	SRMMU_CTX_REG, %g7
	lda	[%g7] ASI_M_MMUREGS, %g5	! save current context
	sta	%g2, [%g7] ASI_M_MMUREGS	! switch to target context

#if 1
	sethi	%hi(0x2000), %o0
1:	subcc	%o0, 0x10, %o0
	sta	%g0, [%o0] ASI_M_FLUSH_CTX
	bne	1b
	nop
#else
	/* Unrolled 8-way variant of the same context flush. */
	clr	%o0
	or	%g0, 2048, %g7
	or	%g0, 2048, %o1
	add	%o1, 2048, %o2
	add	%o2, 2048, %o3
	mov	16, %o4
	add	%o4, 2048, %o5
	add	%o5, 2048, %g2
	add	%g2, 2048, %g3
1:	sta	%g0, [%o0      ] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o1] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o2] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o3] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o5] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_CTX
	subcc	%g7, 32, %g7
	bne	1b
	add	%o0, 32, %o0
#endif

	mov	SRMMU_CTX_REG, %g7
	sta	%g5, [%g7] ASI_M_MMUREGS	! restore saved context
	wr	%g1, 0x0, %psr			! re-enable traps
	nop
	nop
swift_flush_cache_mm_out:
	retl
	nop

	/* Flush a range: small ranges (<= one page) go to the per-page
	 * path at 70:, larger ones just flush the whole mm. */
	.globl	swift_flush_cache_range
swift_flush_cache_range:
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
	sub	%o2, %o1, %o2
	sethi	%hi(4096), %o3
	cmp	%o2, %o3
	bgu	swift_flush_cache_mm
	nop
	b	70f
	nop

	/* Flush a single page within a context (entered at 70: from the
	 * range path as well). */
	.globl	swift_flush_cache_page
swift_flush_cache_page:
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
70:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	swift_flush_cache_page_out
	WINDOW_FLUSH(%g4, %g5)		! delay slot
	rd	%psr, %g1
	andn	%g1, PSR_ET, %g3
	wr	%g3, 0x0, %psr
	nop
	nop
	mov	SRMMU_CTX_REG, %g7
	lda	[%g7] ASI_M_MMUREGS, %g5
	sta	%g2, [%g7] ASI_M_MMUREGS

	andn	%o1, (PAGE_SIZE - 1), %o1
#if 1
	sethi	%hi(0x1000), %o0
1:	subcc	%o0, 0x10, %o0
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	bne	1b
	nop
#else
	/* Unrolled 8-way variant of the same page flush. */
	or	%g0, 512, %g7
	or	%g0, 512, %o0
	add	%o0, 512, %o2
	add	%o2, 512, %o3
	add	%o3, 512, %o4
	add	%o4, 512, %o5
	add	%o5, 512, %g3
	add	%g3, 512, %g4
1:	sta	%g0, [%o1      ] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
	subcc	%g7, 16, %g7
	bne	1b
	add	%o1, 16, %o1
#endif

	mov	SRMMU_CTX_REG, %g7
	sta	%g5, [%g7] ASI_M_MMUREGS
	wr	%g1, 0x0, %psr
	nop
	nop
swift_flush_cache_page_out:
	retl
	nop

	/* Swift is write-thru, however it is not
	 * I/O nor TLB-walk coherent.  Also it has
	 * caches which are virtually indexed and tagged.
	 */
	.globl	swift_flush_page_for_dma
	.globl	swift_flush_page_to_ram
swift_flush_page_for_dma:
swift_flush_page_to_ram:
	andn	%o0, (PAGE_SIZE - 1), %o1
#if 1
	sethi	%hi(0x1000), %o0
1:	subcc	%o0, 0x10, %o0
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	bne	1b
	nop
#else
	/* Unrolled 8-way variant of the same page flush. */
	or	%g0, 512, %g7
	or	%g0, 512, %o0
	add	%o0, 512, %o2
	add	%o2, 512, %o3
	add	%o3, 512, %o4
	add	%o4, 512, %o5
	add	%o5, 512, %g3
	add	%g3, 512, %g4
1:	sta	%g0, [%o1      ] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
	subcc	%g7, 16, %g7
	bne	1b
	add	%o1, 16, %o1
#endif
	retl
	nop
|
||||
#endif
|
||||
|
||||
	/* Make newly-written signal trampoline instructions visible:
	 * "flush" both words of the two-instruction trampoline at %o1. */
	.globl	swift_flush_sig_insns
swift_flush_sig_insns:
	flush	%o1
	retl
	flush	%o1 + 4		! delay slot: second trampoline word
|
||||
|
||||
	/* TLB flushes.  range falls through to mm; mm skips the probe if
	 * the mm has no hardware context; all does an entire-TLB flush
	 * via a type-4 (0x400) flush-probe store. */
	.globl	swift_flush_tlb_mm
	.globl	swift_flush_tlb_range
	.globl	swift_flush_tlb_all
swift_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
swift_flush_tlb_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	swift_flush_tlb_all_out
swift_flush_tlb_all:
	mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
swift_flush_tlb_all_out:
	retl
	nop
|
||||
|
||||
	/* Per-page TLB flush.  The fine-grained per-page variant is
	 * disabled (#else); the live path just flushes the whole TLB. */
	.globl	swift_flush_tlb_page
swift_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	cmp	%o3, -1			! no context -> nothing in the TLB
	be	swift_flush_tlb_page_out
	nop
#if 1
	mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
#else
	lda	[%g1] ASI_M_MMUREGS, %g5
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PAGE	/* rem. virt. cache. prot. */
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
#endif
swift_flush_tlb_page_out:
	retl
	nop
|
133
arch/sparc/mm/tsunami.S
Normal file
133
arch/sparc/mm/tsunami.S
Normal file
@@ -0,0 +1,133 @@
|
||||
/* $Id: tsunami.S,v 1.7 2001/12/21 04:56:15 davem Exp $
|
||||
* tsunami.S: High speed MicroSparc-I mmu/cache operations.
|
||||
*
|
||||
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/asm_offsets.h>
|
||||
#include <asm/psr.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtsrmmu.h>
|
||||
|
||||
.text
|
||||
.align 4
|
||||
|
||||
	.globl	tsunami_flush_cache_all, tsunami_flush_cache_mm
	.globl	tsunami_flush_cache_range, tsunami_flush_cache_page
	.globl	tsunami_flush_page_to_ram, tsunami_flush_page_for_dma
	.globl	tsunami_flush_sig_insns
	.globl	tsunami_flush_tlb_all, tsunami_flush_tlb_mm
	.globl	tsunami_flush_tlb_range, tsunami_flush_tlb_page

	/* Sliiick... */
	/* All cache flushes funnel into one whole-I+D-cache clear; the
	 * context check skips the clear when the mm has no hw context.
	 * page_to_ram is a no-op (falls through to retl only). */
tsunami_flush_cache_page:
tsunami_flush_cache_range:
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
tsunami_flush_cache_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	tsunami_flush_cache_out
tsunami_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)		! delay slot of the branch above
tsunami_flush_page_for_dma:
	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
tsunami_flush_cache_out:
tsunami_flush_page_to_ram:
	retl
	nop
|
||||
|
||||
	/* Make both words of a signal trampoline at %o1 visible to the
	 * instruction stream. */
tsunami_flush_sig_insns:
	flush	%o1
	retl
	flush	%o1 + 4		! delay slot: second word
|
||||
|
||||
	/* More slick stuff... */
	/* TLB flushes: range falls into mm; mm skips if no hw context;
	 * all does an entire-TLB (type 0x400) flush-probe.  The trailing
	 * nops pad out the MMU operation. */
tsunami_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
tsunami_flush_tlb_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	tsunami_flush_tlb_out
tsunami_flush_tlb_all:
	mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	nop
	nop
	nop
	nop
	nop
tsunami_flush_tlb_out:
	retl
	nop
|
||||
|
||||
	/* This one can be done in a fine grained manner... */
	/* Flush a single TLB entry: temporarily switch the context
	 * register to the page's mm, probe-flush the page, then restore
	 * the saved context in the return delay slot. */
tsunami_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	cmp	%o3, -1
	be	tsunami_flush_tlb_page_out
	lda	[%g1] ASI_M_MMUREGS, %g5	! delay slot: save context
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	nop
	nop
	nop
	nop
	nop
tsunami_flush_tlb_page_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! delay slot: restore context
|
||||
|
||||
/* Copy 32 bytes from src+offset to dst+offset using two doubleword
 * temporaries (t0/t1 and t2/t3 pairs), high words first. */
#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
	ldd	[src + offset + 0x18], t0; \
	std	t0, [dst + offset + 0x18]; \
	ldd	[src + offset + 0x10], t2; \
	std	t2, [dst + offset + 0x10]; \
	ldd	[src + offset + 0x08], t0; \
	std	t0, [dst + offset + 0x08]; \
	ldd	[src + offset + 0x00], t2; \
	std	t2, [dst + offset + 0x00];
|
||||
|
||||
	/* Fast page copy: 256 bytes per loop iteration via MIRROR_BLOCK.
	 * NOTE(review): deliberately has no retl — setup_blockops below
	 * copies exactly [tsunami_copy_1page, tsunami_setup_blockops)
	 * over __copy_1page, whose own epilogue follows the copied body;
	 * do not "fix" by adding a return. */
	.globl	tsunami_copy_1page
tsunami_copy_1page:
/* NOTE: This routine has to be shorter than 70insns --jj */
	or	%g0, (PAGE_SIZE >> 8), %g1
1:
	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
	subcc	%g1, 1, %g1
	add	%o0, 0x100, %o0
	bne	1b
	add	%o1, 0x100, %o1		! delay slot

	/* Patch the optimized copy routine above over __copy_1page at
	 * boot, word by word, then clear both caches so the new code is
	 * what actually executes. */
	.globl	tsunami_setup_blockops
tsunami_setup_blockops:
	sethi	%hi(__copy_1page), %o0
	or	%o0, %lo(__copy_1page), %o0
	sethi	%hi(tsunami_copy_1page), %o1
	or	%o1, %lo(tsunami_copy_1page), %o1
	sethi	%hi(tsunami_setup_blockops), %o2
	or	%o2, %lo(tsunami_setup_blockops), %o2
	ld	[%o1], %o4
1:	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	ld	[%o1], %o4		! delay slot: prefetch next word
	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
	retl
	nop
|
284
arch/sparc/mm/viking.S
Normal file
284
arch/sparc/mm/viking.S
Normal file
@@ -0,0 +1,284 @@
|
||||
/* $Id: viking.S,v 1.19 2001/12/21 04:56:15 davem Exp $
|
||||
* viking.S: High speed Viking cache/mmu operations
|
||||
*
|
||||
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
|
||||
* Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/psr.h>
|
||||
#include <asm/asm_offsets.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/mxcc.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtsrmmu.h>
|
||||
#include <asm/viking.h>
|
||||
#include <asm/btfixup.h>
|
||||
|
||||
#ifdef CONFIG_SMP
	/* Byte spinlock (taken with ldstub) serializing the SMP TLB
	 * flush routines at the end of this file. */
	.data
	.align	4
sun4dsmp_flush_tlb_spin:
	.word	0
#endif
|
||||
|
||||
.text
|
||||
.align 4
|
||||
|
||||
.globl viking_flush_cache_all, viking_flush_cache_mm
|
||||
.globl viking_flush_cache_range, viking_flush_cache_page
|
||||
.globl viking_flush_page, viking_mxcc_flush_page
|
||||
.globl viking_flush_page_for_dma, viking_flush_page_to_ram
|
||||
.globl viking_flush_sig_insns
|
||||
.globl viking_flush_tlb_all, viking_flush_tlb_mm
|
||||
.globl viking_flush_tlb_range, viking_flush_tlb_page
|
||||
|
||||
	/* Flush one physical page from the Viking D-cache without MXCC:
	 * walk all 128 sets x 4 blocks of cache tags, and for each valid
	 * tag matching the page's physical tag, displace the line by
	 * reading through a known alias window.  %o0 = kernel vaddr. */
viking_flush_page:
	sethi	%hi(PAGE_OFFSET), %g2
	sub	%o0, %g2, %g3		! physical offset of the page
	srl	%g3, 12, %g1		! ppage >> 12

	clr	%o1			! set counter, 0 - 127
	sethi	%hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3
	sethi	%hi(0x80000000), %o4
	sethi	%hi(VIKING_PTAG_VALID), %o5
	sethi	%hi(2*PAGE_SIZE), %o0
	sethi	%hi(PAGE_SIZE), %g7
	clr	%o2			! block counter, 0 - 3
5:
	sll	%o1, 5, %g4
	or	%g4, %o4, %g4		! 0x80000000 | (set << 5)

	sll	%o2, 26, %g5		! block << 26
6:
	or	%g5, %g4, %g5
	ldda	[%g5] ASI_M_DATAC_TAG, %g2
	cmp	%g3, %g1		! ptag == ppage?
	bne	7f
	inc	%o2			! delay slot

	andcc	%g2, %o5, %g0		! ptag VALID?
	be	7f
	add	%g4, %o3, %g2		! (PAGE_OFFSET + PAGE_SIZE) | (set << 5)
	/* Displacement reads: touch aliasing addresses to evict line. */
	ld	[%g2], %g3
	ld	[%g2 + %g7], %g3
	add	%g2, %o0, %g2
	ld	[%g2], %g3
	ld	[%g2 + %g7], %g3
	add	%g2, %o0, %g2
	ld	[%g2], %g3
	ld	[%g2 + %g7], %g3
	add	%g2, %o0, %g2
	ld	[%g2], %g3
	b	8f
	ld	[%g2 + %g7], %g3	! delay slot

7:
	cmp	%o2, 3
	ble	6b
	sll	%o2, 26, %g5			! block << 26

8:	inc	%o1
	cmp	%o1, 0x7f
	ble	5b
	clr	%o2			! delay slot: reset block counter

9:	retl
	nop
|
||||
|
||||
	/* Flush one page with the MXCC: walk the page backwards in
	 * MXCC_STREAM_SIZE steps, using the MXCC source/destination
	 * stream registers to push each chunk to memory. */
viking_mxcc_flush_page:
	sethi	%hi(PAGE_OFFSET), %g2
	sub	%o0, %g2, %g3
	sub	%g3, -PAGE_SIZE, %g3		! ppage + PAGE_SIZE
	sethi	%hi(MXCC_SRCSTREAM), %o3	! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM)
	mov	0x10, %g2			! set cacheable bit
	or	%o3, %lo(MXCC_SRCSTREAM), %o2
	or	%o3, %lo(MXCC_DESSTREAM), %o3
	sub	%g3, MXCC_STREAM_SIZE, %g3
6:
	stda	%g2, [%o2] ASI_M_MXCC
	stda	%g2, [%o3] ASI_M_MXCC
	andncc	%g3, PAGE_MASK, %g0	! done when address wraps below page
	bne	6b
	sub	%g3, MXCC_STREAM_SIZE, %g3	! delay slot

9:	retl
	nop
|
||||
|
||||
	/* Cache flushes: on UP, skip entirely for mms without a hardware
	 * context; otherwise just flush the register windows (the Viking
	 * caches are physically tagged, so no data flush is required). */
viking_flush_cache_page:
viking_flush_cache_range:
#ifndef CONFIG_SMP
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
#endif
viking_flush_cache_mm:
#ifndef CONFIG_SMP
	ld	[%o0 + AOFF_mm_context], %g1
	cmp	%g1, -1
	bne	viking_flush_cache_all
	nop
	b,a	viking_flush_cache_out
#endif
viking_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
viking_flush_cache_out:
	retl
	nop
|
||||
|
||||
	/* Entire-TLB flush: type-4 (0x400) flush-probe store. */
viking_flush_tlb_all:
	mov	0x400, %g1
	retl
	sta	%g0, [%g1] ASI_M_FLUSH_PROBE	! delay slot

	/* Per-mm TLB flush: switch to the mm's context, region-level
	 * (0x300) flush-probe, restore the saved context in the delay
	 * slot.  UP builds short-circuit mms with no hw context. */
viking_flush_tlb_mm:
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o1
	lda	[%g1] ASI_M_MMUREGS, %g5	! save current context
#ifndef CONFIG_SMP
	cmp	%o1, -1
	be	1f
#endif
	mov	0x300, %g2
	sta	%o1, [%g1] ASI_M_MMUREGS
	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! delay slot: restore context
#ifndef CONFIG_SMP
1:	retl
	nop
#endif

	/* Range TLB flush: probe-flush one SRMMU segment (0x200 level)
	 * per SRMMU_PGDIR-sized chunk covering [%o1, %o2). */
viking_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	2f
#endif
	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta	%o3, [%g1] ASI_M_MMUREGS
	and	%o1, %o4, %o1
	add	%o1, 0x200, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
1:	sub	%o1, %o4, %o1		! step one pgdir (note: %o4 is negative mask)
	cmp	%o1, %o2
	blu,a	1b
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE	! annulled delay slot
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! delay slot: restore context
#ifndef CONFIG_SMP
2:	retl
	nop
#endif

	/* Single-page TLB flush under the target mm's context. */
viking_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	1f
#endif
	and	%o1, PAGE_MASK, %o1
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! delay slot: restore context
#ifndef CONFIG_SMP
1:	retl
	nop
#endif
|
||||
|
||||
	/* No-ops: Viking caches are physically tagged and I/O coherent
	 * enough that these hooks need no work. */
viking_flush_page_to_ram:
viking_flush_page_for_dma:
viking_flush_sig_insns:
	retl
	nop
|
||||
|
||||
#ifdef CONFIG_SMP
	/* sun4d SMP variants of the TLB flushes above.  Each routine
	 * spins on the sun4dsmp_flush_tlb_spin byte lock (ldstub to
	 * acquire, stb 0 in the return delay slot to release), then
	 * performs the same flush sequence as its UP counterpart. */
	.globl	sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm
	.globl	sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page
sun4dsmp_flush_tlb_all:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	2f			! lock busy: spin at 2:
	mov	0x400, %g1		! delay slot
	sta	%g0, [%g1] ASI_M_FLUSH_PROBE
	retl
	stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]	! release lock
2:	tst	%g5
	bne,a	2b
	ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b

sun4dsmp_flush_tlb_mm:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	2f
	mov	SRMMU_CTX_REG, %g1	! delay slot
	ld	[%o0 + AOFF_mm_context], %o1
	lda	[%g1] ASI_M_MMUREGS, %g5
	mov	0x300, %g2
	sta	%o1, [%g1] ASI_M_MMUREGS
	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
	retl
	stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]	! release lock
2:	tst	%g5
	bne,a	2b
	ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b

sun4dsmp_flush_tlb_range:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	3f
	mov	SRMMU_CTX_REG, %g1	! delay slot
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta	%o3, [%g1] ASI_M_MMUREGS
	and	%o1, %o4, %o1
	add	%o1, 0x200, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
2:	sub	%o1, %o4, %o1
	cmp	%o1, %o2
	blu,a	2b
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE	! annulled delay slot
	sta	%g5, [%g1] ASI_M_MMUREGS
	retl
	stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]	! release lock
3:	tst	%g5
	bne,a	3b
	ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b

sun4dsmp_flush_tlb_page:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	2f
	mov	SRMMU_CTX_REG, %g1	! delay slot
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
	and	%o1, PAGE_MASK, %o1
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
	retl
	stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]	! release lock
2:	tst	%g5
	bne,a	2b
	ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b
	nop
#endif
|
Reference in New Issue
Block a user