[PARISC] only make executable areas executable
Currently parisc has the whole kernel marked as RWX, meaning any kernel page at all is eligible to be executed. This can cause a theoretical problem on systems with a combined I/D TLB, because the act of referencing a page causes a TLB insertion with an executable bit. This TLB entry may be used by the CPU as the basis for speculating the page into the I-cache. If this speculated page is subsequently reused for a user process, there is the possibility that a stale I-cache line will be picked up as the binary executes.

As a point of good practice, mark only actual kernel text pages as executable. The same has to be done for init_text pages, but they are converted to data pages (and the I-cache flushed) when the init memory is released.

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
commit d7dd2ff11b
parent e38f5b7450
committed by James Bottomley
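The fix amounts to choosing a page protection per kernel page instead of using RWX everywhere: pages inside the kernel text range get an executable protection, everything else stays non-executable data. Below is a minimal C sketch of that selection, assuming a hypothetical helper name kernel_page_prot(), the linker-provided _text/_etext section bounds, and PAGE_KERNEL_EXEC as an executable kernel protection; it illustrates the approach and is not the patch itself.

#include <asm/pgtable.h>	/* PAGE_KERNEL, PAGE_KERNEL_EXEC (assumed) */
#include <asm/sections.h>	/* _text, _etext */

/*
 * Illustrative only: give the execute bit solely to pages that back
 * the kernel text, and a non-executable protection to all other
 * kernel pages, so ordinary data references never create executable
 * TLB entries that the CPU could speculate into the I-cache.
 */
static pgprot_t kernel_page_prot(unsigned long vaddr)
{
	if (vaddr >= (unsigned long)_text && vaddr < (unsigned long)_etext)
		return PAGE_KERNEL_EXEC;	/* kernel text: may be executed */

	return PAGE_KERNEL;			/* data, bss, etc.: no execute bit */
}

With a split like this, only true text pages ever carry the execute bit; module space, by contrast, stays RWX because mapping module text, data, init_text and init_data separately is not worth the complexity, as the comment added to module_alloc() in the patch below explains.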
@@ -692,6 +692,9 @@ ENTRY(fault_vector_11)
 END(fault_vector_11)
 
 #endif
+	/* Fault vector is separately protected and *must* be on its own page */
+	.align		PAGE_SIZE
+ENTRY(end_fault_vector)
 
 	.import		handle_interruption,code
 	.import		do_cpu_irq_mask,code
@@ -106,8 +106,9 @@ $bss_loop:
 #endif
 
 
-	/* Now initialize the PTEs themselves */
-	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+	/* Now initialize the PTEs themselves.  We use RWX for
+	 * everything ... it will get remapped correctly later */
+	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
 	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
 	load32		PA(pg0),%r1
 
@@ -61,8 +61,10 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 
+#include <asm/pgtable.h>
 #include <asm/unwind.h>
 
 #if 0
@@ -214,7 +216,13 @@ void *module_alloc(unsigned long size)
 {
 	if (size == 0)
 		return NULL;
-	return vmalloc(size);
+	/* using RWX means less protection for modules, but it's
+	 * easier than trying to map the text, data, init_text and
+	 * init_data correctly */
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_KERNEL | __GFP_HIGHMEM,
+				    PAGE_KERNEL_RWX, -1,
+				    __builtin_return_address(0));
 }
 
 #ifndef CONFIG_64BIT
@@ -134,6 +134,7 @@ SECTIONS
 	. = ALIGN(16384);
 	__init_begin = .;
 	INIT_TEXT_SECTION(16384)
+	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(16)
 	/* we have to discard exit text and such at runtime, not link time */
 	.exit.text :