@@ -18,6 +18,9 @@ config UML
select GENERIC_CLOCKEVENTS
select HAVE_GCC_PLUGINS
select TTY # Needed for line.c
+ select ARCH_HAS_SET_MEMORY # set_memory_ro/rw/nx/x (implemented in arch/um mem.c)
+ select ARCH_HAS_STRICT_KERNEL_RWX # kernel text/rodata protected via mark_rodata_ro()
+ select ARCH_HAS_STRICT_MODULE_RWX # modules use the same set_memory_* helpers
config MMU
bool
@@ -17,6 +17,8 @@
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
#define _PAGE_DIRTY 0x100
+#define _PAGE_RO 0x200 /* software bit: host mapping gets no write perm (see change_page_range) */
+#define _PAGE_NX 0x400 /* software bit: host mapping gets no exec perm (see change_page_range) */
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
pte_present gives true */
new file mode 100644
@@ -0,0 +1 @@
+#include <asm-generic/set_memory.h>
@@ -89,10 +89,10 @@ SECTIONS
KEEP (*(.fini))
} =0x90909090
- .kstrtab : { *(.kstrtab) }
-
#include <asm/common.lds.S>
+ .kstrtab : { *(.kstrtab) } /* NOTE(review): moved below common.lds.S — presumably to keep it outside the page-protected text/rodata span; confirm resulting section layout */
+
__init_begin = .;
init.data : { INIT_DATA }
__init_end = .;
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <asm/set_memory.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
@@ -37,6 +38,22 @@ int kmalloc_ok = 0;
/* Used during early boot */
static unsigned long brk_end;
+void mark_rodata_ro(void) /* arch hook for STRICT_KERNEL_RWX: lock down kernel image perms */
+{
+ unsigned long text_start = PFN_ALIGN(_text);
+ unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+ unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+ unsigned long all_end = PFN_ALIGN(&__bss_stop);
+
+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ (rodata_end - text_start) >> 10);
+
+ set_memory_ro(text_start, /* write-protect _text .. __end_rodata */
+ (rodata_end - text_start) >> PAGE_SHIFT);
+ set_memory_nx(rodata_start, /* make rodata .. __bss_stop non-executable */
+ (all_end - rodata_start) >> PAGE_SHIFT);
+}
+
void __init mem_init(void)
{
/* clear the zero-page */
void __init mem_init(void)
{
/* clear the zero-page */
@@ -225,3 +242,62 @@ void *uml_kmalloc(int size, int flags)
{
return kmalloc(size, flags);
}
+
+struct page_change_data {
+ u32 set, clear; /* pte permission bits to set/clear (_PAGE_RO, _PAGE_NX) */
+};
+
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data) /* apply_to_page_range() callback, one pte per call */
+{
+ struct page_change_data *cdata = data;
+ pte_t pte = READ_ONCE(*ptep);
+
+ pte_clear_bits(pte, cdata->clear);
+ pte_set_bits(pte, cdata->set);
+
+ set_pte(ptep, pte);
+
+ os_protect_memory((void *)addr, PAGE_SIZE, /* mirror pte bits into host mapping: always r, w unless RO, x unless NX */
+ 1, !(pte.pte & _PAGE_RO), !(pte.pte & _PAGE_NX));
+ return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages, /* shared worker for the set_memory_* helpers below */
+ u32 set, u32 clear)
+{
+ unsigned long start = addr & PAGE_MASK; /* round down to a page boundary */
+ unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE; /* rounds up, so an unaligned addr still yields whole pages */
+ unsigned long size = end - start;
+ struct page_change_data data;
+
+ WARN_ON_ONCE(start != addr); /* callers are expected to pass page-aligned addresses */
+
+ if (!size)
+ return 0;
+
+ data.set = set;
+ data.clear = clear;
+
+ return apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+}
+
+int set_memory_ro(unsigned long addr, int numpages) /* drop write permission */
+{
+ return change_memory_common(addr, numpages, _PAGE_RO, 0);
+}
+
+int set_memory_rw(unsigned long addr, int numpages) /* restore write permission */
+{
+ return change_memory_common(addr, numpages, 0, _PAGE_RO);
+}
+
+int set_memory_nx(unsigned long addr, int numpages) /* drop exec permission */
+{
+ return change_memory_common(addr, numpages, _PAGE_NX, 0);
+}
+
+int set_memory_x(unsigned long addr, int numpages) /* restore exec permission */
+{
+ return change_memory_common(addr, numpages, 0, _PAGE_NX);
+}