Skip to content

Commit 223b5e5

Browse files
rppt authored and mcgrof committed
mm/execmem, arch: convert remaining overrides of module_alloc to execmem
Extend execmem parameters to accommodate more complex overrides of module_alloc() by architectures. This includes specification of a fallback range required by arm, arm64 and powerpc, EXECMEM_MODULE_DATA type required by powerpc, support for allocation of KASAN shadow required by s390 and x86 and support for late initialization of execmem required by arm64. The core implementation of execmem_alloc() takes care of suppressing warnings when the initial allocation fails but there is a fallback range defined. Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org> Acked-by: Will Deacon <will@kernel.org> Acked-by: Song Liu <song@kernel.org> Tested-by: Liviu Dudau <liviu@dudau.co.uk> Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
1 parent f6bec26 commit 223b5e5

File tree

11 files changed

+246
-185
lines changed

11 files changed

+246
-185
lines changed

arch/Kconfig

+8
Original file line numberDiff line numberDiff line change
@@ -977,6 +977,14 @@ config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
977977
For architectures like powerpc/32 which have constraints on module
978978
allocation and need to allocate module data outside of module area.
979979

980+
config ARCH_WANTS_EXECMEM_LATE
981+
bool
982+
help
983+
For architectures that do not allocate executable memory early on
984+
boot, but rather require its initialization late when there is
985+
enough entropy for module space randomization, for instance
986+
arm64.
987+
980988
config HAVE_IRQ_EXIT_ON_IRQ_STACK
981989
bool
982990
help

arch/arm/kernel/module.c

+25-16
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
#include <linux/fs.h>
1717
#include <linux/string.h>
1818
#include <linux/gfp.h>
19+
#include <linux/execmem.h>
1920

2021
#include <asm/sections.h>
2122
#include <asm/smp_plat.h>
@@ -34,23 +35,31 @@
3435
#endif
3536

3637
#ifdef CONFIG_MMU
37-
void *module_alloc(unsigned long size)
38+
static struct execmem_info execmem_info __ro_after_init;
39+
40+
struct execmem_info __init *execmem_arch_setup(void)
3841
{
39-
gfp_t gfp_mask = GFP_KERNEL;
40-
void *p;
41-
42-
/* Silence the initial allocation */
43-
if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
44-
gfp_mask |= __GFP_NOWARN;
45-
46-
p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
47-
gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
48-
__builtin_return_address(0));
49-
if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
50-
return p;
51-
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
52-
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
53-
__builtin_return_address(0));
42+
unsigned long fallback_start = 0, fallback_end = 0;
43+
44+
if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
45+
fallback_start = VMALLOC_START;
46+
fallback_end = VMALLOC_END;
47+
}
48+
49+
execmem_info = (struct execmem_info){
50+
.ranges = {
51+
[EXECMEM_DEFAULT] = {
52+
.start = MODULES_VADDR,
53+
.end = MODULES_END,
54+
.pgprot = PAGE_KERNEL_EXEC,
55+
.alignment = 1,
56+
.fallback_start = fallback_start,
57+
.fallback_end = fallback_end,
58+
},
59+
},
60+
};
61+
62+
return &execmem_info;
5463
}
5564
#endif
5665

arch/arm64/Kconfig

+1
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,7 @@ config ARM64
105105
select ARCH_WANT_FRAME_POINTERS
106106
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
107107
select ARCH_WANT_LD_ORPHAN_WARN
108+
select ARCH_WANTS_EXECMEM_LATE if EXECMEM
108109
select ARCH_WANTS_NO_INSTR
109110
select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
110111
select ARCH_HAS_UBSAN

arch/arm64/kernel/module.c

+31-24
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
#include <linux/random.h>
2121
#include <linux/scs.h>
2222
#include <linux/vmalloc.h>
23+
#include <linux/execmem.h>
2324

2425
#include <asm/alternative.h>
2526
#include <asm/insn.h>
@@ -108,41 +109,47 @@ static int __init module_init_limits(void)
108109

109110
return 0;
110111
}
111-
subsys_initcall(module_init_limits);
112112

113-
void *module_alloc(unsigned long size)
113+
static struct execmem_info execmem_info __ro_after_init;
114+
115+
struct execmem_info __init *execmem_arch_setup(void)
114116
{
115-
void *p = NULL;
117+
unsigned long fallback_start = 0, fallback_end = 0;
118+
unsigned long start = 0, end = 0;
119+
120+
module_init_limits();
116121

117122
/*
118123
* Where possible, prefer to allocate within direct branch range of the
119124
* kernel such that no PLTs are necessary.
120125
*/
121126
if (module_direct_base) {
122-
p = __vmalloc_node_range(size, MODULE_ALIGN,
123-
module_direct_base,
124-
module_direct_base + SZ_128M,
125-
GFP_KERNEL | __GFP_NOWARN,
126-
PAGE_KERNEL, 0, NUMA_NO_NODE,
127-
__builtin_return_address(0));
128-
}
127+
start = module_direct_base;
128+
end = module_direct_base + SZ_128M;
129129

130-
if (!p && module_plt_base) {
131-
p = __vmalloc_node_range(size, MODULE_ALIGN,
132-
module_plt_base,
133-
module_plt_base + SZ_2G,
134-
GFP_KERNEL | __GFP_NOWARN,
135-
PAGE_KERNEL, 0, NUMA_NO_NODE,
136-
__builtin_return_address(0));
137-
}
138-
139-
if (!p) {
140-
pr_warn_ratelimited("%s: unable to allocate memory\n",
141-
__func__);
130+
if (module_plt_base) {
131+
fallback_start = module_plt_base;
132+
fallback_end = module_plt_base + SZ_2G;
133+
}
134+
} else if (module_plt_base) {
135+
start = module_plt_base;
136+
end = module_plt_base + SZ_2G;
142137
}
143138

144-
/* Memory is intended to be executable, reset the pointer tag. */
145-
return kasan_reset_tag(p);
139+
execmem_info = (struct execmem_info){
140+
.ranges = {
141+
[EXECMEM_DEFAULT] = {
142+
.start = start,
143+
.end = end,
144+
.pgprot = PAGE_KERNEL,
145+
.alignment = 1,
146+
.fallback_start = fallback_start,
147+
.fallback_end = fallback_end,
148+
},
149+
},
150+
};
151+
152+
return &execmem_info;
146153
}
147154

148155
enum aarch64_reloc_op {

arch/powerpc/kernel/module.c

+39-21
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <linux/vmalloc.h>
1111
#include <linux/mm.h>
1212
#include <linux/bug.h>
13+
#include <linux/execmem.h>
1314
#include <asm/module.h>
1415
#include <linux/uaccess.h>
1516
#include <asm/firmware.h>
@@ -89,39 +90,56 @@ int module_finalize(const Elf_Ehdr *hdr,
8990
return 0;
9091
}
9192

92-
static __always_inline void *
93-
__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
93+
static struct execmem_info execmem_info __ro_after_init;
94+
95+
struct execmem_info __init *execmem_arch_setup(void)
9496
{
9597
pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
96-
gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
98+
unsigned long fallback_start = 0, fallback_end = 0;
99+
unsigned long start, end;
97100

98101
/*
99-
* Don't do huge page allocations for modules yet until more testing
100-
* is done. STRICT_MODULE_RWX may require extra work to support this
101-
* too.
102+
* BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
103+
* allow allocating data in the entire vmalloc space
102104
*/
103-
return __vmalloc_node_range(size, 1, start, end, gfp, prot,
104-
VM_FLUSH_RESET_PERMS,
105-
NUMA_NO_NODE, __builtin_return_address(0));
106-
}
107-
108-
void *module_alloc(unsigned long size)
109-
{
110105
#ifdef MODULES_VADDR
111106
unsigned long limit = (unsigned long)_etext - SZ_32M;
112-
void *ptr = NULL;
113107

114108
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
115109

116110
/* First try within 32M limit from _etext to avoid branch trampolines */
117-
if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
118-
ptr = __module_alloc(size, limit, MODULES_END, true);
119-
120-
if (!ptr)
121-
ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
111+
if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
112+
start = limit;
113+
fallback_start = MODULES_VADDR;
114+
fallback_end = MODULES_END;
115+
} else {
116+
start = MODULES_VADDR;
117+
}
122118

123-
return ptr;
119+
end = MODULES_END;
124120
#else
125-
return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
121+
start = VMALLOC_START;
122+
end = VMALLOC_END;
126123
#endif
124+
125+
execmem_info = (struct execmem_info){
126+
.ranges = {
127+
[EXECMEM_DEFAULT] = {
128+
.start = start,
129+
.end = end,
130+
.pgprot = prot,
131+
.alignment = 1,
132+
.fallback_start = fallback_start,
133+
.fallback_end = fallback_end,
134+
},
135+
[EXECMEM_MODULE_DATA] = {
136+
.start = VMALLOC_START,
137+
.end = VMALLOC_END,
138+
.pgprot = PAGE_KERNEL,
139+
.alignment = 1,
140+
},
141+
},
142+
};
143+
144+
return &execmem_info;
127145
}

arch/s390/kernel/module.c

+22-32
Original file line numberDiff line numberDiff line change
@@ -37,41 +37,31 @@
3737

3838
#define PLT_ENTRY_SIZE 22
3939

40-
static unsigned long get_module_load_offset(void)
40+
static struct execmem_info execmem_info __ro_after_init;
41+
42+
struct execmem_info __init *execmem_arch_setup(void)
4143
{
42-
static DEFINE_MUTEX(module_kaslr_mutex);
43-
static unsigned long module_load_offset;
44-
45-
if (!kaslr_enabled())
46-
return 0;
47-
/*
48-
* Calculate the module_load_offset the first time this code
49-
* is called. Once calculated it stays the same until reboot.
50-
*/
51-
mutex_lock(&module_kaslr_mutex);
52-
if (!module_load_offset)
44+
unsigned long module_load_offset = 0;
45+
unsigned long start;
46+
47+
if (kaslr_enabled())
5348
module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
54-
mutex_unlock(&module_kaslr_mutex);
55-
return module_load_offset;
56-
}
5749

58-
void *module_alloc(unsigned long size)
59-
{
60-
gfp_t gfp_mask = GFP_KERNEL;
61-
void *p;
62-
63-
if (PAGE_ALIGN(size) > MODULES_LEN)
64-
return NULL;
65-
p = __vmalloc_node_range(size, MODULE_ALIGN,
66-
MODULES_VADDR + get_module_load_offset(),
67-
MODULES_END, gfp_mask, PAGE_KERNEL,
68-
VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
69-
NUMA_NO_NODE, __builtin_return_address(0));
70-
if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
71-
vfree(p);
72-
return NULL;
73-
}
74-
return p;
50+
start = MODULES_VADDR + module_load_offset;
51+
52+
execmem_info = (struct execmem_info){
53+
.ranges = {
54+
[EXECMEM_DEFAULT] = {
55+
.flags = EXECMEM_KASAN_SHADOW,
56+
.start = start,
57+
.end = MODULES_END,
58+
.pgprot = PAGE_KERNEL,
59+
.alignment = MODULE_ALIGN,
60+
},
61+
},
62+
};
63+
64+
return &execmem_info;
7565
}
7666

7767
#ifdef CONFIG_FUNCTION_TRACER

arch/x86/kernel/module.c

+23-47
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <linux/jump_label.h>
2020
#include <linux/random.h>
2121
#include <linux/memory.h>
22+
#include <linux/execmem.h>
2223

2324
#include <asm/text-patching.h>
2425
#include <asm/page.h>
@@ -36,55 +37,30 @@ do { \
3637
} while (0)
3738
#endif
3839

39-
#ifdef CONFIG_RANDOMIZE_BASE
40-
static unsigned long module_load_offset;
40+
static struct execmem_info execmem_info __ro_after_init;
4141

42-
/* Mutex protects the module_load_offset. */
43-
static DEFINE_MUTEX(module_kaslr_mutex);
44-
45-
static unsigned long int get_module_load_offset(void)
46-
{
47-
if (kaslr_enabled()) {
48-
mutex_lock(&module_kaslr_mutex);
49-
/*
50-
* Calculate the module_load_offset the first time this
51-
* code is called. Once calculated it stays the same until
52-
* reboot.
53-
*/
54-
if (module_load_offset == 0)
55-
module_load_offset =
56-
get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
57-
mutex_unlock(&module_kaslr_mutex);
58-
}
59-
return module_load_offset;
60-
}
61-
#else
62-
static unsigned long int get_module_load_offset(void)
42+
struct execmem_info __init *execmem_arch_setup(void)
6343
{
64-
return 0;
65-
}
66-
#endif
67-
68-
void *module_alloc(unsigned long size)
69-
{
70-
gfp_t gfp_mask = GFP_KERNEL;
71-
void *p;
72-
73-
if (PAGE_ALIGN(size) > MODULES_LEN)
74-
return NULL;
75-
76-
p = __vmalloc_node_range(size, MODULE_ALIGN,
77-
MODULES_VADDR + get_module_load_offset(),
78-
MODULES_END, gfp_mask, PAGE_KERNEL,
79-
VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
80-
NUMA_NO_NODE, __builtin_return_address(0));
81-
82-
if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
83-
vfree(p);
84-
return NULL;
85-
}
86-
87-
return p;
44+
unsigned long start, offset = 0;
45+
46+
if (kaslr_enabled())
47+
offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
48+
49+
start = MODULES_VADDR + offset;
50+
51+
execmem_info = (struct execmem_info){
52+
.ranges = {
53+
[EXECMEM_DEFAULT] = {
54+
.flags = EXECMEM_KASAN_SHADOW,
55+
.start = start,
56+
.end = MODULES_END,
57+
.pgprot = PAGE_KERNEL,
58+
.alignment = MODULE_ALIGN,
59+
},
60+
},
61+
};
62+
63+
return &execmem_info;
8864
}
8965

9066
#ifdef CONFIG_X86_32

0 commit comments

Comments (0)