@@ -14,6 +14,7 @@
 #include <linux/of.h>
 
 #include <asm/sections.h>
+#include <asm/pgalloc.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
@@ -168,22 +169,97 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 	memblock_free_early(__pa(ptr), size);
 }
 
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+static void __init pcpu_populate_pte(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
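+	/*
+	 * Walk the kernel page table for addr, allocating any missing
+	 * intermediate levels from memblock (memblock_alloc() returns
+	 * zeroed, page-aligned memory).
+	 */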
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d)) {
+		pud_t *new;
+
+		new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
+		p4d_populate(&init_mm, p4d, new);
+	}
+
+	pud = pud_offset(p4d, addr);
+	if (pud_none(*pud)) {
+		pmd_t *new;
+
+		new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
+		pud_populate(&init_mm, pud, new);
+	}
+
+	pmd = pmd_offset(pud, addr);
+	if (!pmd_present(*pmd)) {
+		pte_t *new;
+
+		new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
+		pmd_populate_kernel(&init_mm, pmd, new);
+	}
+
+	return;
+
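+	/* Allocation failure this early in boot is fatal; there is nothing to unwind. */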
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=%lx\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE);
+}
+#endif
+
 void __init setup_per_cpu_areas(void)
 {
 	unsigned long delta;
 	unsigned int cpu;
-	int rc;
+	int rc = -EINVAL;
+
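+	/*
+	 * pcpu_chosen_fc reflects the percpu_alloc= kernel parameter; try
+	 * the embedding allocator first unless page mapping was explicitly
+	 * requested.
+	 */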
+	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+		/*
+		 * Always reserve area for module percpu variables. That's
+		 * what the legacy allocator did.
+		 */
+		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+					    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+					    pcpu_cpu_distance,
+					    pcpu_fc_alloc, pcpu_fc_free);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		if (rc < 0)
+			pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
+				pcpu_fc_names[pcpu_chosen_fc], rc);
+#endif
+	}
 
-	/*
-	 * Always reserve area for module percpu variables. That's
-	 * what the legacy allocator did.
-	 */
-	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
-				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
-				    pcpu_cpu_distance,
-				    pcpu_fc_alloc, pcpu_fc_free);
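+	/*
+	 * Fall back to mapping the first chunk one page at a time when
+	 * embedding failed or was skipped above.
+	 */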
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+	if (rc < 0)
+		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+					   pcpu_fc_alloc,
+					   pcpu_fc_free,
+					   pcpu_populate_pte);
+#endif
 	if (rc < 0)
-		panic("Failed to initialize percpu areas.");
+		panic("Failed to initialize percpu areas (err=%d).", rc);
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu)