
Commit 822ee43

ardbiesheuvel authored and intel-lab-lkp committed
arm64/mm: Drop configurable 48-bit physical address space limit
Currently, the maximum supported physical address space can be configured as either 48 bits or 52 bits. The only remaining difference between these in practice is that the former omits the masking and shifting required to construct TTBR and PTE values, which carry bits #48 and higher disjoint from the rest of the physical address.

The overhead of performing these additional calculations is negligible, and so there is little reason to retain support for two different configurations; we can simply support whatever the hardware supports.

Signed-off-by: Ard Biesheuvel <[email protected]>
1 parent 185de45 commit 822ee43
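
The "masking and shifting" mentioned above refers to how a 52-bit physical address is split when written into a TTBR or a page table entry: the low address bits stay in place while bits [51:48] are folded into a low-order field. Below is a minimal user-space C sketch of the TTBR case, mirroring the phys_to_ttbr() macro that appears in the arch/arm64/include/asm/pgtable.h hunk further down; the sample address is made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l)   (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
    #define TTBR_BADDR_MASK_52  GENMASK_ULL(47, 2)

    /* Same folding as the kernel's phys_to_ttbr(): PA[47:6] stays put,
     * PA[51:48] is shifted down into TTBR bits [5:2]. This is why the
     * top-level table must be at least 64-byte aligned.
     */
    static uint64_t phys_to_ttbr(uint64_t addr)
    {
        return (addr | (addr >> 46)) & TTBR_BADDR_MASK_52;
    }

    int main(void)
    {
        uint64_t pa = 0x000f00000abcd000ULL;    /* made-up 52-bit physical address */

        printf("pa   = 0x%016llx\n", (unsigned long long)pa);
        printf("ttbr = 0x%016llx\n", (unsigned long long)phys_to_ttbr(pa));
        return 0;
    }

With the 48-bit configuration removed, this folding is performed unconditionally; on hardware without 52-bit PA support the high bits are simply zero, so the extra OR/AND have no effect.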

File tree: 12 files changed, +14 / -81 lines


arch/arm64/Kconfig

Lines changed: 1 addition & 30 deletions
@@ -1416,38 +1416,9 @@ config ARM64_VA_BITS
         default 48 if ARM64_VA_BITS_48
         default 52 if ARM64_VA_BITS_52
 
-choice
-        prompt "Physical address space size"
-        default ARM64_PA_BITS_48
-        help
-          Choose the maximum physical address range that the kernel will
-          support.
-
-config ARM64_PA_BITS_48
-        bool "48-bit"
-        depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
-
-config ARM64_PA_BITS_52
-        bool "52-bit"
-        depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
-        help
-          Enable support for a 52-bit physical address space, introduced as
-          part of the ARMv8.2-LPA extension.
-
-          With this enabled, the kernel will also continue to work on CPUs that
-          do not support ARMv8.2-LPA, but with some added memory overhead (and
-          minor performance overhead).
-
-endchoice
-
-config ARM64_PA_BITS
-        int
-        default 48 if ARM64_PA_BITS_48
-        default 52 if ARM64_PA_BITS_52
-
 config ARM64_LPA2
         def_bool y
-        depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+        depends on !ARM64_64K_PAGES
 
 choice
         prompt "Endianness"

arch/arm64/include/asm/assembler.h

Lines changed: 2 additions & 11 deletions
@@ -342,14 +342,13 @@ alternative_cb_end
         mrs     \tmp0, ID_AA64MMFR0_EL1
         // Narrow PARange to fit the PS field in TCR_ELx
         ubfx    \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
-        mov     \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
 #ifdef CONFIG_ARM64_LPA2
 alternative_if_not ARM64_HAS_VA52
         mov     \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
-alternative_else_nop_endif
-#endif
         cmp     \tmp0, \tmp1
         csel    \tmp0, \tmp1, \tmp0, hi
+alternative_else_nop_endif
+#endif
         bfi     \tcr, \tmp0, \pos, #3
         .endm
 
@@ -599,21 +598,13 @@ alternative_endif
  * ttbr: returns the TTBR value
  */
         .macro  phys_to_ttbr, ttbr, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
         orr     \ttbr, \phys, \phys, lsr #46
         and     \ttbr, \ttbr, #TTBR_BADDR_MASK_52
-#else
-        mov     \ttbr, \phys
-#endif
         .endm
 
         .macro  phys_to_pte, pte, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
         orr     \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
         and     \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
-#else
-        mov     \pte, \phys
-#endif
         .endm
 
 /*
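
The first hunk above changes the TCR computation so that the hardware-reported PARange is only clamped when the kernel is built with LPA2 page tables but the CPU lacks the 52-bit VA/PA capability; otherwise the value is used as-is. A rough C rendering of that logic follows, assuming lpa2_configured and has_va52 stand in for the CONFIG_ARM64_LPA2 ifdef and the ARM64_HAS_VA52 alternative (the names are illustrative, not the kernel's):

    #include <stdbool.h>

    /* ID_AA64MMFR0_EL1.PARange encodings (Arm ARM): 0x5 = 48 bits, 0x6 = 52 bits. */
    #define PARANGE_48  0x5
    #define PARANGE_52  0x6

    /* Sketch of the narrowed clamp: the hardware PARange goes straight into
     * the 3-bit TCR_ELx PS/IPS field, except when LPA2 page tables are in
     * use on a CPU without the 52-bit capability, where it is capped at 48.
     */
    static unsigned int tcr_ps(unsigned int parange, bool lpa2_configured, bool has_va52)
    {
        if (lpa2_configured && !has_va52 && parange > PARANGE_48)
            parange = PARANGE_48;
        return parange;
    }

Previously, \tmp1 was always loaded with ID_AA64MMFR0_EL1_PARANGE_MAX, whose definition (dropped from sysreg.h below) depended on the Kconfig physical-address-size choice being removed here.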

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 2 deletions
@@ -883,9 +883,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
          * However, by the "D10.1.4 Principles of the ID scheme
          * for fields in ID registers", ARM DDI 0487C.a, any new
          * value is guaranteed to be higher than what we know already.
-         * As a safe limit, we return the limit supported by the kernel.
          */
-        default: return CONFIG_ARM64_PA_BITS;
+        default: return 52;
         }
 }
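
For context, id_aa64mmfr0_parange_to_phys_shift() translates the architected PARange encoding into a physical address width; only its default arm (and the comment above it) changes here. A simplified sketch of that mapping, not the kernel's exact source, with the encoding-to-width values taken from the Arm ARM and the new default of 52 for encodings the kernel does not yet know about:

    /* Simplified sketch of id_aa64mmfr0_parange_to_phys_shift(). */
    static unsigned int parange_to_phys_shift(unsigned int parange)
    {
        switch (parange) {
        case 0x0: return 32;
        case 0x1: return 36;
        case 0x2: return 40;
        case 0x3: return 42;
        case 0x4: return 44;
        case 0x5: return 48;
        case 0x6: return 52;
        default:  return 52;    /* unknown/newer encodings: assume at least 52 */
        }
    }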

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 1 addition & 2 deletions
@@ -30,8 +30,7 @@
 
 static inline u64 kvm_get_parange_max(void)
 {
-        if (kvm_lpa2_is_enabled() ||
-            (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
+        if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
                 return ID_AA64MMFR0_EL1_PARANGE_52;
         else
                 return ID_AA64MMFR0_EL1_PARANGE_48;

arch/arm64/include/asm/pgtable-hwdef.h

Lines changed: 1 addition & 5 deletions
@@ -176,7 +176,6 @@
 #define PTE_SWBITS_MASK         _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
 
 #define PTE_ADDR_LOW            (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
-#ifdef CONFIG_ARM64_PA_BITS_52
 #ifdef CONFIG_ARM64_64K_PAGES
 #define PTE_ADDR_HIGH           (_AT(pteval_t, 0xf) << 12)
 #define PTE_ADDR_HIGH_SHIFT     36
@@ -186,7 +185,6 @@
 #define PTE_ADDR_HIGH_SHIFT     42
 #define PHYS_TO_PTE_ADDR_MASK   GENMASK_ULL(49, 8)
 #endif
-#endif
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -327,12 +325,10 @@
 /*
  * TTBR.
  */
-#ifdef CONFIG_ARM64_PA_BITS_52
 /*
- * TTBR_ELx[1] is RES0 in this configuration.
+ * TTBR_ELx[1] is RES0 when using 52-bit physical addressing
  */
 #define TTBR_BADDR_MASK_52      GENMASK_ULL(47, 2)
-#endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
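
A short user-space sketch of how the non-64K (LPA2) constants above are used: the address is ORed with itself shifted right by PTE_ADDR_HIGH_SHIFT, so PA[51:50] lands in descriptor bits [9:8] while PA[49:12] stays in place. The sample address is made up and the helper is open-coded here rather than taken verbatim from the kernel.

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l)       (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    /* LPA2 (4K/16K pages) layout from the header above. */
    #define PTE_ADDR_HIGH_SHIFT     42
    #define PHYS_TO_PTE_ADDR_MASK   GENMASK_ULL(49, 8)

    int main(void)
    {
        uint64_t phys = 0x000c000012345000ULL;  /* made-up PA with PA[51:50] = 0b11 */
        uint64_t pte_addr = (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;

        /* PA[49:12] stays in place, PA[51:50] lands in bits [9:8]. */
        printf("phys     = 0x%016llx\n", (unsigned long long)phys);
        printf("pte addr = 0x%016llx\n", (unsigned long long)pte_addr);
        return 0;
    }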

arch/arm64/include/asm/pgtable-prot.h

Lines changed: 2 additions & 2 deletions
@@ -81,7 +81,7 @@ extern unsigned long prot_ns_shared;
 #define lpa2_is_enabled()       false
 #define PTE_MAYBE_SHARED        PTE_SHARED
 #define PMD_MAYBE_SHARED        PMD_SECT_S
-#define PHYS_MASK_SHIFT         (CONFIG_ARM64_PA_BITS)
+#define PHYS_MASK_SHIFT         (52)
 #else
 static inline bool __pure lpa2_is_enabled(void)
 {
@@ -90,7 +90,7 @@ static inline bool __pure lpa2_is_enabled(void)
 
 #define PTE_MAYBE_SHARED        (lpa2_is_enabled() ? 0 : PTE_SHARED)
 #define PMD_MAYBE_SHARED        (lpa2_is_enabled() ? 0 : PMD_SECT_S)
-#define PHYS_MASK_SHIFT         (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
+#define PHYS_MASK_SHIFT         (lpa2_is_enabled() ? 52 : 48)
 #endif
 
 /*

arch/arm64/include/asm/pgtable.h

Lines changed: 1 addition & 10 deletions
@@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
         pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
 
 /*
- * Macros to convert between a physical address and its placement in a
+ * Helpers to convert between a physical address and its placement in a
  * page table entry, taking care of 52-bit addresses.
  */
-#ifdef CONFIG_ARM64_PA_BITS_52
 static inline phys_addr_t __pte_to_phys(pte_t pte)
 {
         pte_val(pte) &= ~PTE_MAYBE_SHARED;
@@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 {
         return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
 }
-#else
-#define __pte_to_phys(pte)      (pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys) (phys)
-#endif
 
 #define pte_pfn(pte)            (__pte_to_phys(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn,prot)       \
@@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
         update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)      (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
-#else
-#define phys_to_ttbr(addr)      (addr)
-#endif
 
 /*
  * On arm64 without hardware Access Flag, copying from user will fail because

arch/arm64/include/asm/sysreg.h

Lines changed: 0 additions & 6 deletions
@@ -916,12 +916,6 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX  0x7
 
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX    ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX    ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT            ID_AA64MMFR0_EL1_TGRAN4_SHIFT
 #define ID_AA64MMFR0_EL1_TGRAN_LPA2             ID_AA64MMFR0_EL1_TGRAN4_52_BIT

arch/arm64/mm/pgd.c

Lines changed: 5 additions & 4 deletions
@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 void __init pgtable_cache_init(void)
 {
+        unsigned int pgd_size = PGD_SIZE;
+
         if (pgdir_is_page_size())
                 return;
 
-#ifdef CONFIG_ARM64_PA_BITS_52
         /*
          * With 52-bit physical addresses, the architecture requires the
          * top-level table to be aligned to at least 64 bytes.
          */
-        BUILD_BUG_ON(PGD_SIZE < 64);
-#endif
+        if (PHYS_MASK_SHIFT >= 52)
+                pgd_size = max(pgd_size, 64);
 
         /*
         * Naturally aligned pgds required by the architecture.
         */
-        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+        pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
                                       SLAB_PANIC, NULL);
 }

arch/arm64/mm/proc.S

Lines changed: 0 additions & 2 deletions
@@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
 
         .macro  pte_to_phys, phys, pte
         and     \phys, \pte, #PTE_ADDR_LOW
-#ifdef CONFIG_ARM64_PA_BITS_52
         and     \pte, \pte, #PTE_ADDR_HIGH
         orr     \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
-#endif
         .endm
 
         .macro  kpti_mk_tbl_ng, type, num_entries
