Skip to content

Commit b8e4a47

Browse files
Marc Zyngier authored and Russell King committed
ARM: 7768/1: prevent risks of out-of-bound access in ASID allocator
On a CPU that never ran anything, both the active and reserved ASID fields are set to zero. In this case the ASID_TO_IDX() macro will return -1, which is not a very useful value to index a bitmap. Instead of trying to offset the ASID so that ASID #1 is actually bit 0 in the asid_map bitmap, just always ignore bit 0 and start the search from bit 1. This makes the code a bit more readable, and without risk of OoB access. Cc: <[email protected]> # 3.9 Acked-by: Will Deacon <[email protected]> Acked-by: Catalin Marinas <[email protected]> Reported-by: Catalin Marinas <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Signed-off-by: Russell King <[email protected]>
1 parent ae120d9 commit b8e4a47

File tree

1 file changed

+8
-9
lines changed

1 file changed

+8
-9
lines changed

arch/arm/mm/context.c

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -39,10 +39,7 @@
3939
* non 64-bit operations.
4040
*/
4141
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
42-
#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
43-
44-
#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1)
45-
#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK)
42+
#define NUM_USER_ASIDS ASID_FIRST_VERSION
4643

4744
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4845
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
@@ -137,7 +134,7 @@ static void flush_context(unsigned int cpu)
137134
*/
138135
if (asid == 0)
139136
asid = per_cpu(reserved_asids, i);
140-
__set_bit(ASID_TO_IDX(asid), asid_map);
137+
__set_bit(asid & ~ASID_MASK, asid_map);
141138
}
142139
per_cpu(reserved_asids, i) = asid;
143140
}
@@ -176,17 +173,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
176173
/*
177174
* Allocate a free ASID. If we can't find one, take a
178175
* note of the currently active ASIDs and mark the TLBs
179-
* as requiring flushes.
176+
* as requiring flushes. We always count from ASID #1,
177+
* as we reserve ASID #0 to switch via TTBR0 and indicate
178+
* rollover events.
180179
*/
181-
asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
180+
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
182181
if (asid == NUM_USER_ASIDS) {
183182
generation = atomic64_add_return(ASID_FIRST_VERSION,
184183
&asid_generation);
185184
flush_context(cpu);
186-
asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
185+
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
187186
}
188187
__set_bit(asid, asid_map);
189-
asid = generation | IDX_TO_ASID(asid);
188+
asid |= generation;
190189
cpumask_clear(mm_cpumask(mm));
191190
}
192191

0 commit comments

Comments (0)