  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
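For illustration only, a minimal standalone sketch (not kernel code) of what this macro change buys: with NUM_USER_ASIDS equal to ASID_FIRST_VERSION the bitmap gains one extra bit, so the low bits of a context ID can index the bitmap directly and the ASID_TO_IDX/IDX_TO_ASID off-by-one translation goes away. ASID_BITS is assumed to be 8 here, and ASID_MASK is redefined locally to match the pattern used in this file.

#include <stdio.h>
#include <stdint.h>

#define ASID_BITS		8
#define ASID_MASK		(~0ULL << ASID_BITS)
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)

/* Old scheme: bitmap of ASID_FIRST_VERSION - 1 bits, index = asid - 1. */
#define OLD_NUM_USER_ASIDS	(ASID_FIRST_VERSION - 1)
#define OLD_ASID_TO_IDX(asid)	(((asid) & ~ASID_MASK) - 1)

/* New scheme: bitmap of ASID_FIRST_VERSION bits, index = asid, bit 0 reserved. */
#define NEW_NUM_USER_ASIDS	ASID_FIRST_VERSION

int main(void)
{
	uint64_t asid = 0x2a;	/* hypothetical hardware ASID value */

	printf("old: %llu map bits, asid %#llx -> idx %llu\n",
	       (unsigned long long)OLD_NUM_USER_ASIDS,
	       (unsigned long long)asid,
	       (unsigned long long)OLD_ASID_TO_IDX(asid));
	printf("new: %llu map bits, asid %#llx -> idx %llu (bit 0 kept reserved)\n",
	       (unsigned long long)NEW_NUM_USER_ASIDS,
	       (unsigned long long)asid,
	       (unsigned long long)(asid & ~ASID_MASK));
	return 0;
}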
@@ -137,7 +134,7 @@ static void flush_context(unsigned int cpu)
 			 */
 			if (asid == 0)
 				asid = per_cpu(reserved_asids, i);
-			__set_bit(ASID_TO_IDX(asid), asid_map);
+			__set_bit(asid & ~ASID_MASK, asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
 	}
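A small illustration (hypothetical values, not kernel code) of why masking alone is now enough at this call site: the context ID packs the generation in the upper bits and the hardware ASID in the low ASID_BITS, so asid & ~ASID_MASK already yields the bitmap index once index 0 is reserved rather than shifted away.

#include <assert.h>
#include <stdint.h>

#define ASID_BITS	8
#define ASID_MASK	(~0ULL << ASID_BITS)

int main(void)
{
	uint64_t generation = 3ULL << ASID_BITS;	/* hypothetical generation */
	uint64_t hw_asid = 42;				/* hypothetical hardware ASID */
	uint64_t ctx_id = generation | hw_asid;		/* packed context ID */

	/* The bitmap index is just the low bits; no -1 adjustment needed. */
	assert((ctx_id & ~ASID_MASK) == hw_asid);
	return 0;
}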
@@ -176,17 +173,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes.
+		 * as requiring flushes. We always count from ASID #1,
+		 * as we reserve ASID #0 to switch via TTBR0 and indicate
+		 * rollover events.
 		 */
-		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
-			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
-		asid = generation | IDX_TO_ASID(asid);
+		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
 
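To see how the reworked allocation path behaves, here is a rough userspace sketch of the same idea. It is deliberately simplified and hedged: there is no locking and no per-CPU reserved-ASID handling, and helpers such as map_find_next_zero() and alloc_asid() are local stand-ins, not kernel APIs. Searching from bit 1 keeps ASID #0 permanently free for the reserved TTBR0/rollover role, and exhausting the map bumps the generation and clears the bitmap, mirroring the find_next_zero_bit()/flush_context() sequence above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS		8
#define ASID_MASK		(~0ULL << ASID_BITS)
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static bool asid_map[NUM_USER_ASIDS];		/* stand-in for the kernel bitmap */
static uint64_t asid_generation = ASID_FIRST_VERSION;

/* Local stand-in for find_next_zero_bit(): first clear slot at or after start. */
static uint64_t map_find_next_zero(uint64_t size, uint64_t start)
{
	for (uint64_t i = start; i < size; i++)
		if (!asid_map[i])
			return i;
	return size;
}

/* Simplified allocation: no locking, no reuse of reserved ASIDs. */
static uint64_t alloc_asid(void)
{
	uint64_t asid = map_find_next_zero(NUM_USER_ASIDS, 1);

	if (asid == NUM_USER_ASIDS) {
		/* Rollover: bump the generation and forget all allocations. */
		asid_generation += ASID_FIRST_VERSION;
		memset(asid_map, 0, sizeof(asid_map));
		asid = map_find_next_zero(NUM_USER_ASIDS, 1);
	}
	asid_map[asid] = true;
	return asid | asid_generation;	/* mirrors "asid |= generation" */
}

int main(void)
{
	for (int i = 0; i < NUM_USER_ASIDS + 2; i++) {
		uint64_t id = alloc_asid();

		/* Print the first two and the last few allocations. */
		if (i < 2 || i >= NUM_USER_ASIDS - 1)
			printf("ctx %3d: generation %#llx, hw asid %llu\n", i,
			       (unsigned long long)(id & ASID_MASK),
			       (unsigned long long)(id & ~ASID_MASK));
	}
	return 0;	/* the hardware ASID is never 0 for a user context */
}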