@@ -46,7 +46,7 @@ GrBlockAllocator::Block::~Block() {
 
 size_t GrBlockAllocator::totalSize() const {
     // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
-    size_t size = offsetof(GrBlockAllocator, fHead);
+    size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
     for (const Block* b : this->blocks()) {
         size += b->fSize;
     }
@@ -55,7 +55,10 @@ size_t GrBlockAllocator::totalSize() const {
 }
 
 size_t GrBlockAllocator::totalUsableSpace() const {
-    size_t size = 0;
+    size_t size = this->scratchBlockSize();
+    if (size > 0) {
+        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
+    }
     for (const Block* b : this->blocks()) {
         size += (b->fSize - kDataStart);
     }
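For concreteness, a worked example of the accounting above (the numbers are made up; kDataStart is the offset of a block's usable space, and scratchBlockSize() reports the scratch block's full size, per the comment in the hunk):

    // Suppose offsetof(GrBlockAllocator, fHead) == 16, kDataStart == 16, one live
    // 256-byte head block, and a 512-byte scratch block parked by releaseBlock():
    //   totalSize()        = 16 + 512 + 256          = 784
    //   totalUsableSpace() = (512 - 16) + (256 - 16) = 736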
@@ -87,9 +90,14 @@ GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
 }
 
 void GrBlockAllocator::releaseBlock(Block* block) {
-    if (block->fPrev) {
-        // Unlink block from the double-linked list of blocks
-        SkASSERT(block != &fHead);
+    if (block == &fHead) {
+        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
+        block->fCursor = kDataStart;
+        block->fMetadata = 0;
+        // Unlike in reset(), we don't set the head's next block to null because there are
+        // potentially heap-allocated blocks that are still connected to it.
+    } else {
+        SkASSERT(block->fPrev);
         block->fPrev->fNext = block->fNext;
         if (block->fNext) {
             SkASSERT(fTail != block);
@@ -99,14 +107,17 @@ void GrBlockAllocator::releaseBlock(Block* block) {
             fTail = block->fPrev;
         }
 
-        delete block;
-    } else {
-        // Reset the cursor of the head block so that it can be reused
-        SkASSERT(block == &fHead);
-        block->fCursor = kDataStart;
-        block->fMetadata = 0;
-        // Unlike in reset(), we don't set the head's next block to null because there are
-        // potentially heap-allocated blocks that are still connected to it.
+        // The released block becomes the new scratch block (if it's bigger), or is deleted
+        if (this->scratchBlockSize() < block->fSize) {
+            SkASSERT(block != fHead.fPrev); // sanity check, shouldn't already be the scratch block
+            if (fHead.fPrev) {
+                delete fHead.fPrev;
+            }
+            block->markAsScratch();
+            fHead.fPrev = block;
+        } else {
+            delete block;
+        }
     }
 
     // Decrement growth policy (opposite of addBlock()'s increment operations)
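Note that scratchBlockSize(), markAsScratch(), and isScratch() are not part of this diff; they live in the header. Given that releaseBlock() parks the scratch block on fHead.fPrev, scratchBlockSize() is presumably a one-liner along these lines (a sketch; the exact return type and definition are assumed):

    int GrBlockAllocator::scratchBlockSize() const {
        // Sketch: the scratch block, if any, hangs off the head block's fPrev pointer
        return fHead.fPrev ? fHead.fPrev->fSize : 0;
    }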
@@ -139,62 +150,89 @@ void GrBlockAllocator::reset() {
             b->fNext = nullptr;
             b->fCursor = kDataStart;
             b->fMetadata = 0;
-
-            // For reset(), but NOT releaseBlock(), the head allocatorMetadata resets too
+            // For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
+            // are reset/destroyed.
             b->fAllocatorMetadata = 0;
+            this->resetScratchSpace();
         } else {
             delete b;
         }
     }
-    SkASSERT(fTail == &fHead && fHead.fNext == nullptr &&
+    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
              fHead.metadata() == 0 && fHead.fCursor == kDataStart);
 
     GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
     fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
     fN1 = 1;
 }
 
+void GrBlockAllocator::resetScratchSpace() {
+    if (fHead.fPrev) {
+        delete fHead.fPrev;
+        fHead.fPrev = nullptr;
+    }
+}
+
 void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
     SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);
 
     // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
     static constexpr int kMaxN = (1 << 23) - 1;
     static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow
 
-    // Calculate the 'next' size per growth policy sequence
-    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
-    int nextN1 = fN0 + fN1;
-    int nextN0;
-    if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
-        nextN0 = fN0;
-    } else if (gp == GrowthPolicy::kFibonacci) {
-        nextN0 = fN1;
-    } else {
-        SkASSERT(gp == GrowthPolicy::kExponential);
-        nextN0 = nextN1;
-    }
-    fN0 = std::min(kMaxN, nextN0);
-    fN1 = std::min(kMaxN, nextN1);
+    auto alignAllocSize = [](int size) {
+        // Round to a nice boundary since the block isn't maxing out:
+        // if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
+        // nicely with jeMalloc (from SkArenaAlloc).
+        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
+        return (size + mask) & ~mask;
+    };
 
-    // However, must guard against overflow here, since all the size-based asserts prevented
-    // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
-    int sizeIncrement = fBlockIncrement * kAddressAlign;
     int allocSize;
-    if (maxSize / sizeIncrement < nextN1) {
-        // The growth policy would overflow, so use the max. We've already confirmed that maxSize
-        // will be sufficient for the requested minimumSize
-        allocSize = maxSize;
+    void* mem = nullptr;
+    if (this->scratchBlockSize() >= minimumSize) {
+        // Activate the scratch block instead of making a new block
+        SkASSERT(fHead.fPrev->isScratch());
+        allocSize = fHead.fPrev->fSize;
+        mem = fHead.fPrev;
+        fHead.fPrev = nullptr;
+    } else if (minimumSize < maxSize) {
+        // Calculate the 'next' size per growth policy sequence
+        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
+        int nextN1 = fN0 + fN1;
+        int nextN0;
+        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
+            nextN0 = fN0;
+        } else if (gp == GrowthPolicy::kFibonacci) {
+            nextN0 = fN1;
+        } else {
+            SkASSERT(gp == GrowthPolicy::kExponential);
+            nextN0 = nextN1;
+        }
+        fN0 = std::min(kMaxN, nextN0);
+        fN1 = std::min(kMaxN, nextN1);
+
+        // However, must guard against overflow here, since all the size-based asserts prevented
+        // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
+        int sizeIncrement = fBlockIncrement * kAddressAlign;
+        if (maxSize / sizeIncrement < nextN1) {
+            // The growth policy would overflow, so use the max. We've already confirmed that
+            // maxSize will be sufficient for the requested minimumSize
+            allocSize = maxSize;
+        } else {
+            allocSize = std::min(alignAllocSize(std::max(minimumSize, sizeIncrement * nextN1)),
+                                 maxSize);
+        }
     } else {
-        allocSize = std::max(minimumSize, sizeIncrement * nextN1);
-        // Then round to a nice boundary since the block isn't maxing out:
-        // if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
-        // nicely with jeMalloc (from SkArenaAlloc).
-        int mask = allocSize > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
-        allocSize = std::min((allocSize + mask) & ~mask, maxSize);
+        SkASSERT(minimumSize == maxSize);
+        // Still align on a nice boundary, no max clamping since that would just undo the alignment
+        allocSize = alignAllocSize(minimumSize);
     }
 
     // Create new block and append to the linked list of blocks in this allocator
-    void* mem = operator new(allocSize);
+    if (!mem) {
+        mem = operator new(allocSize);
+    }
     fTail->fNext = new (mem) Block(fTail, allocSize);
     fTail = fTail->fNext;
 }
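The growth-policy recurrence and the alignAllocSize() rounding are easy to sanity-check in isolation. Below is a minimal standalone sketch (not Skia code; 16 stands in for kAddressAlign, i.e. alignof(std::max_align_t) on common platforms):

    #include <cstdio>

    // Same mask trick as alignAllocSize() above: round up to max_align_t below 32K,
    // and to a 4K page boundary above it.
    static int alignAllocSize(int size) {
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (16 - 1);
        return (size + mask) & ~mask;
    }

    int main() {
        std::printf("%d\n", alignAllocSize(1000));   // 1008  (next multiple of 16)
        std::printf("%d\n", alignAllocSize(40000));  // 40960 (next multiple of 4096)

        // Fibonacci policy: reset() seeds fN0 = 0, fN1 = 1; each addBlock() computes
        // nextN1 = fN0 + fN1 and nextN0 = fN1, so the size multiplier nextN1 runs
        // 1, 2, 3, 5, 8, ...
        int n0 = 0, n1 = 1;
        for (int i = 0; i < 5; ++i) {
            int nextN1 = n0 + n1;
            n0 = n1;                  // kFibonacci: nextN0 = fN1
            n1 = nextN1;
            std::printf("%d ", n1);   // prints: 1 2 3 5 8
        }
        std::printf("\n");
        return 0;
    }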
@@ -207,7 +245,13 @@ void GrBlockAllocator::validate() const {
         blocks.push_back(block);
 
         SkASSERT(kAssignedMarker == block->fSentinel);
-        SkASSERT(prev == block->fPrev);
+        if (block == &fHead) {
+            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
+            // considered part of the linked list
+            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
+        } else {
+            SkASSERT(prev == block->fPrev);
+        }
         if (prev) {
             SkASSERT(prev->fNext == block);
         }
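Taken together, the releaseBlock()/addBlock() changes form a recycling loop: the largest freed block is parked on fHead.fPrev instead of being deleted, and the next request it can satisfy is placement-newed into that same memory. A minimal standalone model of just that policy (not Skia code; all of Block's real bookkeeping is omitted):

    #include <cstdio>
    #include <new>

    struct Block {
        int fSize;
        explicit Block(int size) : fSize(size) {}
    };

    struct Allocator {
        Block* fScratch = nullptr;  // stands in for fHead.fPrev

        int scratchBlockSize() const { return fScratch ? fScratch->fSize : 0; }

        void releaseBlock(Block* b) {
            if (scratchBlockSize() < b->fSize) {
                delete fScratch;    // keep only the largest candidate
                fScratch = b;       // park it instead of freeing it
            } else {
                delete b;
            }
        }

        Block* addBlock(int minimumSize) {
            if (scratchBlockSize() >= minimumSize) {
                int size = fScratch->fSize;
                void* mem = fScratch;   // reuse the parked memory, no operator new
                fScratch = nullptr;
                return new (mem) Block(size);
            }
            return new Block(minimumSize);
        }
    };

    int main() {
        Allocator a;
        Block* b = a.addBlock(256);
        void* old = b;
        a.releaseBlock(b);             // parked as scratch, not freed
        Block* c = a.addBlock(128);    // 256 >= 128, so the same memory comes back
        std::printf("reused: %d\n", (void*) c == old);  // expected: 1
        delete c;
        return 0;
    }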