+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
 package blockdb
 
 import (
 	"encoding"
 	"encoding/binary"
+	"errors"
 	"fmt"
+	"io"
 	"math"
+	"os"
 
 	"github.com/cespare/xxhash/v2"
 	"go.uber.org/zap"
 )
 
 var (
-	_ encoding.BinaryMarshaler   = blockHeader{}
-	_ encoding.BinaryUnmarshaler = &blockHeader{}
+	_ encoding.BinaryMarshaler   = (*blockHeader)(nil)
+	_ encoding.BinaryUnmarshaler = (*blockHeader)(nil)
 
 	sizeOfBlockHeader = uint64(binary.Size(blockHeader{}))
 )
@@ -68,11 +74,12 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData, headerSize Bl
 		return ErrDatabaseClosed
 	}
 
-	if len(block) == 0 {
+	blockDataLen := uint64(len(block))
+	if blockDataLen == 0 {
 		return ErrBlockEmpty
 	}
 
-	if len(block) > MaxBlockDataSize {
+	if blockDataLen > MaxBlockDataSize {
 		return ErrBlockTooLarge
 	}
 
@@ -85,7 +92,6 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData, headerSize Bl
 		return err
 	}
 
-	blockDataLen := uint64(len(block))
 	sizeWithDataHeader := sizeOfBlockHeader + blockDataLen
 	writeDataOffset, err := s.allocateBlockSpace(sizeWithDataHeader)
 	if err != nil {
@@ -129,12 +135,15 @@ func (s *Database) ReadBlock(height BlockHeight) (BlockData, error) {
 
 	// Read the complete block data
 	blockData := make(BlockData, indexEntry.Size)
-	actualDataOffset := indexEntry.Offset + sizeOfBlockHeader
-	if actualDataOffset < indexEntry.Offset {
-		return nil, fmt.Errorf("internal error: block data offset calculation overflowed")
+	dataFile, localOffset, err := s.getDataFileAndOffset(indexEntry.Offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get data file for block at height %d: %w", height, err)
 	}
-	_, err = s.dataFile.ReadAt(blockData, int64(actualDataOffset))
+	_, err = dataFile.ReadAt(blockData, int64(localOffset+sizeOfBlockHeader))
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
 		return nil, fmt.Errorf("failed to read block data from data file: %w", err)
 	}
 
@@ -171,12 +180,15 @@ func (s *Database) ReadHeader(height BlockHeight) (BlockData, error) {
 
 	// Read only the header portion
 	headerData := make([]byte, indexEntry.HeaderSize)
-	actualDataOffset := indexEntry.Offset + sizeOfBlockHeader
-	if actualDataOffset < indexEntry.Offset {
-		return nil, fmt.Errorf("internal error: block data offset calculation overflowed")
+	dataFile, localOffset, err := s.getDataFileAndOffset(indexEntry.Offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get data file for block header at height %d: %w", height, err)
 	}
-	_, err = s.dataFile.ReadAt(headerData, int64(actualDataOffset))
+	_, err = dataFile.ReadAt(headerData, int64(localOffset+sizeOfBlockHeader))
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
 		return nil, fmt.Errorf("failed to read block header data from data file: %w", err)
 	}
 
@@ -203,9 +215,16 @@ func (s *Database) ReadBody(height BlockHeight) (BlockData, error) {
 
 	bodySize := indexEntry.Size - uint64(indexEntry.HeaderSize)
 	bodyData := make([]byte, bodySize)
-	bodyOffset := indexEntry.Offset + sizeOfBlockHeader + uint64(indexEntry.HeaderSize)
-	_, err = s.dataFile.ReadAt(bodyData, int64(bodyOffset))
+	dataFile, localOffset, err := s.getDataFileAndOffset(indexEntry.Offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get data file for block body at height %d: %w", height, err)
+	}
+	bodyOffset := localOffset + sizeOfBlockHeader + uint64(indexEntry.HeaderSize)
+	_, err = dataFile.ReadAt(bodyData, int64(bodyOffset))
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
 		return nil, fmt.Errorf("failed to read block body data from data file: %w", err)
 	}
 	return bodyData, nil
@@ -221,16 +240,21 @@ func (s *Database) writeBlockAt(offset uint64, bh blockHeader, block BlockData)
 		return fmt.Errorf("failed to serialize block header: %w", err)
 	}
 
+	dataFile, localOffset, err := s.getDataFileAndOffset(offset)
+	if err != nil {
+		return fmt.Errorf("failed to get data file for writing block %d: %w", bh.Height, err)
+	}
+
 	// Allocate combined buffer for header and block data and write it to the data file
 	combinedBuf := make([]byte, sizeOfBlockHeader+uint64(len(block)))
 	copy(combinedBuf, headerBytes)
 	copy(combinedBuf[sizeOfBlockHeader:], block)
-	if _, err := s.dataFile.WriteAt(combinedBuf, int64(offset)); err != nil {
+	if _, err := dataFile.WriteAt(combinedBuf, int64(localOffset)); err != nil {
 		return fmt.Errorf("failed to write block to data file at offset %d: %w", offset, err)
 	}
 
 	if s.syncToDisk {
-		if err := s.dataFile.Sync(); err != nil {
+		if err := dataFile.Sync(); err != nil {
 			return fmt.Errorf("failed to sync data file after writing block %d: %w", bh.Height, err)
 		}
 	}
@@ -285,29 +309,51 @@ func (s *Database) updateBlockHeights(writtenBlockHeight BlockHeight) error {
 	return nil
 }
 
-func (s *Database) allocateBlockSpace(sizeWithDataHeader uint64) (writeDataOffset uint64, err error) {
+func (s *Database) allocateBlockSpace(totalSize uint64) (writeDataOffset uint64, err error) {
 	maxDataFileSize := s.header.MaxDataFileSize
 
+	// Check if a single block would exceed the max data file size
+	if maxDataFileSize > 0 && totalSize > maxDataFileSize {
+		return 0, ErrBlockTooLarge
+	}
+
 	for {
-		// Check if the new offset would overflow uint64.
 		currentOffset := s.nextDataWriteOffset.Load()
-		if currentOffset > math.MaxUint64-sizeWithDataHeader {
+		if currentOffset > math.MaxUint64-totalSize {
 			return 0, fmt.Errorf(
 				"adding block of size %d to offset %d would overflow uint64 data file pointer",
-				sizeWithDataHeader, currentOffset,
+				totalSize, currentOffset,
 			)
 		}
 
-		newOffset := currentOffset + sizeWithDataHeader
-		if maxDataFileSize > 0 && newOffset > maxDataFileSize {
-			return 0, fmt.Errorf(
-				"adding block of size %d to offset %d (new offset %d) would exceed configured max data file size of %d bytes",
-				sizeWithDataHeader, currentOffset, newOffset, maxDataFileSize,
-			)
+		writeOffset := currentOffset
+		newOffset := currentOffset + totalSize
+
+		if maxDataFileSize > 0 {
+			fileIndex := int(currentOffset / maxDataFileSize)
+			localOffset := currentOffset % maxDataFileSize
+
+			if localOffset+totalSize > maxDataFileSize {
+				writeOffset = (uint64(fileIndex) + 1) * maxDataFileSize
+				newOffset = writeOffset + totalSize
+			}
 		}
 
 		if s.nextDataWriteOffset.CompareAndSwap(currentOffset, newOffset) {
-			return currentOffset, nil
+			return writeOffset, nil
 		}
 	}
 }
+
+func (s *Database) getDataFileAndOffset(globalOffset uint64) (*os.File, uint64, error) {
+	maxFileSize := s.header.MaxDataFileSize
+	if maxFileSize == 0 {
+		handle, err := s.getOrOpenDataFile(0)
+		return handle, globalOffset, err
+	}
+
+	fileIndex := int(globalOffset / maxFileSize)
+	localOffset := globalOffset % maxFileSize
+	handle, err := s.getOrOpenDataFile(fileIndex)
+	return handle, localOffset, err
+}
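
Note on the new multi-file layout introduced above: allocateBlockSpace still hands out a single global offset, but when MaxDataFileSize is set, a block that would straddle a file boundary is moved to the start of the next data file, and getDataFileAndOffset maps a global offset back to a (data file, local offset) pair. Below is a minimal standalone sketch of that arithmetic, using a hypothetical 1000-byte MaxDataFileSize; it is illustrative only and does not use the database's actual types or API.

package main

import "fmt"

// splitOffset mirrors getDataFileAndOffset: a global offset is mapped to the
// index of the data file that holds it and the offset within that file.
func splitOffset(globalOffset, maxFileSize uint64) (fileIndex int, localOffset uint64) {
	if maxFileSize == 0 {
		return 0, globalOffset // single, unbounded data file
	}
	return int(globalOffset / maxFileSize), globalOffset % maxFileSize
}

// allocate mirrors allocateBlockSpace's boundary handling: a record that would
// straddle a file boundary is pushed to the start of the next data file.
func allocate(currentOffset, totalSize, maxFileSize uint64) (writeOffset, nextOffset uint64) {
	writeOffset = currentOffset
	if maxFileSize > 0 && currentOffset%maxFileSize+totalSize > maxFileSize {
		writeOffset = (currentOffset/maxFileSize + 1) * maxFileSize
	}
	return writeOffset, writeOffset + totalSize
}

func main() {
	const maxFileSize = 1000 // hypothetical MaxDataFileSize
	// A 300-byte record requested at global offset 900 would cross the first
	// file's boundary, so it is placed at offset 1000: file 1, local offset 0.
	writeOffset, next := allocate(900, 300, maxFileSize)
	fileIndex, localOffset := splitOffset(writeOffset, maxFileSize)
	fmt.Println(writeOffset, next, fileIndex, localOffset) // 1000 1300 1 0
}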