Commit c6989b0

feat: data splitting & fix linting
1 parent 15ae1d1 commit c6989b0

File tree: 12 files changed (+473, -247 lines)

go.mod

Lines changed: 1 addition & 1 deletion
```diff
@@ -17,6 +17,7 @@ require (
 	github.com/ava-labs/ledger-avalanche/go v0.0.0-20241009183145-e6f90a8a1a60
 	github.com/ava-labs/libevm v1.13.14-0.3.0.rc.1
 	github.com/btcsuite/btcd/btcutil v1.1.3
+	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593
 	github.com/compose-spec/compose-go v1.20.2
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
@@ -89,7 +90,6 @@ require (
 	github.com/bits-and-blooms/bitset v1.10.0 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cockroachdb/errors v1.9.1 // indirect
 	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
 	github.com/cockroachdb/redact v1.1.3 // indirect
```

x/blockdb/block.go

Lines changed: 74 additions & 28 deletions
```diff
@@ -1,18 +1,24 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
 package blockdb
 
 import (
 	"encoding"
 	"encoding/binary"
+	"errors"
 	"fmt"
+	"io"
 	"math"
+	"os"
 
 	"github.com/cespare/xxhash/v2"
 	"go.uber.org/zap"
 )
 
 var (
-	_ encoding.BinaryMarshaler   = blockHeader{}
-	_ encoding.BinaryUnmarshaler = &blockHeader{}
+	_ encoding.BinaryMarshaler   = (*blockHeader)(nil)
+	_ encoding.BinaryUnmarshaler = (*blockHeader)(nil)
 
 	sizeOfBlockHeader = uint64(binary.Size(blockHeader{}))
 )
```
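Note on the assertion change above: asserting against a typed nil pointer is the conventional compile-time check that a type satisfies an interface; it avoids constructing a throwaway value and keeps both assertions in the same form. A minimal standalone sketch of the idiom (the `header` type here is hypothetical, not the blockdb struct):

```go
package main

import "encoding"

// header is a stand-in type for illustration only.
type header struct{ v uint64 }

func (h *header) MarshalBinary() ([]byte, error) { return nil, nil }
func (h *header) UnmarshalBinary(b []byte) error { return nil }

// Typed nil pointers prove *header implements both interfaces at
// compile time without allocating a value.
var (
	_ encoding.BinaryMarshaler   = (*header)(nil)
	_ encoding.BinaryUnmarshaler = (*header)(nil)
)

func main() {}
```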
```diff
@@ -68,11 +74,12 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData, headerSize Bl
 		return ErrDatabaseClosed
 	}
 
-	if len(block) == 0 {
+	blockDataLen := uint64(len(block))
+	if blockDataLen == 0 {
 		return ErrBlockEmpty
 	}
 
-	if len(block) > MaxBlockDataSize {
+	if blockDataLen > MaxBlockDataSize {
 		return ErrBlockTooLarge
 	}
 
@@ -85,7 +92,6 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData, headerSize Bl
 		return err
 	}
 
-	blockDataLen := uint64(len(block))
 	sizeWithDataHeader := sizeOfBlockHeader + blockDataLen
 	writeDataOffset, err := s.allocateBlockSpace(sizeWithDataHeader)
 	if err != nil {
@@ -129,12 +135,15 @@ func (s *Database) ReadBlock(height BlockHeight) (BlockData, error) {
 
 	// Read the complete block data
 	blockData := make(BlockData, indexEntry.Size)
-	actualDataOffset := indexEntry.Offset + sizeOfBlockHeader
-	if actualDataOffset < indexEntry.Offset {
-		return nil, fmt.Errorf("internal error: block data offset calculation overflowed")
+	dataFile, localOffset, err := s.getDataFileAndOffset(indexEntry.Offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get data file for block at height %d: %w", height, err)
 	}
-	_, err = s.dataFile.ReadAt(blockData, int64(actualDataOffset))
+	_, err = dataFile.ReadAt(blockData, int64(localOffset+sizeOfBlockHeader))
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
 		return nil, fmt.Errorf("failed to read block data from data file: %w", err)
 	}
 
```
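The new `errors.Is(err, io.EOF)` branch changes the read contract: a read that falls past the end of a data file now reports a missing block as `(nil, nil)` rather than an error. A caller-side sketch of what that implies (the `mustReadBlock` helper is hypothetical, assuming the package's `Database`, `BlockHeight`, and `BlockData` types):

```go
// mustReadBlock is a hypothetical wrapper showing how a caller would
// distinguish "not found" from a real I/O failure after this change.
func mustReadBlock(db *Database, height BlockHeight) (BlockData, error) {
	block, err := db.ReadBlock(height)
	if err != nil {
		return nil, fmt.Errorf("reading block at height %d: %w", height, err)
	}
	if block == nil {
		// ReadAt hit io.EOF: nothing was ever written at this height.
		return nil, fmt.Errorf("block at height %d not found", height)
	}
	return block, nil
}
```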
```diff
@@ -171,12 +180,15 @@ func (s *Database) ReadHeader(height BlockHeight) (BlockData, error) {
 
 	// Read only the header portion
 	headerData := make([]byte, indexEntry.HeaderSize)
-	actualDataOffset := indexEntry.Offset + sizeOfBlockHeader
-	if actualDataOffset < indexEntry.Offset {
-		return nil, fmt.Errorf("internal error: block data offset calculation overflowed")
+	dataFile, localOffset, err := s.getDataFileAndOffset(indexEntry.Offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get data file for block header at height %d: %w", height, err)
 	}
-	_, err = s.dataFile.ReadAt(headerData, int64(actualDataOffset))
+	_, err = dataFile.ReadAt(headerData, int64(localOffset+sizeOfBlockHeader))
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
 		return nil, fmt.Errorf("failed to read block header data from data file: %w", err)
 	}
 
@@ -203,9 +215,16 @@ func (s *Database) ReadBody(height BlockHeight) (BlockData, error) {
 
 	bodySize := indexEntry.Size - uint64(indexEntry.HeaderSize)
 	bodyData := make([]byte, bodySize)
-	bodyOffset := indexEntry.Offset + sizeOfBlockHeader + uint64(indexEntry.HeaderSize)
-	_, err = s.dataFile.ReadAt(bodyData, int64(bodyOffset))
+	dataFile, localOffset, err := s.getDataFileAndOffset(indexEntry.Offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get data file for block body at height %d: %w", height, err)
+	}
+	bodyOffset := localOffset + sizeOfBlockHeader + uint64(indexEntry.HeaderSize)
+	_, err = dataFile.ReadAt(bodyData, int64(bodyOffset))
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
 		return nil, fmt.Errorf("failed to read block body data from data file: %w", err)
 	}
 	return bodyData, nil
@@ -221,16 +240,21 @@ func (s *Database) writeBlockAt(offset uint64, bh blockHeader, block BlockData)
 		return fmt.Errorf("failed to serialize block header: %w", err)
 	}
 
+	dataFile, localOffset, err := s.getDataFileAndOffset(offset)
+	if err != nil {
+		return fmt.Errorf("failed to get data file for writing block %d: %w", bh.Height, err)
+	}
+
 	// Allocate combined buffer for header and block data and write it to the data file
 	combinedBuf := make([]byte, sizeOfBlockHeader+uint64(len(block)))
 	copy(combinedBuf, headerBytes)
 	copy(combinedBuf[sizeOfBlockHeader:], block)
-	if _, err := s.dataFile.WriteAt(combinedBuf, int64(offset)); err != nil {
+	if _, err := dataFile.WriteAt(combinedBuf, int64(localOffset)); err != nil {
 		return fmt.Errorf("failed to write block to data file at offset %d: %w", offset, err)
 	}
 
 	if s.syncToDisk {
-		if err := s.dataFile.Sync(); err != nil {
+		if err := dataFile.Sync(); err != nil {
 			return fmt.Errorf("failed to sync data file after writing block %d: %w", bh.Height, err)
 		}
 	}
```
```diff
@@ -285,29 +309,51 @@ func (s *Database) updateBlockHeights(writtenBlockHeight BlockHeight) error {
 	return nil
 }
 
-func (s *Database) allocateBlockSpace(sizeWithDataHeader uint64) (writeDataOffset uint64, err error) {
+func (s *Database) allocateBlockSpace(totalSize uint64) (writeDataOffset uint64, err error) {
 	maxDataFileSize := s.header.MaxDataFileSize
 
+	// Check if a single block would exceed the max data file size
+	if maxDataFileSize > 0 && totalSize > maxDataFileSize {
+		return 0, ErrBlockTooLarge
+	}
+
 	for {
-		// Check if the new offset would overflow uint64.
 		currentOffset := s.nextDataWriteOffset.Load()
-		if currentOffset > math.MaxUint64-sizeWithDataHeader {
+		if currentOffset > math.MaxUint64-totalSize {
 			return 0, fmt.Errorf(
 				"adding block of size %d to offset %d would overflow uint64 data file pointer",
-				sizeWithDataHeader, currentOffset,
+				totalSize, currentOffset,
 			)
 		}
 
-		newOffset := currentOffset + sizeWithDataHeader
-		if maxDataFileSize > 0 && newOffset > maxDataFileSize {
-			return 0, fmt.Errorf(
-				"adding block of size %d to offset %d (new offset %d) would exceed configured max data file size of %d bytes",
-				sizeWithDataHeader, currentOffset, newOffset, maxDataFileSize,
-			)
+		writeOffset := currentOffset
+		newOffset := currentOffset + totalSize
+
+		if maxDataFileSize > 0 {
+			fileIndex := int(currentOffset / maxDataFileSize)
+			localOffset := currentOffset % maxDataFileSize
+
+			if localOffset+totalSize > maxDataFileSize {
+				writeOffset = (uint64(fileIndex) + 1) * maxDataFileSize
+				newOffset = writeOffset + totalSize
+			}
 		}
 
 		if s.nextDataWriteOffset.CompareAndSwap(currentOffset, newOffset) {
-			return currentOffset, nil
+			return writeOffset, nil
 		}
 	}
 }
+
+func (s *Database) getDataFileAndOffset(globalOffset uint64) (*os.File, uint64, error) {
+	maxFileSize := s.header.MaxDataFileSize
+	if maxFileSize == 0 {
+		handle, err := s.getOrOpenDataFile(0)
+		return handle, globalOffset, err
+	}
+
+	fileIndex := int(globalOffset / maxFileSize)
+	localOffset := globalOffset % maxFileSize
+	handle, err := s.getOrOpenDataFile(fileIndex)
+	return handle, localOffset, err
+}
```
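Taken together, these two functions implement the data splitting named in the commit title: one logical byte space is striped across numbered data files of at most `MaxDataFileSize` bytes each, with `globalOffset / maxFileSize` selecting the file and `globalOffset % maxFileSize` locating the block within it, and a block that would straddle a file boundary is bumped to the start of the next file. A self-contained sketch of just that arithmetic (the helper names are hypothetical, not blockdb APIs):

```go
package main

import "fmt"

// splitOffset maps a global offset to (file index, offset within file),
// mirroring getDataFileAndOffset above.
func splitOffset(globalOffset, maxFileSize uint64) (int, uint64) {
	return int(globalOffset / maxFileSize), globalOffset % maxFileSize
}

// allocate mirrors the boundary rule in allocateBlockSpace: a write that
// would cross a file boundary starts at the next file instead, leaving a
// small unused tail in the current file.
func allocate(currentOffset, totalSize, maxFileSize uint64) (writeOffset, newOffset uint64) {
	writeOffset = currentOffset
	fileIndex, localOffset := splitOffset(currentOffset, maxFileSize)
	if localOffset+totalSize > maxFileSize {
		writeOffset = (uint64(fileIndex) + 1) * maxFileSize
	}
	return writeOffset, writeOffset + totalSize
}

func main() {
	const maxFileSize = 1024
	// A 100-byte write at offset 1000 would cross the 1024-byte
	// boundary, so it is placed at the start of file 1.
	writeOffset, next := allocate(1000, 100, maxFileSize)
	fileIndex, localOffset := splitOffset(writeOffset, maxFileSize)
	fmt.Println(writeOffset, next, fileIndex, localOffset) // 1024 1124 1 0
}
```

The tail gap left by a bump is harmless as long as readers locate blocks through the offset stored in the index entry, which the read paths above do.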

x/blockdb/config.go

Lines changed: 5 additions & 4 deletions
```diff
@@ -1,8 +1,9 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
 package blockdb
 
-import (
-	"fmt"
-)
+import "errors"
 
 // DefaultMaxDataFileSize is the default maximum size of the data block file in bytes (500GB).
 const DefaultMaxDataFileSize = 500 * 1024 * 1024 * 1024
@@ -31,7 +32,7 @@ func DefaultDatabaseConfig() DatabaseConfig {
 // Validate checks if the store options are valid.
 func (opts DatabaseConfig) Validate() error {
 	if opts.CheckpointInterval == 0 {
-		return fmt.Errorf("CheckpointInterval cannot be 0")
+		return errors.New("CheckpointInterval cannot be 0")
 	}
 	return nil
 }
```
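The `fmt.Errorf` to `errors.New` swap is the linting half of the commit: with no formatting directives in the message, `errors.New` is the simpler call, and linters commonly flag constant-string `fmt.Errorf` uses. A small sketch of the convention (names are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel error for a constant message; errors.New suffices because
// nothing is interpolated.
var errZeroInterval = errors.New("CheckpointInterval cannot be 0")

func validate(checkpointInterval uint64) error {
	if checkpointInterval == 0 {
		return errZeroInterval
	}
	return nil
}

func main() {
	// fmt.Errorf remains the right tool when wrapping with %w.
	if err := validate(0); err != nil {
		fmt.Println(fmt.Errorf("invalid config: %w", err))
	}
}
```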
