forkchoice.zig
const std = @import("std");
const json = std.json;
const Allocator = std.mem.Allocator;
const Thread = std.Thread;
const ssz = @import("ssz");
const types = @import("@zeam/types");
const configs = @import("@zeam/configs");
const zeam_utils = @import("@zeam/utils");
const stf = @import("@zeam/state-transition");
const zeam_metrics = @import("@zeam/metrics");
const params = @import("@zeam/params");
const keymanager = @import("@zeam/key-manager");
const constants = @import("./constants.zig");
const AggregatedSignatureProof = types.AggregatedSignatureProof;
const Root = types.Root;
const ValidatorIndex = types.ValidatorIndex;
const ZERO_SIGBYTES = types.ZERO_SIGBYTES;
const ProtoBlock = types.ProtoBlock;
pub const ProtoNode = struct {
// Fields from ProtoBlock
slot: types.Slot,
proposer_index: types.ValidatorIndex,
blockRoot: Root,
parentRoot: Root,
stateRoot: Root,
timeliness: bool,
confirmed: bool,
// Fields from ProtoMeta
parent: ?usize,
weight: isize,
bestChild: ?usize,
bestDescendant: ?usize,
depth: usize, // depth from the anchor/forkchoice root
// idx of the next sibling, for easy traversal of children; 0 means there is no next sibling, since index 0 is the anchor root and isn't anyone's sibling
nextSibling: usize,
// idx of the first and the most recently added child, for easy traversal of children through sibling links
firstChild: usize,
latestChild: usize,
numChildren: usize,
// info populated lazily, for efficiency, when building the tree visualization in snapshot
numBranches: ?usize = null,
pub fn format(self: ProtoNode, writer: anytype) !void {
try writer.print("ProtoNode{{ slot={d}, weight={d}, blockRoot=0x{x} }}", .{
self.slot,
self.weight,
&self.blockRoot,
});
}
};
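// Illustrative sketch (hypothetical helper, not used elsewhere in this file): walking a
// node's children through the firstChild/nextSibling links described above. Index 0 is
// the anchor root, so 0 doubles as the "no child / no sibling" sentinel.
fn countChildrenExample(nodes: []const ProtoNode, parent_idx: usize) usize {
var count: usize = 0;
var child_idx = nodes[parent_idx].firstChild;
while (child_idx != 0) : (child_idx = nodes[child_idx].nextSibling) {
count += 1;
}
return count;
}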
pub const ProtoArray = struct {
nodes: std.ArrayList(ProtoNode),
indices: std.AutoHashMap(types.Root, usize),
allocator: Allocator,
const Self = @This();
pub fn init(allocator: Allocator, anchorBlock: ProtoBlock) !Self {
var proto_array = Self{
.nodes = .empty,
.indices = std.AutoHashMap(types.Root, usize).init(allocator),
.allocator = allocator,
};
try proto_array.onBlock(anchorBlock, anchorBlock.slot);
return proto_array;
}
pub fn onBlock(self: *Self, block: ProtoBlock, currentSlot: types.Slot) !void {
const onblock_timer = zeam_metrics.lean_fork_choice_block_processing_time_seconds.start();
defer _ = onblock_timer.observe();
// currentSlot might be needed in future for finding the viable head
_ = currentSlot;
const node_or_null = self.indices.get(block.blockRoot);
if (node_or_null) |node| {
_ = node;
return;
}
// index at which the node will be inserted
const node_index = self.nodes.items.len;
const parent = self.indices.get(block.parentRoot);
// some tree bookkeeping
var depth: usize = 0;
if (parent) |parent_id| {
depth = self.nodes.items[parent_id].depth + 1;
// update the nextSibling link of the parent's current latest child
const prevLatestChild = self.nodes.items[parent_id].latestChild;
if (prevLatestChild == 0) {
self.nodes.items[parent_id].firstChild = node_index;
} else {
self.nodes.items[prevLatestChild].nextSibling = node_index;
}
self.nodes.items[parent_id].latestChild = node_index;
self.nodes.items[parent_id].numChildren += 1;
}
// TODO: utils.Extend is not working, so copy the data for now
// const node = utils.Extend(ProtoNode, block, .{
// .parent = parent,
// .weight = weight,
// // bestChild and bestDescendant are left null
// });
const node = ProtoNode{
.slot = block.slot,
.proposer_index = block.proposer_index,
.blockRoot = block.blockRoot,
.parentRoot = block.parentRoot,
.stateRoot = block.stateRoot,
.timeliness = block.timeliness,
.confirmed = block.confirmed,
.parent = parent,
.weight = 0,
.bestChild = null,
.bestDescendant = null,
// tree book keeping
.depth = depth,
.nextSibling = 0,
.firstChild = 0,
.latestChild = 0,
.numChildren = 0,
};
try self.nodes.append(self.allocator, node);
try self.indices.put(node.blockRoot, node_index);
}
fn getNode(self: *Self, blockRoot: types.Root) ?ProtoNode {
const block_index = self.indices.get(blockRoot);
if (block_index) |blkidx| {
const node = self.nodes.items[blkidx];
return node;
} else {
return null;
}
}
// Internal unlocked version - assumes caller holds lock
fn applyDeltasUnlocked(self: *Self, deltas: []isize, cutoff_weight: u64) !void {
if (deltas.len != self.nodes.items.len) {
return ForkChoiceError.InvalidDeltas;
}
// iterate backwards, applying deltas and propagating them to parents
var node_idx_a = self.nodes.items.len;
while (node_idx_a > 0) {
node_idx_a -= 1;
const node_idx = node_idx_a;
const node_delta = deltas[node_idx];
self.nodes.items[node_idx].weight += node_delta;
if (self.nodes.items[node_idx].parent) |parent_idx| {
deltas[parent_idx] += node_delta;
}
}
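// Worked example (illustrative): for a chain A(idx 0) <- B(idx 1) <- C(idx 2) with
// deltas = {0, 0, +5}, the pass above visits C first (C.weight += 5, deltas[1] += 5),
// then B (B.weight += 5, deltas[0] += 5), then A (A.weight += 5): each ancestor
// absorbs the subtree's delta exactly once.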
// re-iterate backwards and calculate the best child and best descendant
// there seems to be no filter-block-tree step in the mini3sf fork choice
var node_idx_b = self.nodes.items.len;
while (node_idx_b > 0) {
node_idx_b -= 1;
const node_idx = node_idx_b;
const node = self.nodes.items[node_idx];
if (self.nodes.items[node_idx].parent) |parent_idx| {
const nodeBestDescendant = node.bestDescendant orelse (
// by recursion, we will always have a bestDescendant >= cutoff
if (self.nodes.items[node_idx].weight >= cutoff_weight) node_idx else null
//
);
const parent = self.nodes.items[parent_idx];
var updateBest = false;
if (parent.bestChild == node_idx) {
// check if bestDescendant needs to be updated even if best child is same
if (parent.bestDescendant != nodeBestDescendant) {
updateBest = true;
}
} else {
const bestChildOrNull = if (parent.bestChild) |bestChildIdx| self.nodes.items[bestChildIdx] else null;
// see if we can update parent's best
if (bestChildOrNull) |bestChild| {
if (bestChild.weight < node.weight) {
updateBest = true;
} else if (bestChild.weight == node.weight and (std.mem.order(u8, &bestChild.blockRoot, &node.blockRoot) == .lt)) {
// tie break by lexicographically larger block root (leanSpec-compatible)
updateBest = true;
}
} else {
updateBest = true;
}
}
if (updateBest) {
self.nodes.items[parent_idx].bestChild = node_idx;
self.nodes.items[parent_idx].bestDescendant = nodeBestDescendant;
}
}
}
}
};
const OnBlockOpts = struct {
currentSlot: types.Slot,
blockDelayMs: u64,
blockRoot: ?types.Root = null,
confirmed: bool,
};
pub const ForkChoiceStore = struct {
// Shared slot/interval clock - updated by the forkchoice on every tick.
// Also pointed to by ZeamLoggerConfig so loggers can annotate each line
// with the current slot and interval without acquiring any lock.
slot_clock: zeam_utils.SlotTimeClock,
latest_justified: types.Checkpoint,
// finalized is not tracked the same way in 3sf mini, as it corresponds to the head's finalized
// checkpoint; however, it's unlikely that finalization can be rolled back in normal node
// operation (for example, if a buggy chain has been finalized, the node should be restarted
// with an anchor on the new, non-buggy branch)
latest_finalized: types.Checkpoint,
const Self = @This();
pub fn update(self: *Self, justified: types.Checkpoint, finalized: types.Checkpoint) void {
if (justified.slot > self.latest_justified.slot) {
self.latest_justified = justified;
}
if (finalized.slot > self.latest_finalized.slot) {
self.latest_finalized = finalized;
}
}
};
const ProtoAttestation = struct {
//
index: usize = 0,
slot: types.Slot = 0,
// we store AttestationData here since signatures are stored separately in gossip_signatures/latest_*_aggregated_payloads
attestation_data: ?types.AttestationData = null,
};
const AttestationTracker = struct {
// index at which the previous latest attestation was applied; null if not applied
appliedIndex: ?usize = null,
// latest known on-chain attestation of the validator
latestKnown: ?ProtoAttestation = null,
// latest new attestation of the validator, not yet seen on-chain
latestNew: ?ProtoAttestation = null,
};
pub const ForkChoiceParams = struct {
config: configs.ChainConfig,
anchorState: *const types.BeamState,
logger: zeam_utils.ModuleLogger,
};
// Use shared signature map types from types package
const StoredSignature = types.StoredSignature;
const SignaturesMap = types.SignaturesMap;
const StoredAggregatedPayload = types.StoredAggregatedPayload;
const AggregatedPayloadsList = types.AggregatedPayloadsList;
const AggregatedPayloadsMap = types.AggregatedPayloadsMap;
/// Tracks whether the forkchoice has observed a real justified checkpoint via onBlock.
/// For genesis (anchor slot == 0) we start ready; for checkpoint-sync or DB restore we
/// start initing and transition once the first block-driven justified update arrives.
pub const ForkChoiceStatus = enum { initing, ready };
pub const ForkChoice = struct {
protoArray: ProtoArray,
anchorState: *const types.BeamState,
config: configs.ChainConfig,
fcStore: ForkChoiceStore,
allocator: Allocator,
// map of validator ids to attestation trackers; a map is preferable to an array
// because of churn in the validator set
attestations: std.AutoHashMap(usize, AttestationTracker),
head: ProtoBlock,
safeTarget: ProtoBlock,
// data structure to hold validator deltas, could be grown over time as more validators
// get added
deltas: std.ArrayList(isize),
logger: zeam_utils.ModuleLogger,
// Thread-safe access protection
mutex: Thread.RwLock,
// Per-validator XMSS signatures learned from gossip, keyed by AttestationData.
// Each AttestationData maps to a per-validator-id inner map of signatures.
gossip_signatures: SignaturesMap,
// Aggregated signature proofs pending processing.
// These payloads are "new" and migrate to known payloads via interval ticks.
latest_new_aggregated_payloads: AggregatedPayloadsMap,
// Aggregated signature proofs that are known and contribute to fork choice weights.
// Used for recursive signature aggregation when building blocks.
latest_known_aggregated_payloads: AggregatedPayloadsMap,
// Mutex to protect concurrent access to signature/payload maps
signatures_mutex: std.Thread.Mutex,
// Tracks whether FC has observed a real justified checkpoint via block processing.
// Starts as `initing` for checkpoint-sync init (anchor slot > 0); transitions to
// `ready` on the first block-driven justified update. Validator duties (block
// production, attestation) must not run while status == .initing.
status: ForkChoiceStatus,
const Self = @This();
/// Thread-safe snapshot for observability
pub const Snapshot = struct {
head: ProtoNode,
latest_justified: types.Checkpoint,
latest_finalized: types.Checkpoint,
safe_target_root: [32]u8,
validator_count: u64,
nodes: []ProtoNode,
pub fn deinit(self: Snapshot, allocator: Allocator) void {
allocator.free(self.nodes);
}
};
pub fn init(allocator: Allocator, opts: ForkChoiceParams) !Self {
const anchor_block_header = try opts.anchorState.genStateBlockHeader(allocator);
var anchor_block_root: [32]u8 = undefined;
try zeam_utils.hashTreeRoot(
types.BeamBlockHeader,
anchor_block_header,
&anchor_block_root,
allocator,
);
const anchor_block = ProtoBlock{
.slot = opts.anchorState.slot,
.proposer_index = anchor_block_header.proposer_index,
.blockRoot = anchor_block_root,
.parentRoot = anchor_block_header.parent_root,
.stateRoot = anchor_block_header.state_root,
.timeliness = true,
.confirmed = true,
};
const proto_array = try ProtoArray.init(allocator, anchor_block);
const anchorCP = types.Checkpoint{ .slot = opts.anchorState.slot, .root = anchor_block_root };
const fc_store = ForkChoiceStore{
.slot_clock = zeam_utils.SlotTimeClock.init(
opts.anchorState.slot * constants.INTERVALS_PER_SLOT,
opts.anchorState.slot,
0, // slotInterval is 0 at anchor: time is always a slot boundary
),
.latest_justified = anchorCP,
.latest_finalized = anchorCP,
};
const attestations = std.AutoHashMap(usize, AttestationTracker).init(allocator);
const deltas: std.ArrayList(isize) = .empty;
const gossip_signatures = SignaturesMap.init(allocator);
const latest_new_aggregated_payloads = AggregatedPayloadsMap.init(allocator);
const latest_known_aggregated_payloads = AggregatedPayloadsMap.init(allocator);
var fc = Self{
.allocator = allocator,
.protoArray = proto_array,
.anchorState = opts.anchorState,
.config = opts.config,
.fcStore = fc_store,
.attestations = attestations,
.head = anchor_block,
.safeTarget = anchor_block,
.deltas = deltas,
.logger = opts.logger,
.mutex = Thread.RwLock{},
.gossip_signatures = gossip_signatures,
.latest_new_aggregated_payloads = latest_new_aggregated_payloads,
.latest_known_aggregated_payloads = latest_known_aggregated_payloads,
.signatures_mutex = .{},
// Genesis (slot == 0) is immediately ready; checkpoint-sync / DB-restore anchors
// (slot > 0) start in `initing` and become `ready` once the first real justified
// checkpoint is observed through block processing.
.status = if (opts.anchorState.slot == 0) .ready else .initing,
};
if (fc.status == .initing) {
fc.logger.info("[forkchoice] init: checkpoint-sync anchor at slot={d} — status=initing; awaiting first justified update before enabling validator duties", .{opts.anchorState.slot});
} else {
fc.logger.info("[forkchoice] init: genesis anchor — status=ready", .{});
}
// No lock needed during init - struct not yet accessible to other threads
_ = try fc.updateHeadUnlocked();
return fc;
}
/// Thread-safe snapshot for observability
/// Holds shared lock only during copy, caller formats JSON lock-free
pub fn snapshot(self: *Self, allocator: Allocator) !Snapshot {
self.mutex.lockShared();
defer self.mutex.unlockShared();
// Quick copy - ProtoNode has no pointer members, shallow copy is safe
const nodes_copy = try allocator.alloc(ProtoNode, self.protoArray.nodes.items.len);
@memcpy(nodes_copy, self.protoArray.nodes.items);
// populate numBranches
var node_natural_idx = nodes_copy.len;
while (node_natural_idx > 0) {
if (nodes_copy[node_natural_idx - 1].numBranches == null) {
// leaf of the forkchoice tree is always a branch by itself
nodes_copy[node_natural_idx - 1].numBranches = 1;
}
const numBranches = nodes_copy[node_natural_idx - 1].numBranches orelse @panic("invalid null num branches for node");
if (nodes_copy[node_natural_idx - 1].parent) |parent_idx| {
nodes_copy[parent_idx].numBranches = (nodes_copy[parent_idx].numBranches orelse 0) + numBranches;
}
node_natural_idx -= 1;
}
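// Worked example (illustrative): for a root R with children X (a leaf) and Y, where Y
// has a single leaf child Z, the backward pass above marks the leaves X and Z with
// numBranches = 1, keeps Y at 1 (accumulated from Z), and sums both into R for
// numBranches = 2.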
// Get the full ProtoNode for head from protoArray
const head_idx = self.protoArray.indices.get(self.head.blockRoot) orelse {
// Fallback: create a ProtoNode from ProtoBlock if not found
const head_node = ProtoNode{
.slot = self.head.slot,
.proposer_index = self.head.proposer_index,
.blockRoot = self.head.blockRoot,
.parentRoot = self.head.parentRoot,
.stateRoot = self.head.stateRoot,
.timeliness = self.head.timeliness,
.confirmed = self.head.confirmed,
.parent = null,
.weight = 0,
.bestChild = null,
.bestDescendant = null,
.depth = 0,
.nextSibling = 0,
.firstChild = 0,
.latestChild = 0,
.numChildren = 0,
.numBranches = 1,
};
return Snapshot{
.head = head_node,
.latest_justified = self.fcStore.latest_justified,
.latest_finalized = self.fcStore.latest_finalized,
.safe_target_root = self.safeTarget.blockRoot,
.validator_count = self.config.genesis.numValidators(),
.nodes = nodes_copy,
};
};
return Snapshot{
.head = self.protoArray.nodes.items[head_idx],
.latest_justified = self.fcStore.latest_justified,
.latest_finalized = self.fcStore.latest_finalized,
.safe_target_root = self.safeTarget.blockRoot,
.validator_count = self.config.genesis.numValidators(),
.nodes = nodes_copy,
};
}
pub fn deinit(self: *Self) void {
self.protoArray.nodes.deinit(self.protoArray.allocator);
self.protoArray.indices.deinit();
self.attestations.deinit();
self.deltas.deinit(self.allocator);
self.signatures_mutex.lock();
defer self.signatures_mutex.unlock();
self.gossip_signatures.deinit();
// Deinit each list in the aggregated payloads maps
var it_known = self.latest_known_aggregated_payloads.iterator();
while (it_known.next()) |entry| {
for (entry.value_ptr.items) |*stored| {
stored.proof.deinit();
}
entry.value_ptr.deinit(self.allocator);
}
self.latest_known_aggregated_payloads.deinit();
var it_new = self.latest_new_aggregated_payloads.iterator();
while (it_new.next()) |entry| {
for (entry.value_ptr.items) |*stored| {
stored.proof.deinit();
}
entry.value_ptr.deinit(self.allocator);
}
self.latest_new_aggregated_payloads.deinit();
}
fn isBlockTimely(self: *Self, blockDelayMs: usize) bool {
_ = self;
_ = blockDelayMs;
return true;
}
fn isFinalizedDescendant(self: *Self, blockRoot: types.Root) bool {
const finalized_slot = self.fcStore.latest_finalized.slot;
const finalized_root = self.fcStore.latest_finalized.root;
var searched_idx_or_null = self.protoArray.indices.get(blockRoot);
while (searched_idx_or_null) |searched_idx| {
const searched_node = self.protoArray.nodes.items[searched_idx];
if (searched_node.slot <= finalized_slot) {
return std.mem.eql(u8, searched_node.blockRoot[0..], finalized_root[0..]);
}
searched_idx_or_null = searched_node.parent;
}
return false;
}
/// Builds a canonical view hashmap containing all blocks in the canonical chain
/// from targetAnchor back to prevAnchor, plus all their unfinalized descendants.
// Internal unlocked version - assumes caller holds lock
fn getCanonicalViewUnlocked(self: *Self, canonical_view: *std.AutoHashMap(types.Root, void), targetAnchorRoot: types.Root, prevAnchorRootOrNull: ?types.Root) !void {
const prev_anchor_idx = if (prevAnchorRootOrNull) |prevAnchorRoot| (self.protoArray.indices.get(prevAnchorRoot) orelse return ForkChoiceError.InvalidAnchor) else 0;
const target_anchor_idx = self.protoArray.indices.get(targetAnchorRoot) orelse return ForkChoiceError.InvalidTargetAnchor;
// first collect all canonical blocks down to the previous anchor
var current_idx = target_anchor_idx;
while (current_idx >= prev_anchor_idx) {
const current_node = self.protoArray.nodes.items[current_idx];
try canonical_view.put(current_node.blockRoot, {});
if (current_idx != prev_anchor_idx) {
current_idx = current_node.parent orelse return ForkChoiceError.InvalidCanonicalTraversal;
// extra soundness check
if (current_idx < prev_anchor_idx) {
return ForkChoiceError.InvalidCanonicalTraversal;
}
} else {
break;
}
}
// add all the potential downstream canonical blocks to the map i.e. unfinalized descendants
current_idx = target_anchor_idx + 1;
while (current_idx < self.protoArray.nodes.items.len) {
// if the parent of this node is already in the canonical_blocks, this is a potential canonical block
const current_node = self.protoArray.nodes.items[current_idx];
const parent_idx = current_node.parent orelse return ForkChoiceError.InvalidCanonicalTraversal;
const parent_node = self.protoArray.nodes.items[parent_idx];
// the parent must be canonical, and no parent may be before the target anchor,
// because then this node would be on a side branch relative to the target anchor
//
// root=be35ab6546a38c4d5d42b588ac952867f19e03d1f12b4474f3b627db15739431 slot=30 index=7 parent=4 (arrived late)
// root=35ba9cb9ea2e0e8d1248f40dc9d2142e0de2d18812be529ff024c7bcb5cd4b31 slot=31 index=5 parent=4
// root=50ebab7c7948a768f298d9dc0b9863c0095d8df55f15e761b7eb032f3177ba6c slot=24 index=4 parent=3
// root=c06f61119634e626d5e947ac7baaa8242b707a012880370875efeb2c0539ce7b slot=22 index=3 parent=2
// root=57018d16f19782f832e8585657862930dd1acd217f308e60d23ad5a8efbb5f81 slot=21 index=2 parent=1
// root=788b12ebd124982cc09433b1aadc655c7d876214ea2905f1b594564308c80e86 slot=20 index=1 parent=0
// root=d754cf64f908c488eafc7453db7383be232a568f8e411c43bff809eb7a8e3028 slot=19 index=0 parent=null
// targetAnchorRoot is 35ba9cb9ea2e0e8d1248f40dc9d2142e0de2d18812be529ff024c7bcb5cd4b31
//
// now without the parent index >= target_anchor_idx check slot=30 also ends up being added in canonical
// because its parent is correctly canonical and has already been added to canonical_view in first while loop
// however target anchor is slot=31 and hence slot=30 shouldn't be on a downstream unfinalized subtree
//
// test cases for the above are already present in the rebase testing
if (parent_idx >= target_anchor_idx and canonical_view.contains(parent_node.blockRoot)) {
try canonical_view.put(current_node.blockRoot, {});
}
current_idx += 1;
}
}
/// Analyzes block canonicality relative to a target finalization anchor.
/// Returns [canonical_roots, potential_canonical_roots, non_canonical_roots].
///
/// SCOPE: Analysis is limited to blocks at or after prevAnchorRootOrNull (or genesis if null).
/// Blocks before the previous anchor are considered stable and not analyzed.
///
/// - canonical_roots: Blocks on the path from targetAnchor back to prevAnchor (slot <= target)
/// - potential_canonical_roots: Descendants of canonical blocks with slot > target (unfinalized)
/// - non_canonical_roots: Blocks not in the canonical set (orphans)
///
/// If canonicalViewOrNull is provided, it reuses an existing canonical view for efficiency.
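///
/// Example (illustrative, hypothetical tree): for anchor A with canonical child B (the
/// target anchor), a fork A <- C, and an unfinalized descendant B <- D at a later slot,
/// the result is canonical = [B, A], potential = [D], non_canonical = [C].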
// Internal unlocked version - assumes caller holds lock
fn getCanonicalityAnalysisUnlocked(self: *Self, targetAnchorRoot: types.Root, prevAnchorRootOrNull: ?types.Root, canonicalViewOrNull: ?*std.AutoHashMap(types.Root, void)) ![3][]types.Root {
var canonical_roots: std.ArrayList(types.Root) = .empty;
var potential_canonical_roots: std.ArrayList(types.Root) = .empty;
var non_canonical_roots: std.ArrayList(types.Root) = .empty;
// get some info about previous and target anchors
const prev_anchor_idx = if (prevAnchorRootOrNull) |prevAnchorRoot| (self.protoArray.indices.get(prevAnchorRoot) orelse return ForkChoiceError.InvalidAnchor) else 0;
const target_anchor_idx = self.protoArray.indices.get(targetAnchorRoot) orelse return ForkChoiceError.InvalidTargetAnchor;
const target_anchor_slot = self.protoArray.nodes.items[target_anchor_idx].slot;
// get a full canonical view of the chain, finalized and unfinalized, anchored at the targetAnchorRoot
var canonical_blocks = canonicalViewOrNull orelse blk: {
var local_view = std.AutoHashMap(types.Root, void).init(self.allocator);
try self.getCanonicalViewUnlocked(&local_view, targetAnchorRoot, prevAnchorRootOrNull);
break :blk &local_view;
};
// now we can split forkchoice into 3 parts (excluding target anchor)
// traversing all the way from the bottom to the prev_anchor_idx
var current_idx = self.protoArray.nodes.items.len - 1;
while (current_idx >= prev_anchor_idx) {
const current_node = self.protoArray.nodes.items[current_idx];
if (canonical_blocks.contains(current_node.blockRoot)) {
if (current_node.slot <= target_anchor_slot) {
self.logger.debug("adding confirmed canonical root={x} slot={d} index={d} parent={any}", .{
¤t_node.blockRoot,
current_node.slot,
current_idx,
current_node.parent,
});
try canonical_roots.append(self.allocator, current_node.blockRoot);
} else if (current_node.slot > target_anchor_slot) {
try potential_canonical_roots.append(self.allocator, current_node.blockRoot);
}
} else {
try non_canonical_roots.append(self.allocator, current_node.blockRoot);
}
if (current_idx == 0) {
break;
} else {
current_idx -= 1;
}
}
// confirm first root in canonical_roots is the new anchor because it should have been pushed first
if (!std.mem.eql(u8, &canonical_roots.items[0], &targetAnchorRoot)) {
for (canonical_roots.items, 0..) |root, index| {
self.logger.err("canonical root at index={d} {x}", .{
index,
&root,
});
}
self.logger.err("targetAnchorRoot is {x}", .{&targetAnchorRoot});
return ForkChoiceError.InvalidCanonicalTraversal;
}
const result = [_]([]types.Root){
try canonical_roots.toOwnedSlice(self.allocator),
//
try potential_canonical_roots.toOwnedSlice(self.allocator),
try non_canonical_roots.toOwnedSlice(self.allocator),
};
// only way to conditionally deinit the locally allocated map created in an orelse block scope
if (canonicalViewOrNull == null) {
canonical_blocks.deinit();
}
return result;
}
/// Rebases the forkchoice tree to a new anchor, pruning non-canonical blocks.
// Internal unlocked version - assumes caller holds lock
fn rebaseUnlocked(self: *Self, targetAnchorRoot: types.Root, canonicalViewOrNull: ?*std.AutoHashMap(types.Root, void)) !void {
const target_anchor_idx = self.protoArray.indices.get(targetAnchorRoot) orelse return ForkChoiceError.InvalidTargetAnchor;
const target_anchor_slot = self.protoArray.nodes.items[target_anchor_idx].slot;
const target_anchor_depth = self.protoArray.nodes.items[target_anchor_idx].depth;
var canonical_view = canonicalViewOrNull orelse blk: {
var local_view = std.AutoHashMap(types.Root, void).init(self.allocator);
try self.getCanonicalViewUnlocked(&local_view, targetAnchorRoot, null);
break :blk &local_view;
};
// prune; note that the entire subtree of targetAnchorRoot is unaffected and is preserved
// as-is, because nothing in it gets pruned
var shifted_left: usize = 0;
var old_indices_to_new = std.AutoHashMap(usize, usize).init(self.allocator);
defer old_indices_to_new.deinit();
var current_idx: usize = 0;
while (current_idx < self.protoArray.nodes.items.len) {
const current_node = self.protoArray.nodes.items[current_idx];
// we preserve the tree all the way down from the target anchor and its unfinalized potential canonical descendants
if (canonical_view.contains(current_node.blockRoot) and current_node.slot >= target_anchor_slot) {
try self.protoArray.indices.put(current_node.blockRoot, current_idx);
try old_indices_to_new.put((current_idx + shifted_left), current_idx);
// go to the next node
current_idx += 1;
} else {
// remove the node and continue the loop without updating current idx,
// because after removal the next node is referenced by the same current idx
_ = self.protoArray.nodes.orderedRemove(current_idx);
// no need to preserve order in deltas, as they are always zeroed before use
_ = self.deltas.swapRemove(current_idx);
_ = self.protoArray.indices.remove(current_node.blockRoot);
shifted_left += 1;
}
}
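// Worked example (illustrative): if nodes {A, B, C, D, E} occupy indices 0..4 and B and D
// are pruned, the loop above leaves {A, C, E} at indices 0..2 and records
// old_indices_to_new = {0 -> 0, 2 -> 1, 4 -> 2}, which the passes below use to remap
// parent, child, and sibling pointers.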
// correct parent, bestChild and bestDescendant indices using the created old to new map
current_idx = 0;
while (current_idx < self.protoArray.nodes.items.len) {
var current_node = self.protoArray.nodes.items[current_idx];
// correct depth
current_node.depth -= target_anchor_depth;
// fix parent; the anchor, i.e. the 0th entry of the forkchoice, has no parent and no sibling
if (current_idx == 0) {
current_node.parent = null;
current_node.nextSibling = 0;
} else {
// all other nodes should have parents; otherwise it's an irrecoverable error, as we have
// already modified the forkchoice and it can't be restored
const old_parent_idx = current_node.parent orelse @panic("invalid parent of the rebased unfinalized");
const new_parent_idx = old_indices_to_new.get(old_parent_idx);
current_node.parent = new_parent_idx;
if (current_node.nextSibling != 0) {
current_node.nextSibling = old_indices_to_new.get(current_node.nextSibling) orelse @panic("invalid sibling of rebased unfinalized");
}
}
// fix firstChild and latestChild
if (current_node.latestChild != 0) {
current_node.firstChild = old_indices_to_new.get(current_node.firstChild) orelse @panic("invalid first child of rebased tree");
current_node.latestChild = old_indices_to_new.get(current_node.latestChild) orelse @panic("invalid latest child of rebased tree");
}
// fix bestChild and descendant
if (current_node.bestChild) |old_best_child_idx| {
// we should be able to look up the new index; otherwise it's an irrecoverable error
const new_best_child_idx = old_indices_to_new.get(old_best_child_idx) orelse @panic("invalid old index lookup for rebased best child");
current_node.bestChild = new_best_child_idx;
// If bestDescendant is null, keep it null (can happen when applyDeltas uses cutoff_weight
// and the best branch has no node >= cutoff). See issue #545.
if (current_node.bestDescendant) |old_best_descendant_idx| {
const new_best_descendant_idx = old_indices_to_new.get(old_best_descendant_idx) orelse @panic("invalid old index lookup for rebase best descendant");
current_node.bestDescendant = new_best_descendant_idx;
}
// else: bestDescendant remains null
} else {
// confirm best descendant is also null
if (current_node.bestDescendant != null) {
@panic("invalid forkchoice with non null best descendant but with null best child");
}
}
self.protoArray.nodes.items[current_idx] = current_node;
current_idx += 1;
}
// confirm the first entry in forkchoice is the target anchor
if (!std.mem.eql(u8, &self.protoArray.nodes.items[0].blockRoot, &targetAnchorRoot)) {
@panic("invalid forkchoice rebasing with forkchoice base not matching target anchor");
}
// clean up the vote tracker and remove all entries which are not canonical
var iterator = self.attestations.iterator();
while (iterator.next()) |entry| {
// fix applied index
if (entry.value_ptr.appliedIndex) |applied_index| {
const new_index_lookup = old_indices_to_new.get(applied_index);
// this simple assignment suffices both when the new index is found (i.e. it is canonical)
// and when it isn't, in which case it needs to become null
entry.value_ptr.appliedIndex = new_index_lookup;
}
// fix latestKnown
if (entry.value_ptr.latestKnown) |*latest_known| {
const new_index_lookup = old_indices_to_new.get(latest_known.index);
// if we find the index then update it else change it to null as it was non canonical
if (new_index_lookup) |new_index| {
latest_known.index = new_index;
} else {
entry.value_ptr.latestKnown = null;
}
}
// fix latestNew
if (entry.value_ptr.latestNew) |*latest_new| {
const new_index_lookup = old_indices_to_new.get(latest_new.index);
// if we find the index then update it else change it to null as it was non canonical
if (new_index_lookup) |new_index| {
latest_new.index = new_index;
} else {
entry.value_ptr.latestNew = null;
}
}
}
if (canonicalViewOrNull == null) {
canonical_view.deinit();
}
return;
}
/// Returns the canonical ancestor at the specified depth from the current head.
/// Depth 0 returns the head itself. Traverses parent pointers (not slot arithmetic),
/// so missed slots don't affect depth counting. If depth exceeds chain length,
/// clamps to genesis.
// Internal unlocked version - assumes caller holds lock
fn getCanonicalAncestorAtDepthUnlocked(self: *Self, min_depth: usize) !ProtoBlock {
var depth = min_depth;
var current_idx = self.protoArray.indices.get(self.head.blockRoot) orelse return ForkChoiceError.InvalidHeadIndex;
// If depth exceeds chain length, clamp to genesis
if (current_idx < depth) {
current_idx = 0;
depth = 0;
}
// Traverse parent pointers until we reach the requested depth or genesis.
// This naturally handles missed slots since we follow parent links, not slot numbers.
while (depth > 0 and current_idx > 0) {
const current_node = self.protoArray.nodes.items[current_idx];
current_idx = current_node.parent orelse return ForkChoiceError.InvalidCanonicalTraversal;
depth -= 1;
}
const ancestor_at_depth = zeam_utils.Cast(ProtoBlock, self.protoArray.nodes.items[current_idx]);
return ancestor_at_depth;
}
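// Usage sketch (illustrative): for a canonical chain G <- A <- B <- head,
// getCanonicalAncestorAtDepthUnlocked(2) returns A, and a run of missed slots between B
// and head still counts as a single parent hop, since traversal follows parent links.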
// Internal unlocked version - assumes caller holds lock
fn tickIntervalUnlocked(self: *Self, hasProposal: bool) !void {
const new_time = self.fcStore.slot_clock.time.fetchAdd(1, .monotonic) + 1;
const currentInterval = new_time % constants.INTERVALS_PER_SLOT;
self.fcStore.slot_clock.slotInterval.store(currentInterval, .monotonic);
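// Interval schedule implied by the switch arms below (INTERVALS_PER_SLOT == 5):
//   0 - slot boundary: advance the slot counter; accept new attestations only when proposing
//   1, 2 - no direct forkchoice work
//   3 - recompute the safe target
//   4 - promote "new" attestations and aggregated payloads to "known"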
switch (currentInterval) {
0 => {
_ = self.fcStore.slot_clock.timeSlots.fetchAdd(1, .monotonic);
// Accept new aggregated payloads only if a proposal exists for this slot.
if (hasProposal) {
_ = try self.acceptNewAttestationsUnlocked();
}
},
1 => {},
2 => {},
3 => {
_ = try self.updateSafeTargetUnlocked();
},
4 => {
_ = try self.acceptNewAttestationsUnlocked();
},
else => @panic("invalid interval"),
}
self.logger.debug("forkchoice ticked to time(intervals)={d} slot={d}", .{ self.fcStore.slot_clock.time.load(.monotonic), self.fcStore.slot_clock.timeSlots.load(.monotonic) });
}
// Internal unlocked version - assumes caller holds lock
fn onIntervalUnlocked(self: *Self, time_intervals: usize, has_proposal: bool) !void {
while (self.fcStore.slot_clock.time.load(.monotonic) < time_intervals) {
try self.tickIntervalUnlocked(has_proposal and (self.fcStore.slot_clock.time.load(.monotonic) + 1) == time_intervals);
}
}
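// Behavior sketch (illustrative): if the clock sits at 7 intervals and time_intervals is
// 10, the loop above ticks 8, 9, then 10, and hasProposal is forwarded only on the final
// tick, i.e. the one that lands exactly on time_intervals.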
// Internal unlocked version - assumes caller holds lock
fn acceptNewAttestationsUnlocked(self: *Self) !ProtoBlock {
// Capture counts outside lock scope for metrics update
var known_payloads_count: usize = 0;
var new_payloads_count: usize = 0;
var payloads_updated = false;
{
// Keep payload migration synchronized with other signature/payload map writers.
self.signatures_mutex.lock();
defer self.signatures_mutex.unlock();
if (self.latest_new_aggregated_payloads.count() > 0) {
var it = self.latest_new_aggregated_payloads.iterator();
while (it.next()) |entry| {
const sig_key = entry.key_ptr.*;
const source_list = entry.value_ptr;
const gop = try self.latest_known_aggregated_payloads.getOrPut(sig_key);
if (!gop.found_existing) {
gop.value_ptr.* = .empty;
}
// Ensure all required capacity up-front so the move is non-failing.
try gop.value_ptr.ensureUnusedCapacity(self.allocator, source_list.items.len);
for (source_list.items) |stored| {
gop.value_ptr.appendAssumeCapacity(stored);
}
// Source list buffer no longer needed after ownership transfer.
source_list.deinit(self.allocator);
source_list.* = .empty;
}
self.latest_new_aggregated_payloads.clearAndFree();
// Capture counts for metrics update outside lock
known_payloads_count = self.latest_known_aggregated_payloads.count();
new_payloads_count = self.latest_new_aggregated_payloads.count();
payloads_updated = true;
}
}
// Update fork-choice store gauges after promotion (outside lock scope)
if (payloads_updated) {
zeam_metrics.metrics.lean_latest_known_aggregated_payloads.set(@intCast(known_payloads_count));
zeam_metrics.metrics.lean_latest_new_aggregated_payloads.set(@intCast(new_payloads_count));
}
// Promote latestNew → latestKnown in attestation tracker.
// Attestations that were "new" (gossip) are now "known" (accepted).
for (0..self.config.genesis.numValidators()) |validator_id| {
var tracker = self.attestations.get(validator_id) orelse continue;
// latestNew is always ahead of latestKnown (and will be non-null if latestKnown is non-null)
tracker.latestKnown = tracker.latestNew;
try self.attestations.put(validator_id, tracker);
}
return self.updateHeadUnlocked();
}
pub fn getProposalHead(self: *Self, slot: types.Slot) !types.Checkpoint {
const time_intervals = slot * constants.INTERVALS_PER_SLOT;
// this could be called independently by the validator when it's a separate process,
// in which case the FC would need mutex protection to be thread safe; but for now
// this is deterministically called after the fc has been ticked ahead,
// so the following call should be a no-op
try self.onInterval(time_intervals, true);
// accept any new attestations in case the previous tick was a no-op and either the validator
// wasn't registered or new attestations have arrived since
const head = try self.acceptNewAttestations();
return types.Checkpoint{
.root = head.blockRoot,
.slot = head.slot,
};
}
pub const ProposalAttestationsResult = struct {
attestations: types.AggregatedAttestations,
signatures: types.AttestationSignatures,
};
// Internal unlocked version - assumes caller holds lock
fn getProposalAttestationsUnlocked(
self: *Self,
pre_state: *const types.BeamState,
slot: types.Slot,
proposer_index: types.ValidatorIndex,
parent_root: [32]u8,
) !ProposalAttestationsResult {
var agg_attestations = try types.AggregatedAttestations.init(self.allocator);
var agg_att_cleanup = true;
errdefer if (agg_att_cleanup) {
for (agg_attestations.slice()) |*att| att.deinit();
agg_attestations.deinit();
};
var attestation_signatures = try types.AttestationSignatures.init(self.allocator);
var agg_sig_cleanup = true;
errdefer if (agg_sig_cleanup) {
for (attestation_signatures.slice()) |*sig| sig.deinit();
attestation_signatures.deinit();
};
// Fixed-point attestation collection with greedy proof selection.
//
// For the current latest_justified checkpoint, find matching attestation_data
// entries in latest_known_aggregated_payloads and greedily select proofs that
// maximize new validator coverage. Then apply STF to check if justification
// changed. If it did, look for entries matching the new justified checkpoint
// and repeat. If no matching entries exist or justification did not change,
// block production is done.
var current_justified_root = pre_state.latest_justified.root;
var processed_att_data = std.AutoHashMap(types.AttestationData, void).init(self.allocator);
defer processed_att_data.deinit();
while (true) {
// Find all attestation_data entries whose source matches the current justified checkpoint
// and greedily select proofs maximizing new validator coverage for each.
// Collect entries and sort by target slot for deterministic processing order.
const MapEntry = struct {
att_data: *types.AttestationData,
payloads: *types.AggregatedPayloadsList,
};
var sorted_entries: std.ArrayList(MapEntry) = .empty;
defer sorted_entries.deinit(self.allocator);
var payload_it = self.latest_known_aggregated_payloads.iterator();
while (payload_it.next()) |entry| {
if (!std.mem.eql(u8, ¤t_justified_root, &entry.key_ptr.source.root)) continue;
if (!self.protoArray.indices.contains(entry.key_ptr.head.root)) continue;
if (processed_att_data.contains(entry.key_ptr.*)) continue;
try sorted_entries.append(self.allocator, .{ .att_data = entry.key_ptr, .payloads = entry.value_ptr });
}
std.mem.sort(MapEntry, sorted_entries.items, {}, struct {
fn lessThan(_: void, a: MapEntry, b: MapEntry) bool {
return a.att_data.target.slot < b.att_data.target.slot;
}
}.lessThan);
const found_entries = sorted_entries.items.len > 0;
for (sorted_entries.items) |map_entry| {
try processed_att_data.put(map_entry.att_data.*, {});