This repository was archived by the owner on Apr 7, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathmerkleize.go
More file actions
192 lines (174 loc) · 4.32 KB
/
merkleize.go
File metadata and controls
192 lines (174 loc) · 4.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
package merkle
import (
	"math/bits"

	. "github.com/protolambda/zssz/htr"
)
// Bit masks for a 6-step binary search over the bits of a uint64.
// Via the iota chain, maskN == ^uint64((1 << (1 << N)) - 1), i.e. it keeps
// every bit at position >= 2^N:
//   mask0 = ^0x1 (bits 1..63), mask1 = ^0x3 (bits 2..63),
//   mask2 = ^0xF, mask3 = ^0xFF, mask4 = ^0xFFFF, mask5 = ^0xFFFFFFFF.
const (
	mask0 = ^uint64((1 << (1 << iota)) - 1)
	mask1
	mask2
	mask3
	mask4
	mask5
)
// Powers of two doubling as shift amounts and result bits for the binary
// search: bitN == uint8(1 << N), so bit0=1, bit1=2, bit2=4, bit3=8,
// bit4=16, bit5=32.
const (
	bit0 = uint8(1 << iota)
	bit1
	bit2
	bit3
	bit4
	bit5
)
// GetDepth returns the depth of a balanced binary tree that can hold v
// leaves: the number of bits needed to index leaves 0..v-1, i.e.
// ceil(log2(v)) for v > 1. The input is offset down by 1 so exact powers
// of two are not rounded up.
//
// Special cases, kept identical to the original mask/shift binary search:
// GetDepth(0) == 0 and GetDepth(1) == 1 (the search always incremented
// its result for non-zero input).
//
// Example (in out):
// (0 0), (1 1), (2 1), (3 2), (4 2), (5 3), (6 3), (7 3), (8 3), (9 4)
func GetDepth(v uint64) uint8 {
	if v <= 1 {
		// 0 leaves -> depth 0; 1 leaf -> depth 1 (historical behavior).
		return uint8(v)
	}
	// bits.Len64 is the bit length of v-1, replacing the hand-rolled
	// 6-step mask/shift binary search with the standard library.
	return uint8(bits.Len64(v - 1))
}
// Merkleize hashes count leaves (fetched via the leaf callback, each expected
// to yield 32 bytes) into the root of a binary merkle tree virtually padded
// with zero-hashes up to limit leaves, using only O(log(limit)) scratch space
// instead of materializing the full tree.
//
// Panics if count > limit. Returns the zero value [32]byte root if limit == 0,
// and the single leaf (or zero if count == 0) if limit == 1.
//
// NOTE(review): assumes ZeroHashes[j] (defined elsewhere in this package) is
// the root of a depth-j all-zero subtree — confirm.
func Merkleize(hasher HashFn, count uint64, limit uint64, leaf func(i uint64) []byte) (out [32]byte) {
	if count > limit {
		panic("merkleizing list that is too large, over limit")
	}
	if limit == 0 {
		return
	}
	if limit == 1 {
		if count == 1 {
			copy(out[:], leaf(0))
		}
		return
	}
	depth := GetDepth(count)
	limitDepth := GetDepth(limit)
	// tmp[j] holds a completed left-subtree hash at height j that is still
	// waiting for its right sibling before it can be combined upward.
	tmp := make([][32]byte, limitDepth+1, limitDepth+1)
	// j is shared with the merge closure; after merge returns it holds the
	// height at which the last result was stored.
	j := uint8(0)
	// hArr is the working hash; h aliases it as a slice for copy convenience.
	hArr := [32]byte{}
	h := hArr[:]
	// merge folds the current working hash (for leaf index i) upward:
	// whenever i is the right child at height j, combine tmp[j] with it and
	// keep climbing; stop at the first height where it is a left child.
	merge := func(i uint64) {
		// merge back up from bottom to top, as far as we can
		for j = 0; ; j++ {
			// stop merging when we are in the left side of the next combi
			if i&(uint64(1)<<j) == 0 {
				// if we are at the count, we want to merge in zero-hashes for padding
				// (the virtual right siblings are all-zero subtrees)
				if i == count && j < depth {
					v := hasher.Combi(hArr, ZeroHashes[j])
					copy(h, v[:])
				} else {
					break
				}
			} else {
				// keep merging up if we are the right side
				v := hasher.Combi(tmp[j], hArr)
				copy(h, v[:])
			}
		}
		// store the merge result (may be no merge, i.e. bottom leaf node)
		copy(tmp[j][:], h)
	}
	// merge in leaf by leaf.
	for i := uint64(0); i < count; i++ {
		copy(h[:], leaf(i))
		merge(i)
	}
	// complement with 0 if empty, or if not the right power of 2
	if (uint64(1) << depth) != count {
		copy(h[:], ZeroHashes[0][:])
		merge(count)
	}
	// the next power of two may be smaller than the ultimate virtual size,
	// complement with zero-hashes at each depth.
	for j := depth; j < limitDepth; j++ {
		tmp[j+1] = hasher.Combi(tmp[j], ZeroHashes[j])
	}
	// tmp[limitDepth] now holds the root of the full limit-sized tree.
	return tmp[limitDepth]
}
// ConstructProof builds a merkle-branch of the given depth, at the given index (at that depth),
// for a list of leafs of a balanced binary tree.
//
// The returned branch contains GetDepth(limit) sibling hashes ordered from
// the leaf level upward. Entries whose sibling subtree lies entirely in the
// zero padding keep the ZeroHashes value they are initialized with.
// Panics if count > limit or index >= limit; returns nil when limit <= 1
// (a single-node tree has no branch).
//
// NOTE(review): assumes ZeroHashes[j] (defined elsewhere in this package) is
// the root of a depth-j all-zero subtree — confirm.
func ConstructProof(hasher HashFn, count uint64, limit uint64, leaf func(i uint64) []byte, index uint64) (branch [][32]byte) {
	if count > limit {
		panic("merkleizing list that is too large, over limit")
	}
	if index >= limit {
		panic("index out of range, over limit")
	}
	if limit <= 1 {
		return
	}
	depth := GetDepth(count)
	limitDepth := GetDepth(limit)
	// Pre-fill the branch with zero-subtree hashes; levels whose real sibling
	// is produced below get overwritten inside merge.
	branch = append(branch, ZeroHashes[:limitDepth]...)
	// tmp[j] holds a completed left-subtree hash at height j awaiting its
	// right sibling (same scheme as Merkleize).
	tmp := make([][32]byte, limitDepth+1, limitDepth+1)
	// j is shared with the merge closure; after merge returns it holds the
	// height at which the last result was stored.
	j := uint8(0)
	// hArr is the working hash; h aliases it as a slice for copy convenience.
	hArr := [32]byte{}
	h := hArr[:]
	// merge folds the working hash for leaf index i upward, recording into
	// branch any completed subtree root that is the sibling of index's path.
	merge := func(i uint64) {
		// merge back up from bottom to top, as far as we can
		for j = 0; ; j++ {
			// TODO: better than full tree allocation, but style/efficiency can be improved.
			// if i is a sibling of index at the given depth,
			// and i is the last index of the subtree to that depth,
			// then put h into the branch
			// ((i>>j)^1 == index>>j: i's height-j ancestor is the sibling of
			// index's; low-bits check: hArr is that subtree's completed root)
			if (i>>j)^1 == (index>>j) && (((1<<j)-1)&i) == ((1<<j)-1) {
				// insert sibling into the proof
				branch[j] = hArr
			}
			// stop merging when we are in the left side of the next combi
			if i&(uint64(1)<<j) == 0 {
				// if we are at the count, we want to merge in zero-hashes for padding
				if i == count && j < depth {
					v := hasher.Combi(hArr, ZeroHashes[j])
					copy(h, v[:])
				} else {
					break
				}
			} else {
				// keep merging up if we are the right side
				v := hasher.Combi(tmp[j], hArr)
				copy(h, v[:])
			}
		}
		// store the merge result (may be no merge, i.e. bottom leaf node)
		copy(tmp[j][:], h)
	}
	// merge in leaf by leaf.
	for i := uint64(0); i < count; i++ {
		copy(h[:], leaf(i))
		merge(i)
	}
	// complement with 0 if empty, or if not the right power of 2
	if (uint64(1) << depth) != count {
		copy(h[:], ZeroHashes[0][:])
		merge(count)
	}
	return
}