Commit a1d8bcf

Add block size benchmarking scripts
1 parent 501b9bf commit a1d8bcf

3 files changed: +446 -0 lines changed

README.md (11 additions & 0 deletions)
@@ -0,0 +1,11 @@
# Block Size Benchmarking Scripts

The script calculates block propagation time as the timestamp difference between the “Pre-sealed block for proposal” and “Imported #XXX” lines in the partner-chains node logs (a minimal sketch of this calculation follows the usage steps below).
## How to use

1. Install `python3` and `pip`
2. Install pandas: `pip install pandas`
3. Gather logs from the nodes, putting the logs from each node into a dedicated txt file: alice.txt, bob.txt, etc.
4. Extract data from the logs: `python3 extractor.py`
5. Generate per-node statistics: `python3 analyzer.py block_propagation_report.txt analysis.txt`
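
extractor.py is not included in this excerpt, so the following is only a minimal sketch of the timestamp-difference calculation described above. It assumes each node log line begins with a `YYYY-MM-DD HH:MM:SS.mmm` timestamp (the exact prefix depends on the node's logging configuration), and the helper names `line_timestamp` and `propagation_ms` are illustrative, not part of the committed scripts:

```python
# Sketch only: extractor.py (not shown here) presumably pairs each block's
# "Pre-sealed block for proposal" line with the matching "Imported #XXX" lines
# from the other nodes' logs. The leading-timestamp format is an assumption.
from datetime import datetime

TS_FORMAT = "%Y-%m-%d %H:%M:%S.%f"  # assumed leading timestamp format
TS_LENGTH = 23                      # len("2024-01-01 12:00:00.123")


def line_timestamp(line: str) -> datetime:
    """Parse the leading timestamp of a node log line (assumed format)."""
    return datetime.strptime(line[:TS_LENGTH], TS_FORMAT)


def propagation_ms(pre_sealed_line: str, imported_line: str) -> float:
    """Milliseconds between sealing a block on the creator and importing it on another node."""
    delta = line_timestamp(imported_line) - line_timestamp(pre_sealed_line)
    return delta.total_seconds() * 1000.0


# Example (hypothetical log excerpts):
# propagation_ms(
#     "2024-01-01 12:00:00.123 ... Pre-sealed block for proposal at 42. ...",
#     "2024-01-01 12:00:00.456 ... Imported #42 (0x1a2b…3c4d)",
# )  # -> 333.0
```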
analyzer.py (213 additions & 0 deletions)
@@ -0,0 +1,213 @@
#!/usr/bin/env python3
"""Summarise block propagation times per node from a block propagation report."""

import sys
import re
import statistics
from typing import Dict, List, Optional, Tuple


class Block:
    """A block proposal together with the import delay (ms) observed on each node."""

    def __init__(self, number: int, hash_str: Optional[str] = None):
        self.number = number
        self.hash = hash_str
        self.creator: Optional[str] = None
        self.imports: Dict[str, float] = {}

    def add_import(self, node: str, delay_ms: float):
        self.imports[node] = delay_ms

    def has_all_nodes(self, required_nodes: List[str]) -> bool:
        return all(node in self.imports for node in required_nodes)

    def is_complete(self, required_nodes: List[str]) -> bool:
        """True when the creator is known and every required node has imported the block."""
        return (self.creator and self.creator != 'unknown'
                and self.has_all_nodes(required_nodes))


class BlockPropagationAnalyzer:
    """Parses a block propagation report and summarises import times per node."""

    def __init__(self, nodes: List[str]):
        if not nodes:
            raise ValueError("At least one node must be specified")
        self.all_nodes = [node.lower() for node in nodes]
        self.blocks: List[Block] = []

    def parse_file(self, filename: str) -> None:
        try:
            with open(filename, 'r', encoding='utf-8') as file:
                content = file.read()
        except FileNotFoundError:
            print(f"Error: File '{filename}' not found.")
            sys.exit(1)
        except Exception as e:
            print(f"Error reading file '{filename}': {e}")
            sys.exit(1)
        self._parse_content(content)

    def _parse_content(self, content: str) -> None:
        """Group 'Created by' / 'Imported by' lines under the most recent 'Block #' header."""
        lines = content.split('\n')
        current_block = None
        for line in lines:
            line = line.strip()
            if line.startswith('Block #'):
                current_block = self._parse_block_header(line)
                if current_block:
                    self.blocks.append(current_block)
            elif line.startswith('Created by:') and current_block:
                current_block.creator = self._parse_creator(line)
            elif line.startswith('Imported by') and current_block:
                node, delay = self._parse_import(line)
                if node:
                    current_block.add_import(node, delay)
            elif 'Creator unknown' in line and current_block:
                current_block.creator = 'unknown'

    def _parse_block_header(self, line: str) -> Optional[Block]:
        """Parse a header such as 'Block #123 (0xabcd…ef01)'."""
        block_match = re.search(r'Block #(\d+)', line)
        hash_match = re.search(r'0x[a-f0-9]{4}…[a-f0-9]{4}', line)
        if block_match:
            number = int(block_match.group(1))
            hash_str = hash_match.group(0) if hash_match else None
            return Block(number, hash_str)
        return None

    def _parse_creator(self, line: str) -> Optional[str]:
        creator_match = re.search(r'Created by: (\w+)', line)
        return creator_match.group(1).lower() if creator_match else None

    def _parse_import(self, line: str) -> Tuple[Optional[str], float]:
        """Parse 'Imported by <node> [(creator node)] [after <delay> ms]' lines."""
        import_match = re.search(
            r'Imported by (\w+)'
            r'(?:\s+\(creator node\))?'
            r'(?:\s+after ([\d.]+) ms)?',
            line
        )
        if import_match:
            node = import_match.group(1).lower()
            delay_str = import_match.group(2)
            delay = float(delay_str) if delay_str else 0.0
            return node, delay
        return None, 0.0

    def get_complete_blocks(self) -> List[Block]:
        return [block for block in self.blocks
                if block.is_complete(self.all_nodes)]

    def _format_table_row(self, values: List[str], widths: List[int]) -> str:
        """Format a table row, left-aligning the first column and centering the rest."""
        formatted_values = []
        for i, (value, width) in enumerate(zip(values, widths)):
            if i == 0:
                formatted_values.append(f"{value:<{width}}")
            else:
                formatted_values.append(f"{value:^{width}}")
        return "| " + " | ".join(formatted_values) + " |"

    def generate_summary_statistics(self, complete_blocks: List[Block]) -> str:
        """Build a Markdown-style table of per-node creation and import statistics (times in ms)."""
        lines = []
        lines.append("=== SUMMARY STATISTICS BY NODE ===")
        lines.append("")

        stats = {}
        for node in self.all_nodes:
            blocks_created = len([block for block in complete_blocks if block.creator == node])

            import_times = [
                float(block.imports[node])
                for block in complete_blocks
                if block.creator != node
            ]

            avg_import = statistics.mean(import_times) if import_times else 0

            stats[node] = {
                'blocks_created': blocks_created,
                'blocks_imported': len(import_times),
                'min_import': min(import_times) if import_times else 0,
                'max_import': max(import_times) if import_times else 0,
                'avg_import': avg_import
            }

        header = "| Node | Blocks Created | Blocks Imported | Min Import Time | Max Import Time | Avg Import Time |"
        separator = "|---------|----------------|-----------------|-----------------|-----------------|-----------------|"
        lines.append(header)
        lines.append(separator)

        for node in self.all_nodes:
            s = stats[node]
            row = (f"| {node.capitalize():<7} | {s['blocks_created']:<14} | "
                   f"{s['blocks_imported']:<15} | {s['min_import']:<15.0f} | "
                   f"{s['max_import']:<15.0f} | {s['avg_import']:<15.1f} |")
            lines.append(row)

        return '\n'.join(lines)

    def run(self, input_filename: str, output_filename: str) -> None:
        """Main analysis function"""
        print(f"Analyzing nodes: {', '.join(self.all_nodes)}")
        print(f"Parsing file: {input_filename}")
        self.parse_file(input_filename)
        print(f"Total blocks parsed: {len(self.blocks)}")
        complete_blocks = self.get_complete_blocks()
        print(f"Complete blocks: {len(complete_blocks)}")
        if not complete_blocks:
            print("No complete blocks found. Exiting.")
            sys.exit(1)
        stats_table = self.generate_summary_statistics(complete_blocks)
        try:
            with open(output_filename, 'w', encoding='utf-8') as file:
                file.write("# Block Propagation Analysis\n\n")
                nodes = ', '.join(node.capitalize() for node in self.all_nodes)
                file.write(f"**Nodes analyzed:** {nodes}")
                file.write("\n\n")
                file.write(stats_table)
                file.write("\n\n")
            print(f"Analysis complete. Results saved to: {output_filename}")
        except Exception as e:
            print(f"Error writing output file '{output_filename}': {e}")
            sys.exit(1)


def main():
    nodes = [
        "alice",
        "bob",
        "charlie",
        "dave",
        "eve",
        "ferdie",
        "george",
        "henry",
        "iris",
        "jack"
    ]

    if len(sys.argv) < 3:
        print(
            "Usage: python analyzer.py <input_file.txt> <output_file.txt> "
            "[node1 node2 node3 ...]"
        )
        print(
            "Example: python analyzer.py data.txt results.txt "
            "alice bob charlie"
        )
        print(
            "If no nodes specified, default nodes will be used: "
            f"{', '.join(nodes)}"
        )
        sys.exit(1)

    input_file = sys.argv[1]
    output_file = sys.argv[2]

    if len(sys.argv) > 3:
        nodes = sys.argv[3:]

    try:
        analyzer = BlockPropagationAnalyzer(nodes)
        analyzer.run(input_file, output_file)
    except ValueError as e:
        print(f"Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
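
For reference, the string prefixes and regular expressions in `_parse_content`, `_parse_block_header`, and `_parse_import` imply that `block_propagation_report.txt` contains entries shaped roughly like the hypothetical excerpt below (the actual report is produced by extractor.py, which is not part of this excerpt):

```
Block #101 (0x1a2b…3c4d)
Created by: alice
Imported by alice (creator node)
Imported by bob after 412.0 ms
Imported by charlie after 389.5 ms
```

The default node list can be overridden on the command line, e.g. `python3 analyzer.py block_propagation_report.txt analysis.txt alice bob charlie`. A block only enters the statistics once its creator is known and every listed node has imported it.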
