@blinklabs/dingo 0.17.0 → 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/PLUGIN_DEVELOPMENT.md +299 -0
- package/README.md +74 -0
- package/benchmark_results.md +130 -0
- package/devnet.sh +14 -0
- package/dingo.yaml.example +45 -6
- package/generate_benchmarks.sh +423 -0
- package/package.json +1 -1
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
# Plugin Development Guide
|
|
2
|
+
|
|
3
|
+
This guide explains how to develop plugins for Dingo's storage system.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
Dingo supports pluggable storage backends through a registration-based plugin system. Plugins can extend the system with new blob storage (blocks, transactions) and metadata storage (indexes, state) implementations.
|
|
8
|
+
|
|
9
|
+
## Plugin Types
|
|
10
|
+
|
|
11
|
+
### Blob Storage Plugins
|
|
12
|
+
Store blockchain data (blocks, transactions, etc.). Examples:
|
|
13
|
+
- `badger` - Local BadgerDB key-value store
|
|
14
|
+
- `gcs` - Google Cloud Storage
|
|
15
|
+
- `s3` - AWS S3
|
|
16
|
+
|
|
17
|
+
### Metadata Storage Plugins
|
|
18
|
+
Store metadata and indexes. Examples:
|
|
19
|
+
- `sqlite` - SQLite relational database
|
|
20
|
+
|
|
21
|
+
## Plugin Interface
|
|
22
|
+
|
|
23
|
+
All plugins must implement the `plugin.Plugin` interface:
|
|
24
|
+
|
|
25
|
+
```go
|
|
26
|
+
type Plugin interface {
|
|
27
|
+
Start() error
|
|
28
|
+
Stop() error
|
|
29
|
+
}
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Plugin Registration
|
|
33
|
+
|
|
34
|
+
Plugins register themselves during package initialization using the `plugin.Register()` function:
|
|
35
|
+
|
|
36
|
+
```go
|
|
37
|
+
func init() {
|
|
38
|
+
plugin.Register(plugin.PluginEntry{
|
|
39
|
+
Type: plugin.PluginTypeBlob, // or PluginTypeMetadata
|
|
40
|
+
Name: "myplugin",
|
|
41
|
+
Description: "My custom storage plugin",
|
|
42
|
+
NewFromOptionsFunc: NewFromCmdlineOptions,
|
|
43
|
+
Options: []plugin.PluginOption{
|
|
44
|
+
// Plugin-specific options
|
|
45
|
+
},
|
|
46
|
+
})
|
|
47
|
+
}
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Plugin Options
|
|
51
|
+
|
|
52
|
+
Plugins define configuration options using the `PluginOption` struct:
|
|
53
|
+
|
|
54
|
+
```go
|
|
55
|
+
plugin.PluginOption{
|
|
56
|
+
Name: "data-dir", // Option name
|
|
57
|
+
Type: plugin.PluginOptionTypeString, // Data type
|
|
58
|
+
Description: "Data directory path", // Help text
|
|
59
|
+
DefaultValue: "/tmp/data", // Default value
|
|
60
|
+
Dest: &cmdlineOptions.dataDir, // Destination variable
|
|
61
|
+
}
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
Supported option types:
|
|
65
|
+
- `PluginOptionTypeString`
|
|
66
|
+
- `PluginOptionTypeBool`
|
|
67
|
+
- `PluginOptionTypeInt`
|
|
68
|
+
- `PluginOptionTypeUint`
|
|
69
|
+
|
|
70
|
+
## Environment Variables
|
|
71
|
+
|
|
72
|
+
Plugins automatically support environment variables with the pattern:
|
|
73
|
+
`DINGO_DATABASE_{TYPE}_{PLUGIN}_{OPTION}`
|
|
74
|
+
|
|
75
|
+
Examples:
|
|
76
|
+
- `DINGO_DATABASE_BLOB_BADGER_DATA_DIR=/data`
|
|
77
|
+
- `DINGO_DATABASE_METADATA_SQLITE_DATA_DIR=/metadata.db`
|
|
78
|
+
|
|
79
|
+
## YAML Configuration
|
|
80
|
+
|
|
81
|
+
Plugins can be configured in `dingo.yaml`:
|
|
82
|
+
|
|
83
|
+
```yaml
|
|
84
|
+
database:
|
|
85
|
+
blob:
|
|
86
|
+
plugin: "myplugin"
|
|
87
|
+
myplugin:
|
|
88
|
+
option1: "value1"
|
|
89
|
+
option2: 42
|
|
90
|
+
metadata:
|
|
91
|
+
plugin: "sqlite"
|
|
92
|
+
sqlite:
|
|
93
|
+
data-dir: "/data/metadata.db"
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
## Configuration Precedence
|
|
97
|
+
|
|
98
|
+
1. Command-line flags (highest priority)
|
|
99
|
+
2. Environment variables
|
|
100
|
+
3. YAML configuration
|
|
101
|
+
4. Default values (lowest priority)
|
|
102
|
+
|
|
103
|
+
## Command Line Options
|
|
104
|
+
|
|
105
|
+
Plugins support command-line flags with the pattern:
|
|
106
|
+
`--{type}-{plugin}-{option}`
|
|
107
|
+
|
|
108
|
+
Examples:
|
|
109
|
+
- `--blob-badger-data-dir /data`
|
|
110
|
+
- `--metadata-sqlite-data-dir /metadata.db`
|
|
111
|
+
|
|
112
|
+
## Plugin Development Steps
|
|
113
|
+
|
|
114
|
+
### 1. Create Plugin Structure
|
|
115
|
+
|
|
116
|
+
```text
|
|
117
|
+
database/plugin/{type}/{name}/
|
|
118
|
+
├── plugin.go # Registration and options
|
|
119
|
+
├── options.go # Option functions
|
|
120
|
+
├── database.go # Core implementation
|
|
121
|
+
└── options_test.go # Unit tests
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
### 2. Implement Core Plugin
|
|
125
|
+
|
|
126
|
+
Create the main plugin struct that implements `plugin.Plugin`:
|
|
127
|
+
|
|
128
|
+
```go
|
|
129
|
+
type MyPlugin struct {
|
|
130
|
+
// Fields
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
func (p *MyPlugin) Start() error {
|
|
134
|
+
// Initialize resources
|
|
135
|
+
return nil
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
func (p *MyPlugin) Stop() error {
|
|
139
|
+
// Clean up resources
|
|
140
|
+
return nil
|
|
141
|
+
}
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
### 3. Define Options
|
|
145
|
+
|
|
146
|
+
Create option functions following the pattern:
|
|
147
|
+
|
|
148
|
+
```go
|
|
149
|
+
func WithOptionName(value Type) OptionFunc {
|
|
150
|
+
return func(p *MyPlugin) {
|
|
151
|
+
p.field = value
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
### 4. Implement Constructors
|
|
157
|
+
|
|
158
|
+
Provide both options-based and legacy constructors:
|
|
159
|
+
|
|
160
|
+
```go
|
|
161
|
+
func NewWithOptions(opts ...OptionFunc) (*MyPlugin, error) {
|
|
162
|
+
p := &MyPlugin{}
|
|
163
|
+
for _, opt := range opts {
|
|
164
|
+
opt(p)
|
|
165
|
+
}
|
|
166
|
+
return p, nil
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
func New(legacyParam1, legacyParam2) (*MyPlugin, error) {
|
|
170
|
+
// For backward compatibility
|
|
171
|
+
return NewWithOptions(
|
|
172
|
+
WithOption1(legacyParam1),
|
|
173
|
+
WithOption2(legacyParam2),
|
|
174
|
+
)
|
|
175
|
+
}
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
### 5. Register Plugin
|
|
179
|
+
|
|
180
|
+
In `plugin.go`, register during initialization:
|
|
181
|
+
|
|
182
|
+
```go
|
|
183
|
+
var cmdlineOptions struct {
|
|
184
|
+
option1 string
|
|
185
|
+
option2 int
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
func init() {
|
|
189
|
+
plugin.Register(plugin.PluginEntry{
|
|
190
|
+
Type: plugin.PluginTypeBlob,
|
|
191
|
+
Name: "myplugin",
|
|
192
|
+
Description: "My custom plugin",
|
|
193
|
+
NewFromOptionsFunc: NewFromCmdlineOptions,
|
|
194
|
+
Options: []plugin.PluginOption{
|
|
195
|
+
{
|
|
196
|
+
Name: "option1",
|
|
197
|
+
Type: plugin.PluginOptionTypeString,
|
|
198
|
+
Description: "First option",
|
|
199
|
+
DefaultValue: "default",
|
|
200
|
+
Dest: &cmdlineOptions.option1,
|
|
201
|
+
},
|
|
202
|
+
// More options...
|
|
203
|
+
},
|
|
204
|
+
})
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
func NewFromCmdlineOptions() plugin.Plugin {
|
|
208
|
+
p, err := NewWithOptions(
|
|
209
|
+
WithOption1(cmdlineOptions.option1),
|
|
210
|
+
WithOption2(cmdlineOptions.option2),
|
|
211
|
+
)
|
|
212
|
+
if err != nil {
|
|
213
|
+
panic(err)
|
|
214
|
+
}
|
|
215
|
+
return p
|
|
216
|
+
}
|
|
217
|
+
```
|
|
218
|
+
|
|
219
|
+
### 6. Add Tests
|
|
220
|
+
|
|
221
|
+
Create comprehensive tests:
|
|
222
|
+
|
|
223
|
+
```go
|
|
224
|
+
func TestOptions(t *testing.T) {
|
|
225
|
+
// Test option functions
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
func TestLifecycle(t *testing.T) {
|
|
229
|
+
p, err := NewWithOptions(WithOption1("test"))
|
|
230
|
+
// Test Start/Stop
|
|
231
|
+
}
|
|
232
|
+
```
|
|
233
|
+
|
|
234
|
+
### 7. Update Imports
|
|
235
|
+
|
|
236
|
+
Add your plugin to the import list in the appropriate store file:
|
|
237
|
+
- `database/plugin/blob/blob.go` for blob plugins
|
|
238
|
+
- `database/plugin/metadata/metadata.go` for metadata plugins
|
|
239
|
+
|
|
240
|
+
## Example: Complete Plugin
|
|
241
|
+
|
|
242
|
+
See the existing plugins for complete examples:
|
|
243
|
+
- `database/plugin/blob/badger/` - BadgerDB implementation
|
|
244
|
+
- `database/plugin/metadata/sqlite/` - SQLite implementation
|
|
245
|
+
- `database/plugin/blob/gcs/` - Google Cloud Storage implementation
|
|
246
|
+
- `database/plugin/blob/aws/` - AWS S3 implementation
|
|
247
|
+
|
|
248
|
+
## Best Practices
|
|
249
|
+
|
|
250
|
+
1. **Error Handling**: Always return descriptive errors
|
|
251
|
+
2. **Resource Management**: Properly implement Start/Stop for resource lifecycle
|
|
252
|
+
3. **Thread Safety**: Ensure plugins are safe for concurrent use
|
|
253
|
+
4. **Configuration Validation**: Validate configuration during construction
|
|
254
|
+
5. **Backward Compatibility**: Maintain compatibility with existing deployments
|
|
255
|
+
6. **Documentation**: Document all options and their effects
|
|
256
|
+
7. **Testing**: Provide comprehensive unit and integration tests
|
|
257
|
+
|
|
258
|
+
## Testing Your Plugin
|
|
259
|
+
|
|
260
|
+
### Unit Tests
|
|
261
|
+
Test individual components and option functions.
|
|
262
|
+
|
|
263
|
+
### Integration Tests
|
|
264
|
+
Test the complete plugin lifecycle and interaction with the plugin system.
|
|
265
|
+
|
|
266
|
+
### CLI Testing
|
|
267
|
+
Use the CLI to test plugin listing and selection:
|
|
268
|
+
|
|
269
|
+
```bash
|
|
270
|
+
./dingo --blob list
|
|
271
|
+
./dingo --metadata list
|
|
272
|
+
```
|
|
273
|
+
|
|
274
|
+
### Configuration Testing
|
|
275
|
+
Test environment variables and YAML configuration:
|
|
276
|
+
|
|
277
|
+
```bash
|
|
278
|
+
DINGO_DATABASE_BLOB_MYPLUGIN_OPTION1=value ./dingo --blob myplugin
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
## Programmatic Option Overrides (for tests)
|
|
282
|
+
|
|
283
|
+
When writing tests or programmatically constructing database instances, you can override plugin options
|
|
284
|
+
without importing plugin implementation packages directly by using the plugin registry helper:
|
|
285
|
+
|
|
286
|
+
```go
|
|
287
|
+
// Set data-dir for the blob plugin to a per-test temp directory
|
|
288
|
+
plugin.SetPluginOption(plugin.PluginTypeBlob, "badger", "data-dir", t.TempDir())
|
|
289
|
+
|
|
290
|
+
// Set data-dir for the metadata plugin
|
|
291
|
+
plugin.SetPluginOption(plugin.PluginTypeMetadata, "sqlite", "data-dir", t.TempDir())
|
|
292
|
+
```
|
|
293
|
+
|
|
294
|
+
The helper sets the plugin option's destination variable in the registry before plugin instantiation.
|
|
295
|
+
If the requested option is not defined by the targeted plugin, the call is non-fatal and returns nil,
|
|
296
|
+
allowing tests to run regardless of which plugin implementation is selected.
|
|
297
|
+
|
|
298
|
+
Using `t.TempDir()` guarantees each test uses its own on-disk path and prevents concurrent tests from
|
|
299
|
+
colliding on shared directories (for example the default `.dingo` Badger directory).
|
package/README.md
CHANGED
|
@@ -13,6 +13,8 @@
|
|
|
13
13
|
|
|
14
14
|
⚠️ This is a work in progress and is currently under heavy development
|
|
15
15
|
|
|
16
|
+
**Note:** On Windows systems, named pipes are used instead of Unix sockets for node-to-client communication.
|
|
17
|
+
|
|
16
18
|
<div align="center">
|
|
17
19
|
<img src="./.github/dingo-20241210.png" alt="dingo screenshot" width="640">
|
|
18
20
|
</div>
|
|
@@ -62,6 +64,78 @@ This behavior can be changed via the following environment variables:
|
|
|
62
64
|
(default: empty)
|
|
63
65
|
- `TLS_KEY_FILE_PATH` - SSL certificate key to use (default: empty)
|
|
64
66
|
|
|
67
|
+
## Database Plugins
|
|
68
|
+
|
|
69
|
+
Dingo supports pluggable storage backends for both blob storage (blocks, transactions) and metadata storage. This allows you to choose the best storage solution for your use case.
|
|
70
|
+
|
|
71
|
+
### Available Plugins
|
|
72
|
+
|
|
73
|
+
**Blob Storage Plugins:**
|
|
74
|
+
- `badger` - BadgerDB local key-value store (default)
|
|
75
|
+
- `gcs` - Google Cloud Storage blob store
|
|
76
|
+
- `s3` - AWS S3 blob store
|
|
77
|
+
|
|
78
|
+
**Metadata Storage Plugins:**
|
|
79
|
+
- `sqlite` - SQLite relational database (default)
|
|
80
|
+
|
|
81
|
+
### Plugin Selection
|
|
82
|
+
|
|
83
|
+
Plugins can be selected via command-line flags, environment variables, or configuration file:
|
|
84
|
+
|
|
85
|
+
```bash
|
|
86
|
+
# Command line
|
|
87
|
+
./dingo --blob gcs --metadata sqlite
|
|
88
|
+
|
|
89
|
+
# Environment variables
|
|
90
|
+
DINGO_DATABASE_BLOB_PLUGIN=gcs
|
|
91
|
+
DINGO_DATABASE_METADATA_PLUGIN=sqlite
|
|
92
|
+
|
|
93
|
+
# Configuration file (dingo.yaml)
|
|
94
|
+
database:
|
|
95
|
+
blob:
|
|
96
|
+
plugin: "gcs"
|
|
97
|
+
metadata:
|
|
98
|
+
plugin: "sqlite"
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
### Plugin Configuration
|
|
102
|
+
|
|
103
|
+
Each plugin supports specific configuration options. See `dingo.yaml.example` for detailed configuration examples.
|
|
104
|
+
|
|
105
|
+
**BadgerDB Options:**
|
|
106
|
+
- `data-dir` - Directory for database files
|
|
107
|
+
- `block-cache-size` - Block cache size in bytes
|
|
108
|
+
- `index-cache-size` - Index cache size in bytes
|
|
109
|
+
- `gc` - Enable garbage collection
|
|
110
|
+
|
|
111
|
+
**Google Cloud Storage Options:**
|
|
112
|
+
- `bucket` - GCS bucket name
|
|
113
|
+
- `project-id` - Google Cloud project ID
|
|
114
|
+
- `prefix` - Path prefix within bucket
|
|
115
|
+
- `credentials-file` - Path to service account credentials file (optional - uses Application Default Credentials if not provided)
|
|
116
|
+
|
|
117
|
+
**AWS S3 Options:**
|
|
118
|
+
- `bucket` - S3 bucket name
|
|
119
|
+
- `region` - AWS region
|
|
120
|
+
- `prefix` - Path prefix within bucket
|
|
121
|
+
- `access-key-id` - AWS access key ID (optional - uses default credential chain if not provided)
|
|
122
|
+
- `secret-access-key` - AWS secret access key (optional - uses default credential chain if not provided)
|
|
123
|
+
|
|
124
|
+
**SQLite Options:**
|
|
125
|
+
- `data-dir` - Path to SQLite database file
|
|
126
|
+
|
|
127
|
+
### Listing Available Plugins
|
|
128
|
+
|
|
129
|
+
You can see all available plugins and their descriptions:
|
|
130
|
+
|
|
131
|
+
```bash
|
|
132
|
+
./dingo list
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
## Plugin Development
|
|
136
|
+
|
|
137
|
+
For information on developing custom storage plugins, see [PLUGIN_DEVELOPMENT.md](PLUGIN_DEVELOPMENT.md).
|
|
138
|
+
|
|
65
139
|
### Example
|
|
66
140
|
|
|
67
141
|
Running on mainnet (:sweat_smile:):
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
# Dingo Ledger & Database Benchmark Results
|
|
2
|
+
|
|
3
|
+
## Latest Results
|
|
4
|
+
|
|
5
|
+
### Test Environment
|
|
6
|
+
- **Date**: November 26, 2025
|
|
7
|
+
- **Go Version**: 1.24.1
|
|
8
|
+
- **OS**: Linux
|
|
9
|
+
- **Architecture**: aarch64
|
|
10
|
+
- **CPU Cores**: 128
|
|
11
|
+
- **Data Source**: Real Cardano preview testnet data (40k+ blocks, slots 0-863,996)
|
|
12
|
+
|
|
13
|
+
### Benchmark Results
|
|
14
|
+
|
|
15
|
+
All benchmarks run with `-benchmem` flag showing memory allocations and operation counts.
|
|
16
|
+
|
|
17
|
+
| Benchmark | Operations/sec | Time/op | Memory/op | Allocs/op |
|
|
18
|
+
|-----------|----------------|---------|-----------|-----------|
|
|
19
|
+
| Pool Lookup By Key Hash No Data | 36231 | 33604ns | 4KB | 79 |
|
|
20
|
+
| Pool Registration Lookups No Data | 24210 | 46595ns | 10KB | 93 |
|
|
21
|
+
| Account Lookup By Stake Key Real Data | 39950 | 34026ns | 4KB | 75 |
|
|
22
|
+
| Utxo Lookup By Address No Data | 109825 | 16460ns | 2KB | 19 |
|
|
23
|
+
| Storage Backends/memory | 32593 | 34818ns | 13KB | 70 |
|
|
24
|
+
| Transaction Validation | 230958193 | 5.238ns | 0B | 0 |
|
|
25
|
+
| Real Block Reading | 22 | 53427289ns | 2183KB | 74472 |
|
|
26
|
+
| Block Retrieval By Index Real Data | 303981 | 3868ns | 472B | 11 |
|
|
27
|
+
| Index Building Time | 14078 | 87080ns | 17KB | 119 |
|
|
28
|
+
| Chain Sync From Genesis | 74 | 15096067ns | 100.0blocks_processed | 2247880 |
|
|
29
|
+
| Block Retrieval By Index No Data | 315110 | 3652ns | 472B | 11 |
|
|
30
|
+
| Transaction Create | 77013 | 16824ns | 2KB | 18 |
|
|
31
|
+
| Utxo Lookup By Address Real Data | 82125 | 16014ns | 2KB | 19 |
|
|
32
|
+
| Era Transition Performance | 459890545 | 2.178ns | 0B | 0 |
|
|
33
|
+
| Protocol Parameters Lookup By Epoch Real Data | 44181 | 32903ns | 5KB | 62 |
|
|
34
|
+
| Pool Registration Lookups Real Data | 22616 | 48729ns | 10KB | 93 |
|
|
35
|
+
| Stake Registration Lookups Real Data | 38761 | 33724ns | 5KB | 69 |
|
|
36
|
+
| Era Transition Performance Real Data | 4340 | 267246ns | 83KB | 490 |
|
|
37
|
+
| Real Block Processing | 13651 | 84993ns | 17KB | 119 |
|
|
38
|
+
| Utxo Lookup By Ref No Data | 10461 | 119506ns | 8KB | 131 |
|
|
39
|
+
| Storage Backends/disk | 33004 | 32561ns | 13KB | 70 |
|
|
40
|
+
| Test Load/memory | 1912 | 650898ns | 260KB | 1400 |
|
|
41
|
+
| Stake Registration Lookups No Data | 33963 | 33887ns | 5KB | 69 |
|
|
42
|
+
| Utxo Lookup By Ref Real Data | 10000 | 122404ns | 8KB | 131 |
|
|
43
|
+
| Protocol Parameters Lookup By Epoch No Data | 37996 | 32298ns | 5KB | 62 |
|
|
44
|
+
| Datum Lookup By Hash No Data | 32635 | 34072ns | 4KB | 69 |
|
|
45
|
+
| Block Processing Throughput | 3904 | 290370ns | 3444blocks/sec | 22462 |
|
|
46
|
+
| Real Data Queries | 68570 | 17752ns | 5KB | 43 |
|
|
47
|
+
| Block Nonce Lookup Real Data | 34851 | 37815ns | 4KB | 73 |
|
|
48
|
+
| Test Load/disk | 2068 | 536686ns | 260KB | 1400 |
|
|
49
|
+
| Pool Lookup By Key Hash Real Data | 40214 | 33631ns | 4KB | 79 |
|
|
50
|
+
| D Rep Lookup By Key Hash No Data | 35266 | 36094ns | 4KB | 77 |
|
|
51
|
+
| Transaction History Queries No Data | 34167 | 33889ns | 4KB | 78 |
|
|
52
|
+
| Datum Lookup By Hash Real Data | 44810 | 33686ns | 4KB | 69 |
|
|
53
|
+
| Block Nonce Lookup No Data | 32011 | 36515ns | 4KB | 73 |
|
|
54
|
+
| Account Lookup By Stake Key No Data | 34898 | 35142ns | 4KB | 75 |
|
|
55
|
+
| Transaction History Queries Real Data | 37575 | 34812ns | 4KB | 78 |
|
|
56
|
+
| Concurrent Queries | 15408 | 68781ns | 14581queries/sec | 3943 |
|
|
57
|
+
| D Rep Lookup By Key Hash Real Data | 39360 | 34907ns | 4KB | 77 |
|
|
58
|
+
| Block Memory Usage | 29292 | 44369ns | 14KB | 49 |
|
|
59
|
+
|
|
60
|
+
## Performance Changes
|
|
61
|
+
|
|
62
|
+
Changes since **November 26, 2025**:
|
|
63
|
+
|
|
64
|
+
### Summary
|
|
65
|
+
- **Faster benchmarks**: 19
|
|
66
|
+
- **Slower benchmarks**: 20
|
|
67
|
+
- **New benchmarks**: 0
|
|
68
|
+
- **Removed benchmarks**: 0
|
|
69
|
+
|
|
70
|
+
### Top Improvements
|
|
71
|
+
- Utxo Lookup By Address No Data (+44%)
|
|
72
|
+
- Transaction Validation (+0%)
|
|
73
|
+
- Test Load/memory (+19%)
|
|
74
|
+
- Test Load/disk (+31%)
|
|
75
|
+
- Storage Backends/memory (+3%)
|
|
76
|
+
|
|
77
|
+
### Performance Regressions
|
|
78
|
+
- Pool Lookup By Key Hash No Data (-0%)
|
|
79
|
+
- Index Building Time (-0%)
|
|
80
|
+
- Account Lookup By Stake Key No Data (-0%)
|
|
81
|
+
- Transaction History Queries No Data (-1%)
|
|
82
|
+
- Stake Registration Lookups No Data (-1%)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
## Historical Results
|
|
86
|
+
|
|
87
|
+
### November 26, 2025
|
|
88
|
+
|
|
89
|
+
| Benchmark | Operations/sec | Time/op | Memory/op | Allocs/op |
|
|
90
|
+
|-----------|----------------|---------|-----------|-----------|
|
|
91
|
+
| Pool Lookup By Key Hash No Data | 36328 | 33556ns | 4KB | 79 |
|
|
92
|
+
| Pool Registration Lookups No Data | 24898 | 51298ns | 10KB | 93 |
|
|
93
|
+
| Account Lookup By Stake Key Real Data | 39936 | 33871ns | 4KB | 75 |
|
|
94
|
+
| Utxo Lookup By Address No Data | 75966 | 16953ns | 2KB | 19 |
|
|
95
|
+
| Storage Backends/memory | 31369 | 33394ns | 13KB | 70 |
|
|
96
|
+
| Transaction Validation | 230637091 | 5.213ns | 0B | 0 |
|
|
97
|
+
| Real Block Reading | 20 | 55046083ns | 2183KB | 74472 |
|
|
98
|
+
| Block Retrieval By Index Real Data | 312492 | 4102ns | 472B | 11 |
|
|
99
|
+
| Index Building Time | 14168 | 86378ns | 17KB | 119 |
|
|
100
|
+
| Chain Sync From Genesis | 100 | 14189212ns | 100.0blocks_processed | 2247966 |
|
|
101
|
+
| Block Retrieval By Index No Data | 277410 | 4025ns | 472B | 11 |
|
|
102
|
+
| Transaction Create | 92761 | 16456ns | 2KB | 18 |
|
|
103
|
+
| Utxo Lookup By Address Real Data | 86467 | 15705ns | 2KB | 19 |
|
|
104
|
+
| Era Transition Performance | 499031763 | 2.139ns | 0B | 0 |
|
|
105
|
+
| Protocol Parameters Lookup By Epoch Real Data | 44150 | 30066ns | 5KB | 62 |
|
|
106
|
+
| Pool Registration Lookups Real Data | 23724 | 48201ns | 10KB | 93 |
|
|
107
|
+
| Stake Registration Lookups Real Data | 40082 | 32844ns | 5KB | 69 |
|
|
108
|
+
| Era Transition Performance Real Data | 3525 | 305364ns | 83KB | 490 |
|
|
109
|
+
| Real Block Processing | 13509 | 87846ns | 17KB | 119 |
|
|
110
|
+
| Utxo Lookup By Ref No Data | 10724 | 111792ns | 8KB | 131 |
|
|
111
|
+
| Storage Backends/disk | 28960 | 37643ns | 13KB | 70 |
|
|
112
|
+
| Test Load/memory | 1605 | 669272ns | 260KB | 1400 |
|
|
113
|
+
| Stake Registration Lookups No Data | 34359 | 36417ns | 5KB | 69 |
|
|
114
|
+
| Utxo Lookup By Ref Real Data | 10000 | 111756ns | 8KB | 131 |
|
|
115
|
+
| Protocol Parameters Lookup By Epoch No Data | 37886 | 32905ns | 5KB | 62 |
|
|
116
|
+
| Datum Lookup By Hash No Data | 40302 | 29076ns | 4KB | 69 |
|
|
117
|
+
| Block Processing Throughput | 4632 | 287053ns | 3484blocks/sec | 22466 |
|
|
118
|
+
| Real Data Queries | 62532 | 19011ns | 5KB | 43 |
|
|
119
|
+
| Block Nonce Lookup Real Data | 35782 | 38514ns | 4KB | 73 |
|
|
120
|
+
| Test Load/disk | 1574 | 711525ns | 260KB | 1400 |
|
|
121
|
+
| Pool Lookup By Key Hash Real Data | 39812 | 33599ns | 4KB | 79 |
|
|
122
|
+
| D Rep Lookup By Key Hash No Data | 35139 | 35088ns | 4KB | 77 |
|
|
123
|
+
| Transaction History Queries No Data | 34530 | 36887ns | 4KB | 78 |
|
|
124
|
+
| Datum Lookup By Hash Real Data | 36972 | 31369ns | 4KB | 69 |
|
|
125
|
+
| Block Nonce Lookup No Data | 32977 | 36460ns | 4KB | 73 |
|
|
126
|
+
| Account Lookup By Stake Key No Data | 35020 | 32890ns | 4KB | 75 |
|
|
127
|
+
| Transaction History Queries Real Data | 38739 | 35904ns | 4KB | 78 |
|
|
128
|
+
| D Rep Lookup By Key Hash Real Data | 38977 | 36859ns | 4KB | 77 |
|
|
129
|
+
| Concurrent Queries | 19104 | 59527ns | 168280queries/sec | 3851 |
|
|
130
|
+
| Block Memory Usage | 26552 | 46212ns | 14KB | 49 |
|
package/devnet.sh
CHANGED
|
@@ -1,5 +1,19 @@
|
|
|
1
1
|
#!/usr/bin/env bash
|
|
2
2
|
|
|
3
|
+
# Copyright 2025 Blink Labs Software
|
|
4
|
+
#
|
|
5
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
6
|
+
# you may not use this file except in compliance with the License.
|
|
7
|
+
# You may obtain a copy of the License at
|
|
8
|
+
#
|
|
9
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
10
|
+
#
|
|
11
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
12
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
13
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
14
|
+
# See the License for the specific language governing permissions and
|
|
15
|
+
# limitations under the License.
|
|
16
|
+
|
|
3
17
|
export CARDANO_NETWORK=devnet
|
|
4
18
|
export CARDANO_CONFIG=./config/cardano/devnet/config.json
|
|
5
19
|
export CARDANO_DATABASE_PATH=.devnet
|
package/dingo.yaml.example
CHANGED
|
@@ -9,8 +9,51 @@ bindAddr: "0.0.0.0"
|
|
|
9
9
|
# Can be overridden with the config environment variable
|
|
10
10
|
cardanoConfig: "./config/cardano/preview/config.json"
|
|
11
11
|
|
|
12
|
-
#
|
|
13
|
-
|
|
12
|
+
# Database configuration
|
|
13
|
+
database:
|
|
14
|
+
# Blob storage plugin configuration
|
|
15
|
+
blob:
|
|
16
|
+
# Plugin to use for blob storage (badger, gcs, s3)
|
|
17
|
+
plugin: "badger"
|
|
18
|
+
# Configuration options for each plugin
|
|
19
|
+
badger:
|
|
20
|
+
# Data directory for BadgerDB storage
|
|
21
|
+
data-dir: ".dingo/badger"
|
|
22
|
+
# Block cache size in bytes (default: 1610612736 ~1.5GB)
|
|
23
|
+
block-cache-size: 1610612736
|
|
24
|
+
# Index cache size in bytes (default: 536870912 ~512MB)
|
|
25
|
+
index-cache-size: 536870912
|
|
26
|
+
# Enable garbage collection (default: true)
|
|
27
|
+
gc: true
|
|
28
|
+
gcs:
|
|
29
|
+
# Google Cloud Storage bucket name
|
|
30
|
+
bucket: ""
|
|
31
|
+
# Google Cloud project ID
|
|
32
|
+
project-id: ""
|
|
33
|
+
# Path prefix within the bucket
|
|
34
|
+
prefix: ""
|
|
35
|
+
# Path to service account credentials file (optional - uses Application Default Credentials if not set)
|
|
36
|
+
credentials-file: ""
|
|
37
|
+
s3:
|
|
38
|
+
# AWS S3 bucket name
|
|
39
|
+
bucket: ""
|
|
40
|
+
# AWS region
|
|
41
|
+
region: ""
|
|
42
|
+
# Path prefix within the bucket
|
|
43
|
+
prefix: ""
|
|
44
|
+
# AWS access key ID (optional - uses default credential chain if not set)
|
|
45
|
+
access-key-id: ""
|
|
46
|
+
# AWS secret access key (optional - uses default credential chain if not set)
|
|
47
|
+
secret-access-key: ""
|
|
48
|
+
|
|
49
|
+
# Metadata storage plugin configuration
|
|
50
|
+
metadata:
|
|
51
|
+
# Plugin to use for metadata storage (sqlite)
|
|
52
|
+
plugin: "sqlite"
|
|
53
|
+
# Configuration options for each plugin
|
|
54
|
+
sqlite:
|
|
55
|
+
# Data directory for SQLite database file
|
|
56
|
+
data-dir: ".dingo/metadata.db"
|
|
14
57
|
|
|
15
58
|
# Path to the UNIX domain socket file used by the server
|
|
16
59
|
socketPath: "dingo.socket"
|
|
@@ -52,10 +95,6 @@ utxorpcPort: 9090
|
|
|
52
95
|
# This is experimental and may break — use with caution
|
|
53
96
|
intersectTip: false
|
|
54
97
|
|
|
55
|
-
# Maximum cache size in bytes used by BadgerDB for block/index cache
|
|
56
|
-
# Default: 1073741824 (1 GB)
|
|
57
|
-
badgerCacheSize: 1073741824
|
|
58
|
-
|
|
59
98
|
# Maximum total size (in bytes) of all transactions allowed in the mempool.
|
|
60
99
|
# Transactions exceeding this limit will be rejected.
|
|
61
100
|
# Default: 1048576 (1 MB)
|
|
@@ -0,0 +1,423 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
|
|
3
|
+
# Copyright 2025 Blink Labs Software
|
|
4
|
+
#
|
|
5
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
6
|
+
# you may not use this file except in compliance with the License.
|
|
7
|
+
# You may obtain a copy of the License at
|
|
8
|
+
#
|
|
9
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
10
|
+
#
|
|
11
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
12
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
13
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
14
|
+
# See the License for the specific language governing permissions and
|
|
15
|
+
# limitations under the License.
|
|
16
|
+
|
|
17
|
+
# Script to generate benchmark results for Dingo ledger and database with historical tracking
|
|
18
|
+
# Usage: ./generate_benchmarks.sh [output_file] [--write]
|
|
19
|
+
# --write: Write results to file (default: display only)
|
|
20
|
+
|
|
21
|
+
WRITE_TO_FILE=false
|
|
22
|
+
OUTPUT_FILE="benchmark_results.md"
|
|
23
|
+
|
|
24
|
+
# Parse arguments
|
|
25
|
+
while [[ $# -gt 0 ]]; do
|
|
26
|
+
case $1 in
|
|
27
|
+
--write)
|
|
28
|
+
WRITE_TO_FILE=true
|
|
29
|
+
shift
|
|
30
|
+
;;
|
|
31
|
+
-*)
|
|
32
|
+
echo "Unknown option: $1"
|
|
33
|
+
echo "Usage: $0 [output_file] [--write]"
|
|
34
|
+
exit 1
|
|
35
|
+
;;
|
|
36
|
+
*)
|
|
37
|
+
# First non-option argument is the output file
|
|
38
|
+
if [[ -z "$OUTPUT_FILE_SET" ]]; then
|
|
39
|
+
OUTPUT_FILE="$1"
|
|
40
|
+
OUTPUT_FILE_SET=true
|
|
41
|
+
else
|
|
42
|
+
echo "Too many arguments. Usage: $0 [output_file] [--write]"
|
|
43
|
+
exit 1
|
|
44
|
+
fi
|
|
45
|
+
shift
|
|
46
|
+
;;
|
|
47
|
+
esac
|
|
48
|
+
done
|
|
49
|
+
|
|
50
|
+
DATE=$(date +"%B %d, %Y")
|
|
51
|
+
|
|
52
|
+
# Initialize environment information for benchmark report
|
|
53
|
+
GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//')
|
|
54
|
+
OS=$(uname -s)
|
|
55
|
+
ARCH=$(uname -m)
|
|
56
|
+
CPU_CORES=$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo "unknown")
|
|
57
|
+
|
|
58
|
+
echo "Running all Dingo benchmarks..."
|
|
59
|
+
echo "==============================="
|
|
60
|
+
|
|
61
|
+
# Run benchmarks with progress output first
|
|
62
|
+
echo "Executing benchmarks (this may take a few minutes)..."
|
|
63
|
+
|
|
64
|
+
# Enable pipefail to catch go test failures in the pipeline
|
|
65
|
+
set -o pipefail
|
|
66
|
+
|
|
67
|
+
# Run go test once, capture output while showing progress
|
|
68
|
+
BENCHMARK_OUTPUT=$(go test -bench=. -benchmem ./... -run=^$ 2>&1)
|
|
69
|
+
GO_TEST_EXIT_CODE=$?
|
|
70
|
+
|
|
71
|
+
# Show progress by parsing benchmark names from output
|
|
72
|
+
echo "$BENCHMARK_OUTPUT" | grep "^Benchmark" | sed 's/Benchmark//' | sed 's/-[0-9]*$//' | while read -r name rest; do
|
|
73
|
+
echo "Running: $name-128"
|
|
74
|
+
done
|
|
75
|
+
|
|
76
|
+
# Check if go test succeeded
|
|
77
|
+
if [[ $GO_TEST_EXIT_CODE -ne 0 ]]; then
|
|
78
|
+
echo "Benchmark run failed!"
|
|
79
|
+
exit 1
|
|
80
|
+
fi
|
|
81
|
+
|
|
82
|
+
# Count benchmarks
|
|
83
|
+
BENCHMARK_COUNT=$(echo "$BENCHMARK_OUTPUT" | grep "^Benchmark" | wc -l)
|
|
84
|
+
|
|
85
|
+
echo "Found $BENCHMARK_COUNT benchmarks across all packages"
|
|
86
|
+
echo ""
|
|
87
|
+
|
|
88
|
+
# Function to parse benchmark line
|
|
89
|
+
parse_benchmark() {
|
|
90
|
+
local line="$1"
|
|
91
|
+
local name
|
|
92
|
+
name=$(echo "$line" | awk '{print $1}' | sed 's/Benchmark//' | sed 's/-[0-9]*$//')
|
|
93
|
+
local ops_sec
|
|
94
|
+
ops_sec=$(echo "$line" | awk '{print $2}' | sed 's/,//g')
|
|
95
|
+
local time_val
|
|
96
|
+
time_val=$(echo "$line" | awk '{print $3}')
|
|
97
|
+
local time_unit
|
|
98
|
+
time_unit=$(echo "$line" | awk '{print $4}')
|
|
99
|
+
local mem_val
|
|
100
|
+
mem_val=$(echo "$line" | awk '{print $5}')
|
|
101
|
+
local mem_unit
|
|
102
|
+
mem_unit=$(echo "$line" | awk '{print $6}')
|
|
103
|
+
local allocs_op
|
|
104
|
+
allocs_op=$(echo "$line" | awk '{print $7}')
|
|
105
|
+
|
|
106
|
+
# Format time
|
|
107
|
+
if [[ "$time_unit" == "ns/op" ]]; then
|
|
108
|
+
time_op="${time_val}ns"
|
|
109
|
+
elif [[ "$time_unit" == "μs/op" ]] || [[ "$time_unit" == "µs/op" ]]; then
|
|
110
|
+
time_op="${time_val}μs"
|
|
111
|
+
elif [[ "$time_unit" == "ms/op" ]]; then
|
|
112
|
+
time_op="${time_val}ms"
|
|
113
|
+
elif [[ "$time_unit" == "s/op" ]]; then
|
|
114
|
+
time_op="${time_val}s"
|
|
115
|
+
else
|
|
116
|
+
time_op="${time_val}${time_unit}"
|
|
117
|
+
fi
|
|
118
|
+
|
|
119
|
+
# Format memory
|
|
120
|
+
if [[ "$mem_unit" == "B/op" ]]; then
|
|
121
|
+
if [[ $mem_val -gt 1000 ]]; then
|
|
122
|
+
mem_kb=$((mem_val / 1000))
|
|
123
|
+
mem_op="${mem_kb}KB"
|
|
124
|
+
else
|
|
125
|
+
mem_op="${mem_val}B"
|
|
126
|
+
fi
|
|
127
|
+
else
|
|
128
|
+
mem_op="${mem_val}${mem_unit}"
|
|
129
|
+
fi
|
|
130
|
+
|
|
131
|
+
# Format benchmark name nicely
|
|
132
|
+
formatted_name=$(echo "$name" | sed 's/\([A-Z]\)/ \1/g' | sed 's/^ //' | sed 's/NoData$/ (No Data)/' | sed 's/RealData$/ (Real Data)/')
|
|
133
|
+
|
|
134
|
+
echo "$formatted_name|$ops_sec|$time_op|$mem_op|$allocs_op"
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
# Parse current results into associative array
|
|
138
|
+
declare -A current_results
|
|
139
|
+
while IFS= read -r line; do
|
|
140
|
+
if [[ "$line" =~ ^Benchmark ]]; then
|
|
141
|
+
parsed=$(parse_benchmark "$line")
|
|
142
|
+
name=$(echo "$parsed" | cut -d'|' -f1)
|
|
143
|
+
data=$(echo "$parsed" | cut -d'|' -f2-)
|
|
144
|
+
current_results["$name"]="$data"
|
|
145
|
+
fi
|
|
146
|
+
done <<< "$BENCHMARK_OUTPUT"
|
|
147
|
+
|
|
148
|
+
# Display current results summary
|
|
149
|
+
echo "Current Benchmark Summary"
|
|
150
|
+
echo "-------------------------"
|
|
151
|
+
echo "Fastest benchmarks (>100k ops/sec):"
|
|
152
|
+
echo "$BENCHMARK_OUTPUT" | grep "^Benchmark" | sort -k2 -nr | head -3 | while read -r line; do
|
|
153
|
+
name=$(echo "$line" | awk '{print $1}' | sed 's/Benchmark//' | sed 's/-128$//')
|
|
154
|
+
ops=$(echo "$line" | awk '{print $2}' | sed 's/,//g')
|
|
155
|
+
echo " - $name: ${ops} ops/sec"
|
|
156
|
+
done
|
|
157
|
+
|
|
158
|
+
echo ""
|
|
159
|
+
echo "Slowest benchmarks (<1k ops/sec):"
|
|
160
|
+
echo "$BENCHMARK_OUTPUT" | grep "^Benchmark" | awk '$2 < 1000' | while read -r line; do
|
|
161
|
+
name=$(echo "$line" | awk '{print $1}' | sed 's/Benchmark//' | sed 's/-128$//')
|
|
162
|
+
ops=$(echo "$line" | awk '{print $2}' | sed 's/,//g')
|
|
163
|
+
echo " - $name: ${ops} ops/sec"
|
|
164
|
+
done
|
|
165
|
+
|
|
166
|
+
echo ""
|
|
167
|
+
echo "Memory usage:"
|
|
168
|
+
echo "$BENCHMARK_OUTPUT" | grep "^Benchmark" | sort -k5 -nr | head -3 | while read -r line; do
|
|
169
|
+
name=$(echo "$line" | awk '{print $1}' | sed 's/Benchmark//' | sed 's/-128$//')
|
|
170
|
+
mem=$(echo "$line" | awk '{print $5}')
|
|
171
|
+
echo " - $name: ${mem}B per op"
|
|
172
|
+
done
|
|
173
|
+
|
|
174
|
+
# Read previous results if file exists and we're comparing
|
|
175
|
+
declare -A previous_results
|
|
176
|
+
previous_date=""
|
|
177
|
+
MAJOR_CHANGES=false
|
|
178
|
+
|
|
179
|
+
if [[ -f "$OUTPUT_FILE" && "$WRITE_TO_FILE" == "true" ]]; then
|
|
180
|
+
echo ""
|
|
181
|
+
echo "Comparing with previous results..."
|
|
182
|
+
# Extract previous date
|
|
183
|
+
previous_date=$(grep "\*\*Date\*\*:" "$OUTPUT_FILE" | head -1 | sed 's/.*\*\*Date\*\*: //' || echo "")
|
|
184
|
+
|
|
185
|
+
# Parse previous benchmark table
|
|
186
|
+
in_table=false
|
|
187
|
+
while IFS= read -r line; do
|
|
188
|
+
# Stop parsing at performance changes or historical results sections
|
|
189
|
+
if [[ "$line" == "## Performance Changes" || "$line" == "## Historical Results" ]]; then
|
|
190
|
+
break
|
|
191
|
+
fi
|
|
192
|
+
if [[ "$line" == "| Benchmark | Operations/sec | Time/op | Memory/op | Allocs/op |" ]]; then
|
|
193
|
+
in_table=true
|
|
194
|
+
continue
|
|
195
|
+
fi
|
|
196
|
+
if [[ "$in_table" == true && "$line" =~ ^\|.*\|.*\|.*\|.*\|.*\|$ && "$line" != "|-----------|*" ]]; then
|
|
197
|
+
# Parse table row
|
|
198
|
+
benchmark=$(echo "$line" | sed 's/^| //' | cut -d'|' -f1 | sed 's/ *$//')
|
|
199
|
+
ops_sec=$(echo "$line" | sed 's/^| //' | cut -d'|' -f2 | sed 's/ //g' | sed 's/,//g')
|
|
200
|
+
time_op=$(echo "$line" | sed 's/^| //' | cut -d'|' -f3 | sed 's/ //g')
|
|
201
|
+
mem_op=$(echo "$line" | sed 's/^| //' | cut -d'|' -f4 | sed 's/ //g')
|
|
202
|
+
allocs_op=$(echo "$line" | sed 's/^| //' | cut -d'|' -f5 | sed 's/ //g')
|
|
203
|
+
if [[ -n "$benchmark" && -n "$ops_sec" ]]; then
|
|
204
|
+
previous_results["$benchmark"]="$ops_sec|$time_op|$mem_op|$allocs_op"
|
|
205
|
+
fi
|
|
206
|
+
fi
|
|
207
|
+
if [[ "$in_table" == true && "$line" == "" ]]; then
|
|
208
|
+
in_table=false
|
|
209
|
+
fi
|
|
210
|
+
done < "$OUTPUT_FILE"
|
|
211
|
+
fi
|
|
212
|
+
|
|
213
|
+
# Generate performance comparison if we have previous results
|
|
214
|
+
if [[ -n "$previous_date" && "$WRITE_TO_FILE" == "true" ]]; then
|
|
215
|
+
# Track changes
|
|
216
|
+
declare -a faster_benchmarks
|
|
217
|
+
declare -a slower_benchmarks
|
|
218
|
+
declare -a new_benchmarks
|
|
219
|
+
declare -a removed_benchmarks
|
|
220
|
+
|
|
221
|
+
# Compare results
|
|
222
|
+
for benchmark in "${!current_results[@]}"; do
|
|
223
|
+
if [[ -n "${previous_results[$benchmark]}" ]]; then
|
|
224
|
+
# Benchmark exists in both
|
|
225
|
+
current_data="${current_results[$benchmark]}"
|
|
226
|
+
previous_data="${previous_results[$benchmark]}"
|
|
227
|
+
|
|
228
|
+
current_ops=$(echo "$current_data" | cut -d'|' -f1)
|
|
229
|
+
previous_ops=$(echo "$previous_data" | cut -d'|' -f1)
|
|
230
|
+
|
|
231
|
+
if [[ "$current_ops" =~ ^[0-9]+$ && "$previous_ops" =~ ^[0-9]+$ && $previous_ops -gt 0 ]]; then
|
|
232
|
+
change=$(( (current_ops - previous_ops) * 100 / previous_ops ))
|
|
233
|
+
if [[ $change -gt 10 ]]; then
|
|
234
|
+
faster_benchmarks+=("$benchmark (+${change}%)")
|
|
235
|
+
elif [[ $change -lt -10 ]]; then
|
|
236
|
+
change_abs=$(( (previous_ops - current_ops) * 100 / previous_ops ))
|
|
237
|
+
slower_benchmarks+=("$benchmark (-${change_abs}%)")
|
|
238
|
+
MAJOR_CHANGES=true
|
|
239
|
+
fi
|
|
240
|
+
fi
|
|
241
|
+
else
|
|
242
|
+
new_benchmarks+=("$benchmark")
|
|
243
|
+
fi
|
|
244
|
+
done
|
|
245
|
+
|
|
246
|
+
# Check for removed benchmarks
|
|
247
|
+
for benchmark in "${!previous_results[@]}"; do
|
|
248
|
+
if [[ -z "${current_results[$benchmark]}" ]]; then
|
|
249
|
+
removed_benchmarks+=("$benchmark")
|
|
250
|
+
fi
|
|
251
|
+
done
|
|
252
|
+
|
|
253
|
+
echo ""
|
|
254
|
+
echo "Performance Changes Summary:"
|
|
255
|
+
echo " Faster: ${#faster_benchmarks[@]} | Slower: ${#slower_benchmarks[@]} | New: ${#new_benchmarks[@]} | Removed: ${#removed_benchmarks[@]}"
|
|
256
|
+
|
|
257
|
+
# Report changes if any improvements, regressions, or new benchmarks detected
|
|
258
|
+
if [[ ${#faster_benchmarks[@]} -gt 0 || ${#slower_benchmarks[@]} -gt 0 || ${#new_benchmarks[@]} -gt 0 ]]; then
|
|
259
|
+
MAJOR_CHANGES=true
|
|
260
|
+
fi
|
|
261
|
+
fi
|
|
262
|
+
|
|
263
|
+
# Decide whether to write to file
|
|
264
|
+
if [[ "$WRITE_TO_FILE" == "true" ]]; then
|
|
265
|
+
echo ""
|
|
266
|
+
if [[ "$MAJOR_CHANGES" == "true" ]]; then
|
|
267
|
+
echo "Writing results to file (major changes detected)..."
|
|
268
|
+
elif [[ -z "$previous_date" ]]; then
|
|
269
|
+
echo "Writing results to file (first benchmark run)..."
|
|
270
|
+
else
|
|
271
|
+
echo "Writing results to file (--write flag used)..."
|
|
272
|
+
fi
|
|
273
|
+
|
|
274
|
+
# Generate performance comparison for file
|
|
275
|
+
generate_comparison() {
|
|
276
|
+
echo "## Performance Changes"
|
|
277
|
+
echo ""
|
|
278
|
+
if [[ -z "$previous_date" ]]; then
|
|
279
|
+
echo "No previous results found. This is the first benchmark run."
|
|
280
|
+
echo ""
|
|
281
|
+
return
|
|
282
|
+
fi
|
|
283
|
+
|
|
284
|
+
echo "Changes since **$previous_date**:"
|
|
285
|
+
echo ""
|
|
286
|
+
|
|
287
|
+
# Track changes
|
|
288
|
+
declare -a faster_benchmarks
|
|
289
|
+
declare -a slower_benchmarks
|
|
290
|
+
declare -a new_benchmarks
|
|
291
|
+
declare -a removed_benchmarks
|
|
292
|
+
|
|
293
|
+
# Compare results
|
|
294
|
+
for benchmark in "${!current_results[@]}"; do
|
|
295
|
+
if [[ -n "${previous_results[$benchmark]}" ]]; then
|
|
296
|
+
# Benchmark exists in both
|
|
297
|
+
current_data="${current_results[$benchmark]}"
|
|
298
|
+
previous_data="${previous_results[$benchmark]}"
|
|
299
|
+
|
|
300
|
+
current_ops=$(echo "$current_data" | cut -d'|' -f1)
|
|
301
|
+
previous_ops=$(echo "$previous_data" | cut -d'|' -f1)
|
|
302
|
+
|
|
303
|
+
if [[ "$current_ops" =~ ^[0-9]+$ && "$previous_ops" =~ ^[0-9]+$ && $previous_ops -gt 0 ]]; then
|
|
304
|
+
if [[ $current_ops -gt $previous_ops ]]; then
|
|
305
|
+
change=$(( (current_ops - previous_ops) * 100 / previous_ops ))
|
|
306
|
+
faster_benchmarks+=("$benchmark (+${change}%)")
|
|
307
|
+
elif [[ $current_ops -lt $previous_ops ]]; then
|
|
308
|
+
change=$(( (previous_ops - current_ops) * 100 / previous_ops ))
|
|
309
|
+
slower_benchmarks+=("$benchmark (-${change}%)")
|
|
310
|
+
fi
|
|
311
|
+
fi
|
|
312
|
+
else
|
|
313
|
+
new_benchmarks+=("$benchmark")
|
|
314
|
+
fi
|
|
315
|
+
done
|
|
316
|
+
|
|
317
|
+
# Check for removed benchmarks
|
|
318
|
+
for benchmark in "${!previous_results[@]}"; do
|
|
319
|
+
if [[ -z "${current_results[$benchmark]}" ]]; then
|
|
320
|
+
removed_benchmarks+=("$benchmark")
|
|
321
|
+
fi
|
|
322
|
+
done
|
|
323
|
+
|
|
324
|
+
echo "### Summary"
|
|
325
|
+
echo "- **Faster benchmarks**: ${#faster_benchmarks[@]}"
|
|
326
|
+
echo "- **Slower benchmarks**: ${#slower_benchmarks[@]}"
|
|
327
|
+
echo "- **New benchmarks**: ${#new_benchmarks[@]}"
|
|
328
|
+
echo "- **Removed benchmarks**: ${#removed_benchmarks[@]}"
|
|
329
|
+
echo ""
|
|
330
|
+
|
|
331
|
+
if [[ ${#faster_benchmarks[@]} -gt 0 ]]; then
|
|
332
|
+
echo "### Top Improvements"
|
|
333
|
+
printf '%s\n' "${faster_benchmarks[@]}" | sort -t'(' -k2 -nr | head -5 | sed 's/^/- /'
|
|
334
|
+
echo ""
|
|
335
|
+
fi
|
|
336
|
+
|
|
337
|
+
if [[ ${#slower_benchmarks[@]} -gt 0 ]]; then
|
|
338
|
+
echo "### Performance Regressions"
|
|
339
|
+
printf '%s\n' "${slower_benchmarks[@]}" | sort -t'(' -k2 -nr | head -5 | sed 's/^/- /'
|
|
340
|
+
echo ""
|
|
341
|
+
fi
|
|
342
|
+
|
|
343
|
+
if [[ ${#new_benchmarks[@]} -gt 0 ]]; then
|
|
344
|
+
echo "### New Benchmarks Added"
|
|
345
|
+
printf '%s\n' "${new_benchmarks[@]}" | sed 's/^/- /'
|
|
346
|
+
echo ""
|
|
347
|
+
fi
|
|
348
|
+
|
|
349
|
+
if [[ ${#removed_benchmarks[@]} -gt 0 ]]; then
|
|
350
|
+
echo "### Benchmarks Removed"
|
|
351
|
+
printf '%s\n' "${removed_benchmarks[@]}" | sed 's/^/- /'
|
|
352
|
+
echo ""
|
|
353
|
+
fi
|
|
354
|
+
}
|
|
355
|
+
|
|
356
|
+
# Create the markdown file
|
|
357
|
+
cat > "$OUTPUT_FILE.tmp" << EOF
|
|
358
|
+
# Dingo Ledger & Database Benchmark Results
|
|
359
|
+
|
|
360
|
+
## Latest Results
|
|
361
|
+
|
|
362
|
+
### Test Environment
|
|
363
|
+
- **Date**: $DATE
|
|
364
|
+
- **Go Version**: $GO_VERSION
|
|
365
|
+
- **OS**: $OS
|
|
366
|
+
- **Architecture**: $ARCH
|
|
367
|
+
- **CPU Cores**: $CPU_CORES
|
|
368
|
+
- **Data Source**: Real Cardano preview testnet data (40k+ blocks, slots 0-863,996)
|
|
369
|
+
|
|
370
|
+
### Benchmark Results
|
|
371
|
+
|
|
372
|
+
All benchmarks run with \`-benchmem\` flag showing memory allocations and operation counts.
|
|
373
|
+
|
|
374
|
+
| Benchmark | Operations/sec | Time/op | Memory/op | Allocs/op |
|
|
375
|
+
|-----------|----------------|---------|-----------|-----------|
|
|
376
|
+
EOF
|
|
377
|
+
|
|
378
|
+
# Add current results to table
|
|
379
|
+
for benchmark in "${!current_results[@]}"; do
|
|
380
|
+
data="${current_results[$benchmark]}"
|
|
381
|
+
ops_sec=$(echo "$data" | cut -d'|' -f1)
|
|
382
|
+
time_op=$(echo "$data" | cut -d'|' -f2)
|
|
383
|
+
mem_op=$(echo "$data" | cut -d'|' -f3)
|
|
384
|
+
allocs_op=$(echo "$data" | cut -d'|' -f4)
|
|
385
|
+
echo "| $benchmark | $ops_sec | $time_op | $mem_op | $allocs_op |" >> "$OUTPUT_FILE.tmp"
|
|
386
|
+
done
|
|
387
|
+
|
|
388
|
+
# Add comparison section
|
|
389
|
+
generate_comparison >> "$OUTPUT_FILE.tmp"
|
|
390
|
+
|
|
391
|
+
# Add historical section if previous results exist
|
|
392
|
+
if [[ -n "$previous_date" ]]; then
|
|
393
|
+
echo "" >> "$OUTPUT_FILE.tmp"
|
|
394
|
+
echo "## Historical Results" >> "$OUTPUT_FILE.tmp"
|
|
395
|
+
echo "" >> "$OUTPUT_FILE.tmp"
|
|
396
|
+
echo "### $previous_date" >> "$OUTPUT_FILE.tmp"
|
|
397
|
+
echo "" >> "$OUTPUT_FILE.tmp"
|
|
398
|
+
echo "| Benchmark | Operations/sec | Time/op | Memory/op | Allocs/op |" >> "$OUTPUT_FILE.tmp"
|
|
399
|
+
echo "|-----------|----------------|---------|-----------|-----------|" >> "$OUTPUT_FILE.tmp"
|
|
400
|
+
|
|
401
|
+
# Add previous results
|
|
402
|
+
for benchmark in "${!previous_results[@]}"; do
|
|
403
|
+
data="${previous_results[$benchmark]}"
|
|
404
|
+
ops_sec=$(echo "$data" | cut -d'|' -f1)
|
|
405
|
+
time_op=$(echo "$data" | cut -d'|' -f2)
|
|
406
|
+
mem_op=$(echo "$data" | cut -d'|' -f3)
|
|
407
|
+
allocs_op=$(echo "$data" | cut -d'|' -f4)
|
|
408
|
+
echo "| $benchmark | $ops_sec | $time_op | $mem_op | $allocs_op |" >> "$OUTPUT_FILE.tmp"
|
|
409
|
+
done
|
|
410
|
+
fi
|
|
411
|
+
|
|
412
|
+
# Move temp file to final location
|
|
413
|
+
mv "$OUTPUT_FILE.tmp" "$OUTPUT_FILE"
|
|
414
|
+
|
|
415
|
+
echo "Benchmark results saved to $OUTPUT_FILE"
|
|
416
|
+
else
|
|
417
|
+
echo ""
|
|
418
|
+
echo "To save these results to file, run: ./generate_benchmarks.sh --write"
|
|
419
|
+
echo "Results are only saved when major performance changes are detected."
|
|
420
|
+
fi
|
|
421
|
+
|
|
422
|
+
echo ""
|
|
423
|
+
echo "Benchmark run complete!"
|