@fjell/registry 4.4.5 → 4.4.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +546 -0
- package/dist/Coordinate.cjs +8 -5
- package/dist/Coordinate.d.ts +1 -1
- package/dist/Coordinate.js +8 -5
- package/dist/Instance.cjs +1 -1
- package/dist/Instance.d.ts +1 -1
- package/dist/Instance.js +1 -1
- package/dist/Registry.cjs +99 -90
- package/dist/Registry.d.ts +3 -42
- package/dist/Registry.js +99 -90
- package/dist/RegistryHub.cjs +78 -0
- package/dist/RegistryHub.d.ts +3 -0
- package/dist/RegistryHub.js +74 -0
- package/dist/errors/CoordinateError.cjs +70 -0
- package/dist/errors/CoordinateError.d.ts +28 -0
- package/dist/errors/CoordinateError.js +63 -0
- package/dist/errors/InstanceError.cjs +101 -0
- package/dist/errors/InstanceError.d.ts +42 -0
- package/dist/errors/InstanceError.js +92 -0
- package/dist/errors/RegistryError.cjs +82 -0
- package/dist/errors/RegistryError.d.ts +31 -0
- package/dist/errors/RegistryError.js +75 -0
- package/dist/errors/RegistryHubError.cjs +92 -0
- package/dist/errors/RegistryHubError.d.ts +39 -0
- package/dist/errors/RegistryHubError.js +84 -0
- package/dist/errors/index.d.ts +4 -0
- package/dist/index.cjs +501 -101
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +3 -0
- package/dist/index.js +6 -1
- package/dist/types.d.ts +90 -0
- package/docs/TIMING_NODE_OPTIMIZATION.md +207 -0
- package/docs/TIMING_README.md +170 -0
- package/docs/memory-data/scaling-10-instances.json +526 -0
- package/docs/memory-data/scaling-100-instances.json +526 -0
- package/docs/memory-data/scaling-1000-instances.json +276 -0
- package/docs/memory-data/scaling-10000-instances.json +126 -0
- package/docs/memory-data/scaling-20-instances.json +526 -0
- package/docs/memory-data/scaling-200-instances.json +526 -0
- package/docs/memory-data/scaling-2000-instances.json +276 -0
- package/docs/memory-data/scaling-50-instances.json +526 -0
- package/docs/memory-data/scaling-500-instances.json +276 -0
- package/docs/memory-data/scaling-5000-instances.json +126 -0
- package/docs/memory-overhead.svg +120 -0
- package/docs/memory.md +430 -0
- package/docs/timing-range.svg +174 -0
- package/docs/timing.md +483 -0
- package/examples/README.md +187 -0
- package/examples/multi-level-keys.ts +374 -0
- package/examples/registry-hub-types.ts +437 -0
- package/examples/simple-example.ts +250 -0
- package/package.json +5 -3
package/dist/types.d.ts
ADDED
@@ -0,0 +1,90 @@
import { Instance } from './Instance';
import { Coordinate } from './Coordinate';
/**
 * The RegistryHub interface provides a higher-level registry that manages multiple Registry instances.
 */
export interface RegistryHub {
    /**
     * Creates a new registry instance using a RegistryFactory and automatically registers it.
     */
    readonly createRegistry: (type: string, factory: RegistryFactory) => Registry;
    /**
     * Registers a registry instance, using the registry's type property as the key.
     */
    readonly registerRegistry: (registry: Registry) => void;
    /**
     * Retrieves an instance by delegating to the appropriate registry.
     */
    readonly get: (type: string, kta: string[], options?: {
        scopes?: string[];
    }) => Instance<any, any | never, any | never, any | never, any | never, any | never> | null;
    /**
     * Retrieves a registry instance by its type key.
     */
    readonly getRegistry: (type: string) => Registry | null;
    /**
     * Lists all registered type keys.
     */
    readonly getRegisteredTypes: () => string[];
    /**
     * Removes a registry from the hub.
     */
    readonly unregisterRegistry: (type: string) => boolean;
}
/**
 * Factory function for creating instances. This function receives a coordinate and context
 * and returns a fully initialized instance.
 */
export type InstanceFactory<S extends string, L1 extends string = never, L2 extends string = never, L3 extends string = never, L4 extends string = never, L5 extends string = never> = (coordinate: Coordinate<S, L1, L2, L3, L4, L5>, context: {
    registry: Registry;
    registryHub?: RegistryHub;
}) => Instance<S, L1, L2, L3, L4, L5>;
/**
 * Factory function for creating a Registry instance. This function receives the type and hub
 * and returns a fully initialized registry.
 */
export type RegistryFactory = (type: string, registryHub?: RegistryHub) => Registry;
/**
 * Tree structure representing the hierarchy of instances
 */
export interface InstanceTree {
    [keyType: string]: InstanceTreeNode;
}
export interface InstanceTreeNode {
    instances: ScopedInstance[];
    children: InstanceTree | null;
}
export interface ScopedInstance {
    scopes?: string[];
    instance: Instance<any, any | never, any | never, any | never, any | never, any | never>;
}
/**
 * The Registry interface provides a central registry for managing and accessing instances of services.
 * It serves as a dependency injection container that allows libraries to reference and access
 * other library instances they depend on.
 */
export interface Registry {
    /** The type identifier for this registry (e.g., 'services', 'data', 'cache') */
    readonly type: string;
    /** Optional reference to the RegistryHub that created this registry */
    readonly registryHub?: RegistryHub;
    /**
     * Creates and registers a new instance in the registry in one atomic operation.
     */
    createInstance: <S extends string, L1 extends string = never, L2 extends string = never, L3 extends string = never, L4 extends string = never, L5 extends string = never>(kta: S[], scopes: string[], factory: InstanceFactory<S, L1, L2, L3, L4, L5>) => Instance<S, L1, L2, L3, L4, L5>;
    /**
     * Registers an existing instance in the registry (for migration/advanced use cases).
     * @deprecated Use createInstance instead for new code
     */
    register: (kta: string[], instance: Instance<any, any | never, any | never, any | never, any | never, any | never>, options?: {
        scopes?: string[];
    }) => void;
    /**
     * Retrieves an instance from the registry.
     */
    get: (kta: string[], options?: {
        scopes?: string[];
    }) => Instance<any, any | never, any | never, any | never, any | never, any | never> | null;
    /** The tree structure representing the hierarchy of instances */
    instanceTree: InstanceTree;
}
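The declarations above are the new public type surface for this release: a `RegistryHub` that keys multiple `Registry` objects by type, a `Registry` with `createInstance`/`get`, and the `InstanceFactory`/`RegistryFactory` callbacks. The sketch below is written only against those declarations; the `makeRegistry` and `buildUserInstance` helpers, and the way the hub itself is obtained, are illustrative assumptions rather than part of this diff.

```typescript
// Sketch against the types.d.ts declarations above; anything marked "hypothetical"
// stands in for whatever the application actually provides.
import type { Instance, InstanceFactory, Registry, RegistryHub } from './types';

declare const hub: RegistryHub;                                              // assumed to exist
declare function makeRegistry(type: string, hub?: RegistryHub): Registry;    // hypothetical RegistryFactory
declare function buildUserInstance(
  coordinate: unknown, registry: Registry): Instance<'user'>;                // hypothetical constructor

// An InstanceFactory receives the coordinate plus a context carrying the owning
// registry (and the hub, when the registry was created through one).
const userFactory: InstanceFactory<'user'> = (coordinate, { registry }) =>
  buildUserInstance(coordinate, registry);

// createRegistry builds a registry via the factory and registers it under its type key.
const services = hub.createRegistry('services', makeRegistry);

// createInstance creates and registers in one step; get() resolves it later,
// optionally narrowed by scopes, either on the registry or through the hub.
services.createInstance(['user'], ['production'], userFactory);
const fromRegistry = services.get(['user'], { scopes: ['production'] });
const fromHub = hub.get('services', ['user'], { scopes: ['production'] });
```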
package/docs/TIMING_NODE_OPTIMIZATION.md
ADDED
@@ -0,0 +1,207 @@
# Node.js Optimization for Timing Tests

This document outlines Node.js runtime parameters that can be tuned to minimize interference and improve timing test accuracy.

## Quick Start: Optimized Timing Test

```bash
pnpm run test:timing:optimized
```

This runs timing tests with pre-configured optimal Node.js flags.

## Key Node.js Optimization Flags

### Memory Management

#### `--max-old-space-size=8192`
- **Purpose**: Increases maximum heap size to 8GB (default ~1.4GB)
- **Benefit**: Prevents garbage collection during test runs
- **Why 8GB**: Provides ample headroom for 100 rounds × multiple tree sizes
- **Alternative**: `--max-old-space-size=4096` for systems with less RAM

#### `--max-semi-space-size=1024`
- **Purpose**: Increases semi-space size to 1GB (default varies)
- **Benefit**: Reduces minor GC frequency during object allocation
- **Impact**: Each semi-space can hold more objects before collection

### Garbage Collection Control

#### `--expose-gc`
- **Purpose**: Exposes `global.gc()` function for manual garbage collection
- **Benefit**: Allows forced GC between test rounds for consistent baselines
- **Usage**: Can trigger GC at specific points to isolate timing measurements

#### `--gc-interval=1000000`
- **Purpose**: Reduces GC frequency (higher number = less frequent)
- **Benefit**: Minimizes GC interruptions during timing measurements
- **Trade-off**: Uses more memory but provides consistent timing

### Execution Predictability

#### `--predictable`
- **Purpose**: Makes execution more deterministic
- **Benefit**: Reduces timing variance from non-deterministic optimizations
- **Impact**: Slightly slower but more consistent measurements

#### `--no-compilation-cache`
- **Purpose**: Disables V8 compilation cache
- **Benefit**: Ensures consistent JIT compilation behavior across runs
- **Use Case**: When testing cold start performance or eliminating cache effects

### JIT Compilation Control

#### `--optimize-for-size`
- **Purpose**: Optimizes for code size rather than speed
- **Benefit**: More predictable compilation behavior
- **Alternative**: `--max-opt` for maximum optimization (less predictable)

#### `--no-opt`
- **Purpose**: Disables JIT compilation entirely
- **Benefit**: Eliminates JIT timing variance (but much slower execution)
- **Use Case**: Testing interpreter-only performance

## Complete Optimization Command

For maximum timing accuracy, use all optimization flags:

```bash
# Use NODE_OPTIONS for compatibility with modern Node.js
NODE_OPTIONS="--max-old-space-size=8192 --max-semi-space-size=1024" \
  vitest run tests/timing.test.ts

# Or use direct node invocation (requires vitest executable)
node --max-old-space-size=8192 --max-semi-space-size=1024 \
  ./node_modules/vitest/vitest.mjs run tests/timing.test.ts
```

## System-Specific Adjustments

### Low-Memory Systems (< 8GB RAM)
```bash
NODE_OPTIONS="--max-old-space-size=2048 --max-semi-space-size=256" \
  vitest run tests/timing.test.ts
```

### High-Memory Systems (> 16GB RAM)
```bash
NODE_OPTIONS="--max-old-space-size=16384 --max-semi-space-size=2048" \
  vitest run tests/timing.test.ts
```

### Docker/Container Environments
```bash
NODE_OPTIONS="--max-old-space-size=4096 --max-semi-space-size=512" \
  vitest run tests/timing.test.ts
```

## Environment Variables

### Additional V8 Options
```bash
export NODE_OPTIONS="--max-old-space-size=8192 --max-semi-space-size=1024"
```

### Memory Monitoring
```bash
export NODE_ENV=production   # Disable development overhead
export UV_THREADPOOL_SIZE=8  # Increase thread pool if needed
```

## Monitoring Memory During Tests

Add memory monitoring to timing tests:

```typescript
// Add to timing test setup
if (global.gc) {
  console.log('Memory before tests:', process.memoryUsage());
  global.gc();
}

// Add between test rounds
if (global.gc && round % 20 === 0) {
  global.gc();
  console.log(`Memory after round ${round}:`, process.memoryUsage());
}
```

## Platform-Specific Considerations

### macOS
- Default memory limits are generous
- Use `--max-old-space-size=8192` safely
- Activity Monitor shows Node.js memory usage

### Linux
- Check `ulimit -v` for virtual memory limits
- May need `sudo sysctl vm.max_map_count=262144` for large heaps
- Use `htop` or `ps` to monitor memory

### Windows
- WSL may have memory constraints
- PowerShell: `Get-Process node | Select-Object WorkingSet64`
- Task Manager shows detailed memory breakdown

### CI/CD Environments
- GitHub Actions: 7GB RAM limit, use `--max-old-space-size=4096`
- GitLab CI: Variable memory, check runner specs
- Docker: Set container memory limits appropriately

## Performance Analysis Tools

### V8 Profiling
```bash
node --prof --log-timer-events ./timing-test.js
```

### Heap Snapshots
```bash
node --inspect --heap-prof ./timing-test.js
```

### GC Logging
```bash
node --trace-gc --trace-gc-verbose ./timing-test.js
```

## Validation

Test that optimizations are working:

```bash
# Run normal timing tests
pnpm run test:timing

# Run optimized timing tests
pnpm run test:timing:optimized

# Compare results - optimized should show:
# 1. Lower timing variance (smaller standard deviations)
# 2. More consistent Bollinger bands
# 3. Fewer outlier measurements
# 4. Reduced memory-related timing spikes
```

## Recommended Configuration

For most use cases, the optimized configuration provides the best balance:

```json
{
  "scripts": {
    "test:timing:optimized": "NODE_OPTIONS=\"--max-old-space-size=8192 --max-semi-space-size=1024\" vitest run tests/timing.test.ts"
  }
}
```

This configuration:
- ✅ Eliminates most GC interference
- ✅ Provides consistent execution
- ✅ Works on most development machines
- ✅ Maintains reasonable test execution time
- ✅ Produces reliable performance metrics

---

*For production timing analysis, always use optimized Node.js flags to ensure accurate performance measurements.*
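As a companion to the flags and the monitoring snippet above, here is a minimal round-based measurement loop built on `process.hrtime.bigint()` with a forced collection between rounds. It assumes Node is started with `--expose-gc`; it is a sketch only, not the package's actual `tests/timing.test.ts`, and the function names are made up for the example.

```typescript
// Illustrative measurement loop in the spirit of the document above.
// Run with: node --expose-gc ... so that global.gc is available.
function measureMicroseconds(op: () => void, iterations: number): number[] {
  const samples: number[] = [];
  for (let i = 0; i < iterations; i++) {
    const start = process.hrtime.bigint();      // nanosecond-resolution monotonic clock
    op();
    const end = process.hrtime.bigint();
    samples.push(Number(end - start) / 1000);   // nanoseconds -> microseconds
  }
  return samples;
}

function runRounds(op: () => void, rounds: number, iterations: number): number[] {
  const all: number[] = [];
  for (let round = 0; round < rounds; round++) {
    // Collect between rounds so garbage from earlier rounds does not skew later ones.
    if (global.gc) {
      global.gc();
    }
    all.push(...measureMicroseconds(op, iterations));
  }
  return all;
}
```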
package/docs/TIMING_README.md
ADDED
@@ -0,0 +1,170 @@
# Timing Test System

This document explains the timing test system implemented for the @fjell/registry project.

## Overview

The timing test system measures the performance of key operations in the fjell-registry library using robust multi-round testing and generates comprehensive documentation including heatmap visualizations. It runs hundreds of test rounds to smooth out system load variations and provides reliable performance metrics. The system ensures that performance remains within acceptable bounds and helps detect performance regressions.

## Running Timing Tests

```bash
# Run timing tests specifically
pnpm run test:timing

# Run all tests (including timing tests)
pnpm test
```

## Measured Operations

The timing system measures the following operations:

- **createRegistry**: Time to create a new registry
- **createRegistryHub**: Time to create a new registry hub
- **createInstance**: Time to create a new instance
- **registerInstance**: Time to register an instance in registry
- **lookupInstance**: Time to lookup an existing instance from registry
- **completeWorkflow**: Time for complete workflow including all operations

### Scaling Tests

Additionally, the system includes comprehensive scaling tests that measure performance across a wide range of tree sizes:

- **Tree sizes tested**: 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000
- **registerInstance_[size]**: Registration performance with varying numbers of existing items
- **lookupInstance_[size]**: Lookup performance with different tree sizes

These extensive tests help identify potential O(n) performance issues and ensure the registry scales well from small applications to enterprise-scale deployments.

## Timing Constraints

The following timing constraints are enforced (all values in microseconds):

### Basic Operations
- **createRegistry**: ≤ 5000µs (5ms)
- **createRegistryHub**: ≤ 5000µs (5ms)
- **createInstance**: ≤ 10000µs (10ms)
- **lookupInstance**: ≤ 2000µs (2ms)
- **registerInstance**: ≤ 5000µs (5ms)
- **completeWorkflow**: ≤ 25000µs (25ms)

### Scaling Operations
- **registerInstance** (≤100 items): ≤ 5000µs (5ms)
- **registerInstance** (100-1000 items): ≤ 10000µs (10ms)
- **registerInstance** (1000-10000 items): ≤ 20000µs (20ms)
- **registerInstance** (>10000 items): ≤ 50000µs (50ms)
- **lookupInstance** (≤100 items): ≤ 2000µs (2ms)
- **lookupInstance** (100-1000 items): ≤ 5000µs (5ms)
- **lookupInstance** (1000-10000 items): ≤ 10000µs (10ms)
- **lookupInstance** (>10000 items): ≤ 25000µs (25ms)

## Configuration

### Adjusting Timing Constraints

Edit the `TIMING_CONSTRAINTS` object in `tests/timing.test.ts`:

```typescript
const TIMING_CONSTRAINTS: TimingConstraints = {
  createRegistry: 5000,     // 5000µs (5ms) max
  createRegistryHub: 5000,  // 5000µs (5ms) max
  createInstance: 10000,    // 10000µs (10ms) max
  lookupInstance: 2000,     // 2000µs (2ms) max
  registerInstance: 5000,   // 5000µs (5ms) max
};
```

### Adjusting Test Iterations

Change the `ITERATIONS` constant to adjust the number of test iterations:

```typescript
const ITERATIONS = 1000; // Number of iterations for timing tests
```

## Generated Documentation

When timing tests run, they automatically generate documentation at `./docs/timing.md`. This file includes:

- Performance summary table for basic operations
- Scaling performance table showing results for different tree sizes
- **SVG performance graph** visualizing scaling trends (`./docs/scaling-performance.svg`)
- **SVG performance range chart** showing performance consistency with ±1σ bands (`./docs/timing-range.svg`)
- Detailed timing results for each operation (in microseconds)
- Performance analysis and scaling recommendations
- System information (Node.js version, platform, etc.)

### Multi-Round Testing

The system runs 100 test rounds (configurable) for smaller tree sizes and reduces rounds for larger sizes to balance thoroughness with test duration:

- **≤1000 items**: 100 rounds × 200 iterations = ~20,000 measurements per operation
- **1000-10000 items**: 50 rounds × 100 iterations = ~5,000 measurements per operation
- **>10000 items**: 25 rounds × 50 iterations = ~1,250 measurements per operation

For performance range visualization, the system calculates mean and standard deviation for each tree size, displaying ±1σ confidence bands that show performance consistency.

**Randomized Batching**: Instead of running all iterations sequentially, the system uses randomized batch sizes (10-50 iterations) with brief pauses between batches. This approach simulates real-world usage patterns and prevents artificial performance artifacts from caching, JIT optimization, and memory allocation patterns.

This comprehensive approach provides robust results that smooth out system load variations and clearly show performance characteristics and variability.

## Best Practices

### When to Update Constraints

- **After optimization**: If you've improved performance, consider tightening constraints
- **Hardware changes**: Adjust constraints when changing build/test infrastructure
- **Performance regressions**: Investigate and fix before loosening constraints

### Interpreting Results

- **Average Time**: Most important metric for consistent performance
- **Max Time**: Helps identify performance spikes
- **Min Time**: Usually very low due to CPU caching effects

### Troubleshooting Failed Tests

If timing tests fail:

1. **Check recent changes**: Review code changes that might affect performance
2. **System load**: Ensure the system isn't under heavy load during testing
3. **Multiple runs**: Run tests multiple times to account for system variance
4. **Profile code**: Use Node.js profiling tools to identify bottlenecks

## Integration with CI/CD

The timing tests are designed to be run in CI/CD pipelines:

- Tests fail if performance thresholds are exceeded
- Documentation is automatically updated on each run
- Results can be tracked over time to monitor performance trends

## File Structure

```
docs/
├── timing.md           # Generated timing report (auto-updated)
└── TIMING_README.md    # This documentation file

tests/
└── timing.test.ts      # Timing test implementation
```

## Maintenance

### Regular Tasks

1. **Review thresholds** quarterly to ensure they remain appropriate
2. **Update documentation** after significant performance improvements
3. **Monitor trends** in the generated timing reports over releases

### Release Process

The timing documentation (`docs/timing.md`) should be:

1. Generated before each release
2. Committed to the repository
3. Published with the package

This ensures users can see performance characteristics of each version.
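For reference, the mean and ±1σ band described under Multi-Round Testing are ordinary sample statistics. The sketch below assumes per-operation samples have already been collected in microseconds; the package's actual report generator in `tests/timing.test.ts` may compute and format these differently.

```typescript
// Sketch of the mean / standard deviation / ±1σ band used in the timing report;
// not the package's actual implementation.
interface TimingSummary {
  mean: number;
  stdDev: number;
  lowerBand: number;  // mean - 1σ
  upperBand: number;  // mean + 1σ
}

function summarize(samplesMicros: number[]): TimingSummary {
  const n = samplesMicros.length;
  const mean = samplesMicros.reduce((sum, x) => sum + x, 0) / n;
  const variance = samplesMicros.reduce((sum, x) => sum + (x - mean) ** 2, 0) / n;
  const stdDev = Math.sqrt(variance);
  return { mean, stdDev, lowerBand: mean - stdDev, upperBand: mean + stdDev };
}

// Hypothetical check against the lookupInstance constraint (2000µs):
const summary = summarize([120, 135, 128, 119, 142]);  // example measurements in µs
if (summary.mean > 2000) {
  throw new Error(`lookupInstance exceeded constraint: ${summary.mean.toFixed(1)}µs`);
}
```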