@benchkit/format 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +521 -0
- package/dist/build-otlp-result.d.ts +43 -0
- package/dist/build-otlp-result.d.ts.map +1 -0
- package/dist/build-otlp-result.js +102 -0
- package/dist/build-otlp-result.js.map +1 -0
- package/dist/build-otlp-result.test.d.ts +2 -0
- package/dist/build-otlp-result.test.d.ts.map +1 -0
- package/dist/build-otlp-result.test.js +148 -0
- package/dist/build-otlp-result.test.js.map +1 -0
- package/dist/compare.d.ts +20 -0
- package/dist/compare.d.ts.map +1 -0
- package/dist/compare.js +92 -0
- package/dist/compare.js.map +1 -0
- package/dist/compare.test.d.ts +2 -0
- package/dist/compare.test.d.ts.map +1 -0
- package/dist/compare.test.js +233 -0
- package/dist/compare.test.js.map +1 -0
- package/dist/format-comparison-markdown.d.ts +3 -0
- package/dist/format-comparison-markdown.d.ts.map +1 -0
- package/dist/format-comparison-markdown.js +142 -0
- package/dist/format-comparison-markdown.js.map +1 -0
- package/dist/format-comparison-markdown.test.d.ts +2 -0
- package/dist/format-comparison-markdown.test.d.ts.map +1 -0
- package/dist/format-comparison-markdown.test.js +173 -0
- package/dist/format-comparison-markdown.test.js.map +1 -0
- package/dist/index.d.ts +38 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +100 -0
- package/dist/index.js.map +1 -0
- package/dist/infer-direction.d.ts +10 -0
- package/dist/infer-direction.d.ts.map +1 -0
- package/dist/infer-direction.js +23 -0
- package/dist/infer-direction.js.map +1 -0
- package/dist/infer-direction.test.d.ts +2 -0
- package/dist/infer-direction.test.d.ts.map +1 -0
- package/dist/infer-direction.test.js +28 -0
- package/dist/infer-direction.test.js.map +1 -0
- package/dist/metrics-batch.d.ts +55 -0
- package/dist/metrics-batch.d.ts.map +1 -0
- package/dist/metrics-batch.js +296 -0
- package/dist/metrics-batch.js.map +1 -0
- package/dist/metrics-batch.test.d.ts +2 -0
- package/dist/metrics-batch.test.d.ts.map +1 -0
- package/dist/metrics-batch.test.js +306 -0
- package/dist/metrics-batch.test.js.map +1 -0
- package/dist/native-builder.d.ts.map +1 -0
- package/dist/native-builder.js.map +1 -0
- package/dist/native-builder.test.d.ts.map +1 -0
- package/dist/native-builder.test.js.map +1 -0
- package/dist/otlp-conventions.d.ts +80 -0
- package/dist/otlp-conventions.d.ts.map +1 -0
- package/dist/otlp-conventions.js +114 -0
- package/dist/otlp-conventions.js.map +1 -0
- package/dist/otlp-conventions.test.d.ts +2 -0
- package/dist/otlp-conventions.test.d.ts.map +1 -0
- package/dist/otlp-conventions.test.js +65 -0
- package/dist/otlp-conventions.test.js.map +1 -0
- package/dist/otlp-projections.d.ts.map +1 -0
- package/dist/otlp-projections.js.map +1 -0
- package/dist/otlp-projections.test.d.ts.map +1 -0
- package/dist/otlp-projections.test.js.map +1 -0
- package/dist/otlp-validation.d.ts +40 -0
- package/dist/otlp-validation.d.ts.map +1 -0
- package/dist/otlp-validation.js +110 -0
- package/dist/otlp-validation.js.map +1 -0
- package/dist/otlp-validation.test.d.ts +2 -0
- package/dist/otlp-validation.test.d.ts.map +1 -0
- package/dist/otlp-validation.test.js +160 -0
- package/dist/otlp-validation.test.js.map +1 -0
- package/dist/parse-benchmark-action.d.ts +11 -0
- package/dist/parse-benchmark-action.d.ts.map +1 -0
- package/dist/parse-benchmark-action.js +54 -0
- package/dist/parse-benchmark-action.js.map +1 -0
- package/dist/parse-benchmark-action.test.d.ts +2 -0
- package/dist/parse-benchmark-action.test.d.ts.map +1 -0
- package/dist/parse-benchmark-action.test.js +66 -0
- package/dist/parse-benchmark-action.test.js.map +1 -0
- package/dist/parse-go.d.ts +12 -0
- package/dist/parse-go.d.ts.map +1 -0
- package/dist/parse-go.js +63 -0
- package/dist/parse-go.js.map +1 -0
- package/dist/parse-go.test.d.ts +2 -0
- package/dist/parse-go.test.d.ts.map +1 -0
- package/dist/parse-go.test.js +110 -0
- package/dist/parse-go.test.js.map +1 -0
- package/dist/parse-hyperfine.d.ts +3 -0
- package/dist/parse-hyperfine.d.ts.map +1 -0
- package/dist/parse-hyperfine.js +59 -0
- package/dist/parse-hyperfine.js.map +1 -0
- package/dist/parse-hyperfine.test.d.ts +2 -0
- package/dist/parse-hyperfine.test.d.ts.map +1 -0
- package/dist/parse-hyperfine.test.js +64 -0
- package/dist/parse-hyperfine.test.js.map +1 -0
- package/dist/parse-otlp.d.ts +45 -0
- package/dist/parse-otlp.d.ts.map +1 -0
- package/dist/parse-otlp.js +92 -0
- package/dist/parse-otlp.js.map +1 -0
- package/dist/parse-otlp.test.d.ts +2 -0
- package/dist/parse-otlp.test.d.ts.map +1 -0
- package/dist/parse-otlp.test.js +133 -0
- package/dist/parse-otlp.test.js.map +1 -0
- package/dist/parse-pytest-benchmark.d.ts +3 -0
- package/dist/parse-pytest-benchmark.d.ts.map +1 -0
- package/dist/parse-pytest-benchmark.js +72 -0
- package/dist/parse-pytest-benchmark.js.map +1 -0
- package/dist/parse-pytest-benchmark.test.d.ts +2 -0
- package/dist/parse-pytest-benchmark.test.d.ts.map +1 -0
- package/dist/parse-pytest-benchmark.test.js +85 -0
- package/dist/parse-pytest-benchmark.test.js.map +1 -0
- package/dist/parse-rust.d.ts +9 -0
- package/dist/parse-rust.d.ts.map +1 -0
- package/dist/parse-rust.js +50 -0
- package/dist/parse-rust.js.map +1 -0
- package/dist/parse-rust.test.d.ts +2 -0
- package/dist/parse-rust.test.d.ts.map +1 -0
- package/dist/parse-rust.test.js +62 -0
- package/dist/parse-rust.test.js.map +1 -0
- package/dist/parse.d.ts +7 -0
- package/dist/parse.d.ts.map +1 -0
- package/dist/parse.js +87 -0
- package/dist/parse.js.map +1 -0
- package/dist/parse.test.d.ts +2 -0
- package/dist/parse.test.d.ts.map +1 -0
- package/dist/parse.test.js +171 -0
- package/dist/parse.test.js.map +1 -0
- package/dist/parser-utils.d.ts +17 -0
- package/dist/parser-utils.d.ts.map +1 -0
- package/dist/parser-utils.js +29 -0
- package/dist/parser-utils.js.map +1 -0
- package/dist/parser-utils.test.d.ts +2 -0
- package/dist/parser-utils.test.d.ts.map +1 -0
- package/dist/parser-utils.test.js +27 -0
- package/dist/parser-utils.test.js.map +1 -0
- package/dist/retry.d.ts +6 -0
- package/dist/retry.d.ts.map +1 -0
- package/dist/retry.js +18 -0
- package/dist/retry.js.map +1 -0
- package/dist/retry.test.d.ts +2 -0
- package/dist/retry.test.d.ts.map +1 -0
- package/dist/retry.test.js +29 -0
- package/dist/retry.test.js.map +1 -0
- package/dist/test-otlp-helpers.d.ts.map +1 -0
- package/dist/test-otlp-helpers.js.map +1 -0
- package/dist/types.d.ts +58 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +3 -0
- package/dist/types.js.map +1 -0
- package/package.json +22 -0
package/README.md
ADDED
|
@@ -0,0 +1,521 @@
|
|
|
1
|
+
# @benchkit/format
|
|
2
|
+
|
|
3
|
+
Benchmark result types and format parsers for [benchkit](../../README.md). Parses Go bench output, [Hyperfine](https://github.com/sharkdp/hyperfine) JSON, [benchmark-action](https://github.com/benchmark-action/github-action-benchmark) JSON, [pytest-benchmark](https://pytest-benchmark.readthedocs.io/) JSON, and OTLP metrics JSON into a single normalized OTLP metrics document.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
> **Note:** `@benchkit/format` is not yet published to the npm registry.
|
|
8
|
+
> Until the first release, install from source as described below.
|
|
9
|
+
|
|
10
|
+
Clone the benchkit repository, install dependencies, and build the package:
|
|
11
|
+
|
|
12
|
+
```bash
|
|
13
|
+
git clone https://github.com/strawgate/benchkit.git
|
|
14
|
+
cd benchkit
|
|
15
|
+
npm ci
|
|
16
|
+
npm run build --workspace=packages/format
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
Then, from your project directory, link the local package (adjust the path
|
|
20
|
+
to where you cloned benchkit):
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
npm link <path-to-benchkit>/packages/format
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
Or use a `file:` reference in your project's `package.json`:
|
|
27
|
+
|
|
28
|
+
```jsonc
|
|
29
|
+
{
|
|
30
|
+
"dependencies": {
|
|
31
|
+
"@benchkit/format": "file:<path-to-benchkit>/packages/format"
|
|
32
|
+
}
|
|
33
|
+
}
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
Once published, you will be able to install directly:
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
npm install @benchkit/format
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
## Quick start
|
|
43
|
+
|
|
44
|
+
```ts
|
|
45
|
+
import { parseBenchmarks } from "@benchkit/format";
|
|
46
|
+
|
|
47
|
+
// Auto-detect the format and parse
|
|
48
|
+
const result = parseBenchmarks(input);
|
|
49
|
+
|
|
50
|
+
for (const bench of result.benchmarks) {
|
|
51
|
+
for (const [name, metric] of Object.entries(bench.metrics)) {
|
|
52
|
+
console.log(`${bench.name} ${name}: ${metric.value} ${metric.unit ?? ""}`);
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Building native results
|
|
58
|
+
|
|
59
|
+
If your benchmark does not come from a tool like `go test -bench`, you can
|
|
60
|
+
build benchkit-native results programmatically and then write them to disk.
|
|
61
|
+
|
|
62
|
+
```ts
|
|
63
|
+
import {
|
|
64
|
+
buildNativeResult,
|
|
65
|
+
stringifyNativeResult,
|
|
66
|
+
} from "@benchkit/format";
|
|
67
|
+
|
|
68
|
+
const result = buildNativeResult({
|
|
69
|
+
context: {
|
|
70
|
+
commit: process.env.GITHUB_SHA,
|
|
71
|
+
ref: process.env.GITHUB_REF,
|
|
72
|
+
timestamp: new Date().toISOString(),
|
|
73
|
+
},
|
|
74
|
+
benchmarks: [
|
|
75
|
+
{
|
|
76
|
+
name: "mock-http-ingest",
|
|
77
|
+
tags: {
|
|
78
|
+
kind: "workflow",
|
|
79
|
+
scenario: "json-ingest",
|
|
80
|
+
},
|
|
81
|
+
metrics: {
|
|
82
|
+
events_per_sec: { value: 13240.5, unit: "events/sec" },
|
|
83
|
+
p95_batch_ms: { value: 143.2, unit: "ms", direction: "smaller_is_better" },
|
|
84
|
+
service_rss_mb: { value: 543.1, unit: "MB", direction: "smaller_is_better" },
|
|
85
|
+
},
|
|
86
|
+
samples: [
|
|
87
|
+
{ t: 0, events_per_sec: 0, service_rss_mb: 112.3 },
|
|
88
|
+
{ t: 1, events_per_sec: 11884.2, service_rss_mb: 241.7 },
|
|
89
|
+
],
|
|
90
|
+
},
|
|
91
|
+
],
|
|
92
|
+
});
|
|
93
|
+
|
|
94
|
+
const json = stringifyNativeResult(result);
|
|
95
|
+
// write json to workflow-bench.json
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
Shorthands:
|
|
99
|
+
|
|
100
|
+
- numeric metrics like `{ parse_errors: 0 }` are accepted
|
|
101
|
+
- direction is inferred from `unit` when omitted, e.g. `events/sec` becomes
|
|
102
|
+
`bigger_is_better`
|
|
103
|
+
|
|
104
|
+
## Parser entry points
|
|
105
|
+
|
|
106
|
+
### `parseBenchmarks(input, format?)`
|
|
107
|
+
|
|
108
|
+
Main entry point. Accepts a string and an optional format hint. When `format` is
|
|
109
|
+
omitted or `"auto"`, the parser inspects the input and picks the right strategy:
|
|
110
|
+
|
|
111
|
+
| Detected shape | Trigger | Format |
|
|
112
|
+
|---|---|---|
|
|
113
|
+
| JSON object with a `benchmarks` array whose entries have a `stats` object | `benchmarks[0].stats` is an object | `pytest-benchmark` |
|
|
114
|
+
| JSON object with a `benchmarks` array | Top-level `benchmarks` key present | `native` |
|
|
115
|
+
| JSON object with a `resourceMetrics` array | Top-level `resourceMetrics` key present | `otlp` |
|
|
116
|
+
| JSON object with a `results` array | Top-level `results` key with objects containing a `command` string | `hyperfine` |
|
|
117
|
+
| JSON array of objects | Array whose first element has both a string `name` and a numeric `value` | `benchmark-action` |
|
|
118
|
+
| Plain text lines | Lines matching `/^Benchmark\w.*\s+\d+\s+[\d.]+\s+\w+\/\w+/` | `go` |
|
|
119
|
+
| Plain text lines | Lines matching `/^test\s+\S+\s+\.\.\.\s+bench:/` | `rust` |
|
|
120
|
+
|
|
121
|
+
If auto-detection fails, `parseBenchmarks` throws with a message listing the supported formats.
|
|
122
|
+
|
|
123
|
+
```ts
|
|
124
|
+
import { parseBenchmarks } from "@benchkit/format";
|
|
125
|
+
|
|
126
|
+
// Explicit format
|
|
127
|
+
const result = parseBenchmarks(goOutput, "go");
|
|
128
|
+
|
|
129
|
+
// Auto-detect (default)
|
|
130
|
+
const result = parseBenchmarks(unknownInput);
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
### `parseOtlp(input)`
|
|
134
|
+
|
|
135
|
+
Parses OTLP metrics JSON and provides helpers for:
|
|
136
|
+
|
|
137
|
+
- reading resource and datapoint attributes
|
|
138
|
+
- discriminating metric kinds (`gauge`, `sum`, `histogram`)
|
|
139
|
+
- reading aggregation temporality
|
|
140
|
+
|
|
141
|
+
```ts
|
|
142
|
+
import { parseOtlp } from "@benchkit/format";
|
|
143
|
+
|
|
144
|
+
const document = parseOtlp(otlpJson);
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
### `parseGoBench(input)`
|
|
148
|
+
|
|
149
|
+
Parses Go `testing.B` text output. Each benchmark line produces one `Benchmark`
|
|
150
|
+
entry. The `-P` processor suffix is extracted into a `procs` tag. Multiple
|
|
151
|
+
value/unit pairs on the same line produce separate named metrics.
|
|
152
|
+
|
|
153
|
+
**Input** (typical `go test -bench=. -benchmem` output):
|
|
154
|
+
|
|
155
|
+
```
|
|
156
|
+
goos: linux
|
|
157
|
+
goarch: amd64
|
|
158
|
+
BenchmarkSort/small-8 500000 2345 ns/op 128 B/op 3 allocs/op
|
|
159
|
+
BenchmarkSort/large-8 1000 987654 ns/op 65536 B/op 512 allocs/op
|
|
160
|
+
BenchmarkHash-8 1000000 890 ns/op 0 B/op 0 allocs/op
|
|
161
|
+
PASS
|
|
162
|
+
ok example.com/mypackage 3.456s
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
**Call:**
|
|
166
|
+
|
|
167
|
+
```ts
|
|
168
|
+
import { parseGoBench } from "@benchkit/format";
|
|
169
|
+
|
|
170
|
+
const input = `
|
|
171
|
+
BenchmarkSort/small-8 500000 2345 ns/op 128 B/op 3 allocs/op
|
|
172
|
+
BenchmarkSort/large-8 1000 987654 ns/op 65536 B/op 512 allocs/op
|
|
173
|
+
BenchmarkHash-8 1000000 890 ns/op 0 B/op 0 allocs/op
|
|
174
|
+
`.trim();
|
|
175
|
+
|
|
176
|
+
const result = parseGoBench(input);
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
**Result** (abbreviated):
|
|
180
|
+
|
|
181
|
+
```json
|
|
182
|
+
{
|
|
183
|
+
"benchmarks": [
|
|
184
|
+
{
|
|
185
|
+
"name": "BenchmarkSort/small",
|
|
186
|
+
"tags": { "procs": "8" },
|
|
187
|
+
"metrics": {
|
|
188
|
+
"ns_per_op": { "value": 2345, "unit": "ns/op", "direction": "smaller_is_better" },
|
|
189
|
+
"bytes_per_op": { "value": 128, "unit": "B/op", "direction": "smaller_is_better" },
|
|
190
|
+
"allocs_per_op": { "value": 3, "unit": "allocs/op", "direction": "smaller_is_better" }
|
|
191
|
+
}
|
|
192
|
+
},
|
|
193
|
+
{
|
|
194
|
+
"name": "BenchmarkSort/large",
|
|
195
|
+
"tags": { "procs": "8" },
|
|
196
|
+
"metrics": {
|
|
197
|
+
"ns_per_op": { "value": 987654, "unit": "ns/op", "direction": "smaller_is_better" },
|
|
198
|
+
"bytes_per_op": { "value": 65536, "unit": "B/op", "direction": "smaller_is_better" },
|
|
199
|
+
"allocs_per_op": { "value": 512, "unit": "allocs/op", "direction": "smaller_is_better" }
|
|
200
|
+
}
|
|
201
|
+
},
|
|
202
|
+
{
|
|
203
|
+
"name": "BenchmarkHash",
|
|
204
|
+
"tags": { "procs": "8" },
|
|
205
|
+
"metrics": {
|
|
206
|
+
"ns_per_op": { "value": 890, "unit": "ns/op", "direction": "smaller_is_better" },
|
|
207
|
+
"bytes_per_op": { "value": 0, "unit": "B/op", "direction": "smaller_is_better" },
|
|
208
|
+
"allocs_per_op": { "value": 0, "unit": "allocs/op", "direction": "smaller_is_better" }
|
|
209
|
+
}
|
|
210
|
+
}
|
|
211
|
+
]
|
|
212
|
+
}
|
|
213
|
+
```
|
|
214
|
+
|
|
215
|
+
### `parseBenchmarkAction(input)`
|
|
216
|
+
|
|
217
|
+
Parses the JSON array format used by
|
|
218
|
+
[benchmark-action/github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark).
|
|
219
|
+
Each array entry becomes one `Benchmark` with a single metric called `value`.
|
|
220
|
+
The `range` string (e.g. `"± 300"`) is parsed into a numeric `range` field.
|
|
221
|
+
|
|
222
|
+
**Input** (JSON produced by the benchmark tool):
|
|
223
|
+
|
|
224
|
+
```json
|
|
225
|
+
[
|
|
226
|
+
{ "name": "encode/small", "value": 125430, "unit": "ops/sec", "range": "± 1200" },
|
|
227
|
+
{ "name": "encode/medium", "value": 48200, "unit": "ops/sec", "range": "± 480" },
|
|
228
|
+
{ "name": "decode/small", "value": 98700, "unit": "ops/sec" },
|
|
229
|
+
{ "name": "latency/p99", "value": 4.2, "unit": "ms", "range": "+/- 0.3" }
|
|
230
|
+
]
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
**Call:**
|
|
234
|
+
|
|
235
|
+
```ts
|
|
236
|
+
import { parseBenchmarkAction } from "@benchkit/format";
|
|
237
|
+
|
|
238
|
+
const result = parseBenchmarkAction(input);
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
**Result** (abbreviated):
|
|
242
|
+
|
|
243
|
+
```json
|
|
244
|
+
{
|
|
245
|
+
"benchmarks": [
|
|
246
|
+
{
|
|
247
|
+
"name": "encode/small",
|
|
248
|
+
"metrics": {
|
|
249
|
+
"value": { "value": 125430, "unit": "ops/sec", "direction": "bigger_is_better", "range": 1200 }
|
|
250
|
+
}
|
|
251
|
+
},
|
|
252
|
+
{
|
|
253
|
+
"name": "encode/medium",
|
|
254
|
+
"metrics": {
|
|
255
|
+
"value": { "value": 48200, "unit": "ops/sec", "direction": "bigger_is_better", "range": 480 }
|
|
256
|
+
}
|
|
257
|
+
},
|
|
258
|
+
{
|
|
259
|
+
"name": "decode/small",
|
|
260
|
+
"metrics": {
|
|
261
|
+
"value": { "value": 98700, "unit": "ops/sec", "direction": "bigger_is_better" }
|
|
262
|
+
}
|
|
263
|
+
},
|
|
264
|
+
{
|
|
265
|
+
"name": "latency/p99",
|
|
266
|
+
"metrics": {
|
|
267
|
+
"value": { "value": 4.2, "unit": "ms", "direction": "smaller_is_better", "range": 0.3 }
|
|
268
|
+
}
|
|
269
|
+
}
|
|
270
|
+
]
|
|
271
|
+
}
|
|
272
|
+
```
|
|
273
|
+
|
|
274
|
+
### `parseRustBench(input)`
|
|
275
|
+
|
|
276
|
+
Parses Rust `cargo bench` (libtest) text output. Each benchmark line produces one
|
|
277
|
+
`Benchmark` entry.
|
|
278
|
+
|
|
279
|
+
```ts
|
|
280
|
+
import { parseRustBench } from "@benchkit/format";
|
|
281
|
+
|
|
282
|
+
const result = parseRustBench(
|
|
283
|
+
"test sort::bench_sort ... bench: 320 ns/iter (+/- 42)"
|
|
284
|
+
);
|
|
285
|
+
// result.benchmarks[0].metrics => { ns_per_iter: { value: 320, unit: "ns/iter", range: 42 } }
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
### `parseHyperfine(input)`
|
|
289
|
+
|
|
290
|
+
Parses the JSON export from [Hyperfine](https://github.com/sharkdp/hyperfine)
|
|
291
|
+
(`hyperfine --export-json`). Each result becomes a benchmark named after the
|
|
292
|
+
command, with `mean`, `stddev`, `median`, `min`, and `max` metrics.
|
|
293
|
+
|
|
294
|
+
```ts
|
|
295
|
+
import { parseHyperfine } from "@benchkit/format";
|
|
296
|
+
|
|
297
|
+
const result = parseHyperfine(JSON.stringify({
|
|
298
|
+
results: [
|
|
299
|
+
{
|
|
300
|
+
command: "sleep 0.1",
|
|
301
|
+
mean: 0.105,
|
|
302
|
+
stddev: 0.002,
|
|
303
|
+
median: 0.105,
|
|
304
|
+
min: 0.103,
|
|
305
|
+
max: 0.108,
|
|
306
|
+
times: [0.103, 0.105, 0.108]
|
|
307
|
+
}
|
|
308
|
+
]
|
|
309
|
+
}));
|
|
310
|
+
```
|
|
311
|
+
|
|
312
|
+
### `parsePytestBenchmark(input)`
|
|
313
|
+
|
|
314
|
+
Parses [pytest-benchmark](https://pytest-benchmark.readthedocs.io/) JSON output
|
|
315
|
+
(`pytest --benchmark-json=results.json`). Each benchmark entry becomes a
|
|
316
|
+
`Benchmark` with metrics for `mean` (primary, seconds), `ops`, `rounds`,
|
|
317
|
+
`median`, `min`, `max`, and `stddev`.
|
|
318
|
+
|
|
319
|
+
```ts
|
|
320
|
+
import { parsePytestBenchmark } from "@benchkit/format";
|
|
321
|
+
|
|
322
|
+
const result = parsePytestBenchmark(JSON.stringify({
|
|
323
|
+
benchmarks: [
|
|
324
|
+
{
|
|
325
|
+
name: "test_sort",
|
|
326
|
+
fullname: "tests/test_perf.py::test_sort",
|
|
327
|
+
stats: {
|
|
328
|
+
min: 0.000123,
|
|
329
|
+
max: 0.000156,
|
|
330
|
+
mean: 0.000134,
|
|
331
|
+
stddev: 0.0000089,
|
|
332
|
+
rounds: 1000,
|
|
333
|
+
median: 0.000132,
|
|
334
|
+
ops: 7462.68
|
|
335
|
+
}
|
|
336
|
+
}
|
|
337
|
+
]
|
|
338
|
+
}));
|
|
339
|
+
// result.benchmarks[0].metrics.mean => { value: 0.000134, unit: "s", direction: "smaller_is_better", range: 0.0000089 }
|
|
340
|
+
// result.benchmarks[0].metrics.ops => { value: 7462.68, unit: "ops/s", direction: "bigger_is_better" }
|
|
341
|
+
// result.benchmarks[0].metrics.rounds => { value: 1000, direction: "bigger_is_better" }
|
|
342
|
+
```
|
|
343
|
+
|
|
344
|
+
**Python example** — generate and consume pytest-benchmark output:
|
|
345
|
+
|
|
346
|
+
```python
|
|
347
|
+
# conftest.py / test_perf.py
|
|
348
|
+
def test_sort(benchmark):
|
|
349
|
+
benchmark(sorted, range(1000))
|
|
350
|
+
```
|
|
351
|
+
|
|
352
|
+
```bash
|
|
353
|
+
pytest --benchmark-json=results.json
|
|
354
|
+
```
|
|
355
|
+
|
|
356
|
+
```ts
|
|
357
|
+
import { readFileSync } from "fs";
|
|
358
|
+
import { parsePytestBenchmark } from "@benchkit/format";
|
|
359
|
+
|
|
360
|
+
const result = parsePytestBenchmark(readFileSync("results.json", "utf-8"));
|
|
361
|
+
for (const bench of result.benchmarks) {
|
|
362
|
+
console.log(`${bench.name}: ${bench.metrics.mean.value}s (${bench.metrics.ops.value} ops/s)`);
|
|
363
|
+
}
|
|
364
|
+
```
|
|
365
|
+
|
|
366
|
+
### `inferDirection(unit)`
|
|
367
|
+
|
|
368
|
+
Infers whether a unit string represents a "bigger is better" or "smaller is
|
|
369
|
+
better" metric. Used internally by all parsers when no explicit `direction` is
|
|
370
|
+
provided.
|
|
371
|
+
|
|
372
|
+
```ts
|
|
373
|
+
import { inferDirection } from "@benchkit/format";
|
|
374
|
+
|
|
375
|
+
inferDirection("ops/sec"); // "bigger_is_better"
|
|
376
|
+
inferDirection("MB/s"); // "bigger_is_better"
|
|
377
|
+
inferDirection("throughput"); // "bigger_is_better"
|
|
378
|
+
inferDirection("ns/op"); // "smaller_is_better"
|
|
379
|
+
inferDirection("ms"); // "smaller_is_better"
|
|
380
|
+
inferDirection("B/op"); // "smaller_is_better"
|
|
381
|
+
```
|
|
382
|
+
|
|
383
|
+
The heuristic scans the lowercased unit string for substrings:
|
|
384
|
+
|
|
385
|
+
| Matched substring | Direction | Example units |
|
|
386
|
+
|---|---|---|
|
|
387
|
+
| `ops/s` | `bigger_is_better` | `ops/sec`, `ops/s` |
|
|
388
|
+
| `op/s` | `bigger_is_better` | `op/sec`, `op/s` |
|
|
389
|
+
| `/sec` | `bigger_is_better` | `req/sec`, `events/sec` |
|
|
390
|
+
| `mb/s` | `bigger_is_better` | `MB/s`, `mb/s` |
|
|
391
|
+
| `throughput` | `bigger_is_better` | `throughput` |
|
|
392
|
+
| `events` | `bigger_is_better` | `events`, `events/sec` |
|
|
393
|
+
| _(no match)_ | `smaller_is_better` | `ns/op`, `ms`, `B/op`, `allocs/op`, `ns/iter`, `bytes` |
|
|
394
|
+
|
|
395
|
+
## Types
|
|
396
|
+
|
|
397
|
+
All types mirror the JSON schemas in [`schema/`](../../schema/README.md).
|
|
398
|
+
|
|
399
|
+
### `compareRuns(current, baseline[], config?)`
|
|
400
|
+
|
|
401
|
+
Compare a current benchmark run against one or more baseline runs.
|
|
402
|
+
|
|
403
|
+
```ts
|
|
404
|
+
import { compareRuns } from "@benchkit/format";
|
|
405
|
+
|
|
406
|
+
const result = compareRuns(current, [baseline]);
|
|
407
|
+
if (result.hasRegression) {
|
|
408
|
+
console.log("Regressions detected!");
|
|
409
|
+
}
|
|
410
|
+
```
|
|
411
|
+
|
|
412
|
+
### `Sample`
|
|
413
|
+
|
|
414
|
+
A time-series data point within a benchmark run. `t` is seconds since
|
|
415
|
+
benchmark start; all other keys are metric values at that instant.
|
|
416
|
+
|
|
417
|
+
```ts
|
|
418
|
+
interface Sample {
|
|
419
|
+
t: number;
|
|
420
|
+
[metricName: string]: number;
|
|
421
|
+
}
|
|
422
|
+
```
|
|
423
|
+
|
|
424
|
+
### `MonitorContext`
|
|
425
|
+
|
|
426
|
+
Metadata about the resource monitoring context (when monitor output is merged via stash action).
|
|
427
|
+
|
|
428
|
+
```ts
|
|
429
|
+
interface MonitorContext {
|
|
430
|
+
monitor_version: string;
|
|
431
|
+
poll_interval_ms: number;
|
|
432
|
+
duration_ms: number;
|
|
433
|
+
runner_os?: string;
|
|
434
|
+
runner_arch?: string;
|
|
435
|
+
poll_count?: number;
|
|
436
|
+
kernel?: string;
|
|
437
|
+
cpu_model?: string;
|
|
438
|
+
cpu_count?: number;
|
|
439
|
+
total_memory_mb?: number;
|
|
440
|
+
}
|
|
441
|
+
```
|
|
442
|
+
|
|
443
|
+
### Series and index types
|
|
444
|
+
|
|
445
|
+
These types describe the aggregated files on the `bench-data` branch (see
|
|
446
|
+
[Data files](#data-files) below):
|
|
447
|
+
|
|
448
|
+
| Type | Schema | Purpose |
|
|
449
|
+
|---|---|---|
|
|
450
|
+
| `IndexFile` | [`index.schema.json`](../../schema/index.schema.json) | Run listing with per-run metadata |
|
|
451
|
+
| `RunEntry` | (inline in index schema) | Single entry inside `IndexFile.runs` |
|
|
452
|
+
| `SeriesFile` | [`series.schema.json`](../../schema/series.schema.json) | Pre-aggregated time-series for one metric |
|
|
453
|
+
| `SeriesEntry` | (inline in series schema) | Points array for one benchmark within a series |
|
|
454
|
+
| `DataPoint` | (inline in series schema) | Single `{timestamp, value}` point |
|
|
455
|
+
|
|
456
|
+
## Metric naming conventions
|
|
457
|
+
|
|
458
|
+
When the Go and benchmark-action parsers normalize metrics they apply these
|
|
459
|
+
rules:
|
|
460
|
+
|
|
461
|
+
| Go unit | Metric name | Rule |
|
|
462
|
+
|---|---|---|
|
|
463
|
+
| `ns/op` | `ns_per_op` | Replace `/` with `_per_`, lowercase |
|
|
464
|
+
| `B/op` | `bytes_per_op` | Known alias |
|
|
465
|
+
| `allocs/op` | `allocs_per_op` | Replace `/` with `_per_`, lowercase |
|
|
466
|
+
| `MB/s` | `mb_per_s` | Known alias |
|
|
467
|
+
|
|
468
|
+
General algorithm: replace every `/` with `_per_`, replace spaces with `_`,
|
|
469
|
+
then lowercase. Specific aliases (`B/op` → `bytes_per_op`, `MB/s` → `mb_per_s`, `ns/iter` → `ns_per_iter`)
|
|
470
|
+
take precedence.
|
|
471
|
+
|
|
472
|
+
## Direction semantics
|
|
473
|
+
|
|
474
|
+
Every metric may declare whether higher or lower values represent improvement.
|
|
475
|
+
|
|
476
|
+
| Direction | Meaning | Examples |
|
|
477
|
+
|---|---|---|
|
|
478
|
+
| `bigger_is_better` | Higher values are improvements | throughput, ops/sec, MB/s |
|
|
479
|
+
| `smaller_is_better` | Lower values are improvements | latency, ns/op, allocations |
|
|
480
|
+
|
|
481
|
+
When direction is not specified in the input, all parsers call `inferDirection(unit)`
|
|
482
|
+
to infer it from the unit string. See the [`inferDirection` section](#inferdirectionunit)
|
|
483
|
+
for the full list of recognized unit patterns.
|
|
484
|
+
|
|
485
|
+
If no unit is provided and no direction is set, consumers should treat the
|
|
486
|
+
metric as `smaller_is_better`.
|
|
487
|
+
|
|
488
|
+
## Data files
|
|
489
|
+
|
|
490
|
+
The `bench-stash` and `bench-aggregate` actions maintain a set of JSON files
|
|
491
|
+
on a dedicated Git branch (default `bench-data`). The branch layout is:
|
|
492
|
+
|
|
493
|
+
```
|
|
494
|
+
data/
|
|
495
|
+
├── index.json # All runs (IndexFile)
|
|
496
|
+
├── runs/
|
|
497
|
+
│ ├── {runId}.json # OTLP metrics JSON for one run
|
|
498
|
+
│ └── ...
|
|
499
|
+
└── series/
|
|
500
|
+
├── {metricName}.json # Time-series for one metric (SeriesFile)
|
|
501
|
+
└── ...
|
|
502
|
+
```
|
|
503
|
+
|
|
504
|
+
| File | Schema | Written by |
|
|
505
|
+
|---|---|---|
|
|
506
|
+
| `data/index.json` | [`index.schema.json`](../../schema/index.schema.json) | `bench-aggregate` |
|
|
507
|
+
| `data/runs/{id}.json` | OTLP metrics JSON | `bench-stash` |
|
|
508
|
+
| `data/series/{metric}.json` | [`series.schema.json`](../../schema/series.schema.json) | `bench-aggregate` |
|
|
509
|
+
|
|
510
|
+
## Validating your own output
|
|
511
|
+
|
|
512
|
+
Validate aggregated output against the JSON schemas:
|
|
513
|
+
|
|
514
|
+
```bash
|
|
515
|
+
npx ajv validate -s schema/index.schema.json -d data/index.json
|
|
516
|
+
npx ajv validate -s schema/series.schema.json -d data/series/ns_per_op.json
|
|
517
|
+
```
|
|
518
|
+
|
|
519
|
+
## License
|
|
520
|
+
|
|
521
|
+
MIT
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Build an OtlpMetricsDocument from a simple benchmark input shape.
|
|
3
|
+
*
|
|
4
|
+
* This is the canonical way to produce OTLP output from any parser or
|
|
5
|
+
* builder. Modeled after emit-metric's buildOtlpMetricPayload().
|
|
6
|
+
*/
|
|
7
|
+
import type { OtlpMetricsDocument } from "./types.js";
|
|
8
|
+
import type { Direction, RunKind, SourceFormat } from "./otlp-conventions.js";
|
|
9
|
+
/**
 * A single measured value for one metric of a benchmark.
 */
export interface OtlpResultMetric {
  /** The numeric measurement. */
  value: number;
  /** Unit string, e.g. "ms" or "events/sec". */
  unit?: string;
  /**
   * Whether bigger or smaller values are better.
   * NOTE(review): presumably inferred from `unit` when omitted, as the
   * package README describes for the parsers — confirm in the builder.
   */
  direction?: Direction;
}
|
|
14
|
+
/**
 * One benchmark (scenario) to be emitted into the OTLP document.
 */
export interface OtlpResultBenchmark {
  /** Benchmark/scenario name. */
  name: string;
  /** Optional string key/value tags for this benchmark. */
  tags?: Record<string, string>;
  /**
   * Metric name -> metric. A bare number is accepted as shorthand
   * for `{ value: n }` (see normalizeMetric in build-otlp-result.js).
   */
  metrics: Record<string, OtlpResultMetric | number>;
}
|
|
19
|
+
/**
 * Run-level metadata mapped onto OTLP resource attributes by
 * buildResourceAttributes(). Only `sourceFormat` is required; all
 * other fields are emitted only when present.
 */
export interface OtlpResultContext {
  /** Identifier for this benchmark run. */
  runId?: string;
  /** Kind of run (project-defined RunKind). */
  kind?: RunKind;
  /** Originating input format; the only required field. */
  sourceFormat: SourceFormat;
  /** Git ref, e.g. the value of GITHUB_REF. */
  ref?: string;
  /** Commit SHA, e.g. the value of GITHUB_SHA. */
  commit?: string;
  /** CI workflow name. */
  workflow?: string;
  /** CI job name. */
  job?: string;
  /** CI run attempt (kept as a string). */
  runAttempt?: string;
  /** Runner identifier. */
  runner?: string;
  /** OTLP service.name resource attribute value. */
  serviceName?: string;
}
|
|
31
|
+
/**
 * Options accepted by buildOtlpResult().
 */
export interface BuildOtlpResultOptions {
  /** Benchmarks to emit; each becomes a scenario. */
  benchmarks: OtlpResultBenchmark[];
  /**
   * Optional run context. When omitted, the builder falls back to
   * `{ sourceFormat: "otlp" }`.
   */
  context?: OtlpResultContext;
}
|
|
35
|
+
/**
|
|
36
|
+
* Build an OtlpMetricsDocument from a list of benchmarks and optional context.
|
|
37
|
+
*
|
|
38
|
+
* Each benchmark becomes a scenario. Each metric key within a benchmark
|
|
39
|
+
* becomes a separate OTLP metric with a single gauge datapoint carrying
|
|
40
|
+
* benchkit semantic attributes.
|
|
41
|
+
*/
|
|
42
|
+
export declare function buildOtlpResult(options: BuildOtlpResultOptions): OtlpMetricsDocument;
|
|
43
|
+
//# sourceMappingURL=build-otlp-result.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"build-otlp-result.d.ts","sourceRoot":"","sources":["../src/build-otlp-result.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,OAAO,KAAK,EACV,mBAAmB,EAIpB,MAAM,YAAY,CAAC;AACpB,OAAO,KAAK,EAAE,SAAS,EAAE,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAoB9E,MAAM,WAAW,gBAAgB;IAC/B,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,SAAS,CAAC,EAAE,SAAS,CAAC;CACvB;AAED,MAAM,WAAW,mBAAmB;IAClC,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,gBAAgB,GAAG,MAAM,CAAC,CAAC;CACpD;AAED,MAAM,WAAW,iBAAiB;IAChC,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,IAAI,CAAC,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,YAAY,CAAC;IAC3B,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;CACtB;AAED,MAAM,WAAW,sBAAsB;IACrC,UAAU,EAAE,mBAAmB,EAAE,CAAC;IAClC,OAAO,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AA+CD;;;;;;GAMG;AACH,wBAAgB,eAAe,CAAC,OAAO,EAAE,sBAAsB,GAAG,mBAAmB,CAkDpF"}
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.buildOtlpResult = buildOtlpResult;
|
|
4
|
+
const otlp_conventions_js_1 = require("./otlp-conventions.js");
|
|
5
|
+
// ---- Attribute helpers ----------------------------------------------------
|
|
6
|
+
/**
 * Wrap a primitive in the OTLP AnyValue JSON envelope.
 *
 * Booleans become `boolValue`; safe integers become `intValue`
 * (stringified, per the OTLP JSON encoding of 64-bit integers);
 * other numbers become `doubleValue`; anything else is emitted
 * as `stringValue`.
 */
function toOtlpValue(value) {
    switch (typeof value) {
        case "boolean":
            return { boolValue: value };
        case "number": {
            const fitsInt = Number.isSafeInteger(value);
            return fitsInt ? { intValue: String(value) } : { doubleValue: value };
        }
        default:
            return { stringValue: value };
    }
}
|
|
16
|
+
/**
 * Build a single OTLP KeyValue attribute from a key and a primitive.
 */
function attr(key, value) {
    const wrapped = toOtlpValue(value);
    return { key, value: wrapped };
}
|
|
19
|
+
/**
 * Encode a numeric datapoint value per the OTLP JSON mapping:
 * safe integers are emitted as `asInt` (stringified), everything
 * else as `asDouble`.
 */
function dataPointValue(value) {
    if (Number.isSafeInteger(value)) {
        return { asInt: String(value) };
    }
    return { asDouble: value };
}
|
|
24
|
+
// ---- Build ----------------------------------------------------------------
|
|
25
|
+
/**
 * Map run-context fields onto OTLP resource attributes.
 *
 * `sourceFormat` is always emitted; every other field is included
 * only when set (truthy). Attribute order matches the field order
 * of the table below.
 */
function buildResourceAttributes(ctx) {
    // Emits [attr] when the value is set, [] otherwise, so the
    // result can be assembled with spreads in a fixed order.
    const optional = (key, value) => (value ? [attr(key, value)] : []);
    return [
        ...optional(otlp_conventions_js_1.ATTR_RUN_ID, ctx.runId),
        ...optional(otlp_conventions_js_1.ATTR_KIND, ctx.kind),
        attr(otlp_conventions_js_1.ATTR_SOURCE_FORMAT, ctx.sourceFormat),
        ...optional(otlp_conventions_js_1.ATTR_REF, ctx.ref),
        ...optional(otlp_conventions_js_1.ATTR_COMMIT, ctx.commit),
        ...optional(otlp_conventions_js_1.ATTR_WORKFLOW, ctx.workflow),
        ...optional(otlp_conventions_js_1.ATTR_JOB, ctx.job),
        ...optional(otlp_conventions_js_1.ATTR_RUN_ATTEMPT, ctx.runAttempt),
        ...optional(otlp_conventions_js_1.ATTR_RUNNER, ctx.runner),
        ...optional(otlp_conventions_js_1.ATTR_SERVICE_NAME, ctx.serviceName),
    ];
}
|
|
48
|
+
/**
 * Accept either a bare number or a metric object; bare numbers are wrapped
 * as `{ value }`, objects pass through unchanged (same reference).
 */
function normalizeMetric(input) {
    if (typeof input === "number") {
        return { value: input };
    }
    return input;
}
|
|
51
|
+
/**
 * Build an OtlpMetricsDocument from a list of benchmarks and optional context.
 *
 * Each benchmark becomes a scenario. Each metric key within a benchmark
 * becomes a separate OTLP metric with a single gauge datapoint carrying
 * benchkit semantic attributes.
 */
function buildOtlpResult(options) {
    const ctx = options.context ?? { sourceFormat: "otlp" };
    // One shared capture timestamp for every datapoint: ms epoch -> ns string.
    const timeUnixNano = (BigInt(Date.now()) * 1000000n).toString();
    const metrics = options.benchmarks.flatMap((bench) =>
        Object.entries(bench.metrics).map(([metricName, rawMetric]) => {
            const metric = normalizeMetric(rawMetric);
            // Scenario/series/role attributes come first, then the optional
            // direction, then any user-supplied tags, preserving entry order.
            const attributes = [
                attr(otlp_conventions_js_1.ATTR_SCENARIO, bench.name),
                attr(otlp_conventions_js_1.ATTR_SERIES, bench.name),
                attr(otlp_conventions_js_1.ATTR_METRIC_ROLE, "outcome"),
            ];
            if (metric.direction) {
                attributes.push(attr(otlp_conventions_js_1.ATTR_METRIC_DIRECTION, metric.direction));
            }
            for (const [tagKey, tagValue] of Object.entries(bench.tags ?? {})) {
                attributes.push(attr(tagKey, tagValue));
            }
            return {
                name: metricName,
                unit: metric.unit,
                gauge: {
                    dataPoints: [{
                        timeUnixNano,
                        attributes,
                        ...dataPointValue(metric.value),
                    }],
                },
            };
        }));
    return {
        resourceMetrics: [{
            resource: {
                attributes: buildResourceAttributes(ctx),
            },
            scopeMetrics: [{
                metrics,
            }],
        }],
    };
}
|
|
102
|
+
//# sourceMappingURL=build-otlp-result.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"build-otlp-result.js","sourceRoot":"","sources":["../src/build-otlp-result.ts"],"names":[],"mappings":";;AAkHA,0CAkDC;AAvJD,+DAe+B;AAkC/B,8EAA8E;AAE9E,SAAS,WAAW,CAAC,KAAgC;IACnD,IAAI,OAAO,KAAK,KAAK,SAAS;QAAE,OAAO,EAAE,SAAS,EAAE,KAAK,EAAE,CAAC;IAC5D,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE,CAAC;QAC9B,OAAO,MAAM,CAAC,aAAa,CAAC,KAAK,CAAC;YAChC,CAAC,CAAC,EAAE,QAAQ,EAAE,MAAM,CAAC,KAAK,CAAC,EAAE;YAC7B,CAAC,CAAC,EAAE,WAAW,EAAE,KAAK,EAAE,CAAC;IAC7B,CAAC;IACD,OAAO,EAAE,WAAW,EAAE,KAAK,EAAE,CAAC;AAChC,CAAC;AAED,SAAS,IAAI,CAAC,GAAW,EAAE,KAAgC;IACzD,OAAO,EAAE,GAAG,EAAE,KAAK,EAAE,WAAW,CAAC,KAAK,CAAC,EAAE,CAAC;AAC5C,CAAC;AAED,SAAS,cAAc,CAAC,KAAa;IACnC,OAAO,MAAM,CAAC,aAAa,CAAC,KAAK,CAAC;QAChC,CAAC,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,EAAE;QAC1B,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC;AAC1B,CAAC;AAED,8EAA8E;AAE9E,SAAS,uBAAuB,CAAC,GAAsB;IACrD,MAAM,KAAK,GAAoB,EAAE,CAAC;IAClC,IAAI,GAAG,CAAC,KAAK;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,iCAAW,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC;IACxD,IAAI,GAAG,CAAC,IAAI;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,+BAAS,EAAE,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC;IACpD,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,wCAAkB,EAAE,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC;IACvD,IAAI,GAAG,CAAC,GAAG;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,8BAAQ,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;IACjD,IAAI,GAAG,CAAC,MAAM;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,iCAAW,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC;IAC1D,IAAI,GAAG,CAAC,QAAQ;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,mCAAa,EAAE,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC;IAChE,IAAI,GAAG,CAAC,GAAG;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,8BAAQ,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;IACjD,IAAI,GAAG,CAAC,UAAU;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,sCAAgB,EAAE,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC;IACvE,IAAI,GAAG,CAAC,MAAM;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,iCAAW,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC;IAC1D,IAAI,GAAG,CAAC,WAAW;QAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,uCAAiB,EAAE,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC;IAC1E,OAAO,KAAK,CAAC;AACf,CAAC;AAED,SAAS,eAAe,CACtB,KAAgC;IAEhC,OAAO,OAAO,KAAK,KAAK,QAAQ,CAAC,CAAC,CAAC,EAAE,KAAK,EAAE,KAAK,EAA
E,CAAC,CAAC,CAAC,KAAK,CAAC;AAC9D,CAAC;AAED;;;;;;GAMG;AACH,SAAgB,eAAe,CAAC,OAA+B;IAC7D,MAAM,GAAG,GAAsB,OAAO,CAAC,OAAO,IAAI,EAAE,YAAY,EAAE,MAAM,EAAE,CAAC;IAC3E,MAAM,GAAG,GAAG,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,QAAU,CAAC,CAAC;IAEpD,MAAM,OAAO,GAAiB,EAAE,CAAC;IAEjC,KAAK,MAAM,KAAK,IAAI,OAAO,CAAC,UAAU,EAAE,CAAC;QACvC,KAAK,MAAM,CAAC,UAAU,EAAE,SAAS,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,KAAK,CAAC,OAAO,CAAC,EAAE,CAAC;YACpE,MAAM,CAAC,GAAG,eAAe,CAAC,SAAS,CAAC,CAAC;YAErC,MAAM,UAAU,GAAoB;gBAClC,IAAI,CAAC,mCAAa,EAAE,KAAK,CAAC,IAAI,CAAC;gBAC/B,IAAI,CAAC,iCAAW,EAAE,KAAK,CAAC,IAAI,CAAC;gBAC7B,IAAI,CAAC,sCAAgB,EAAE,SAAS,CAAC;aAClC,CAAC;YAEF,IAAI,CAAC,CAAC,SAAS,EAAE,CAAC;gBAChB,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,2CAAqB,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;YAC5D,CAAC;YAED,IAAI,KAAK,CAAC,IAAI,EAAE,CAAC;gBACf,KAAK,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;oBAChD,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;gBAC9B,CAAC;YACH,CAAC;YAED,OAAO,CAAC,IAAI,CAAC;gBACX,IAAI,EAAE,UAAU;gBAChB,IAAI,EAAE,CAAC,CAAC,IAAI;gBACZ,KAAK,EAAE;oBACL,UAAU,EAAE,CAAC;4BACX,YAAY,EAAE,GAAG;4BACjB,UAAU,EAAE,UAAU;4BACtB,GAAG,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC;yBAC3B,CAAC;iBACH;aACF,CAAC,CAAC;QACL,CAAC;IACH,CAAC;IAED,OAAO;QACL,eAAe,EAAE,CAAC;gBAChB,QAAQ,EAAE;oBACR,UAAU,EAAE,uBAAuB,CAAC,GAAG,CAAC;iBACzC;gBACD,YAAY,EAAE,CAAC;wBACb,OAAO;qBACR,CAAC;aACH,CAAC;KACH,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"build-otlp-result.test.d.ts","sourceRoot":"","sources":["../src/build-otlp-result.test.ts"],"names":[],"mappings":""}
|