akm-cli 0.7.0-rc1 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{src/cli.js → cli.js} +100 -16
- package/dist/{src/commands → commands}/config-cli.js +42 -0
- package/dist/{src/commands → commands}/history.js +78 -7
- package/dist/{src/commands → commands}/registry-search.js +69 -6
- package/dist/{src/commands → commands}/search.js +30 -3
- package/dist/{src/commands → commands}/show.js +29 -0
- package/dist/{src/commands → commands}/source-add.js +5 -1
- package/dist/{src/commands → commands}/source-manage.js +7 -1
- package/dist/{src/core → core}/config.js +28 -0
- package/dist/{src/indexer → indexer}/db-search.js +1 -0
- package/dist/{src/indexer → indexer}/indexer.js +16 -2
- package/dist/{src/indexer → indexer}/matchers.js +1 -1
- package/dist/{src/indexer → indexer}/search-source.js +4 -2
- package/dist/{src/integrations → integrations}/agent/profiles.js +1 -1
- package/dist/{src/integrations → integrations}/agent/spawn.js +67 -16
- package/dist/{src/integrations → integrations}/github.js +9 -3
- package/dist/{src/llm → llm}/embedders/remote.js +37 -3
- package/dist/{src/output → output}/cli-hints.js +15 -2
- package/dist/{src/output → output}/renderers.js +3 -1
- package/dist/{src/output → output}/shapes.js +8 -1
- package/dist/{src/output → output}/text.js +156 -3
- package/dist/{src/registry → registry}/build-index.js +5 -4
- package/dist/{src/registry → registry}/providers/static-index.js +3 -1
- package/dist/{src/setup → setup}/setup.js +9 -0
- package/dist/{src/wiki → wiki}/wiki.js +54 -6
- package/dist/{src/workflows → workflows}/runs.js +37 -3
- package/package.json +8 -8
- package/dist/tests/add-website-source.test.js +0 -119
- package/dist/tests/agent/agent-config-loader.test.js +0 -70
- package/dist/tests/agent/agent-config.test.js +0 -221
- package/dist/tests/agent/agent-detect.test.js +0 -100
- package/dist/tests/agent/agent-spawn.test.js +0 -234
- package/dist/tests/agent-output.test.js +0 -186
- package/dist/tests/architecture/agent-no-llm-sdk-guard.test.js +0 -103
- package/dist/tests/architecture/agent-spawn-seam.test.js +0 -193
- package/dist/tests/architecture/llm-stateless-seam.test.js +0 -112
- package/dist/tests/asset-ref.test.js +0 -192
- package/dist/tests/asset-registry.test.js +0 -103
- package/dist/tests/asset-spec.test.js +0 -241
- package/dist/tests/bench/attribution.test.js +0 -995
- package/dist/tests/bench/cleanup-sigint.test.js +0 -83
- package/dist/tests/bench/cleanup.js +0 -203
- package/dist/tests/bench/cleanup.test.js +0 -166
- package/dist/tests/bench/cli.js +0 -683
- package/dist/tests/bench/cli.test.js +0 -177
- package/dist/tests/bench/compare.test.js +0 -556
- package/dist/tests/bench/corpus.js +0 -314
- package/dist/tests/bench/corpus.test.js +0 -258
- package/dist/tests/bench/driver.js +0 -346
- package/dist/tests/bench/driver.test.js +0 -443
- package/dist/tests/bench/evolve-metrics.js +0 -179
- package/dist/tests/bench/evolve-metrics.test.js +0 -187
- package/dist/tests/bench/evolve.js +0 -580
- package/dist/tests/bench/evolve.test.js +0 -616
- package/dist/tests/bench/failure-modes.test.js +0 -300
- package/dist/tests/bench/feedback-integrity.test.js +0 -456
- package/dist/tests/bench/leakage.test.js +0 -125
- package/dist/tests/bench/learning-curve.test.js +0 -133
- package/dist/tests/bench/metrics.js +0 -2319
- package/dist/tests/bench/metrics.test.js +0 -1144
- package/dist/tests/bench/no-os-tmpdir-invariant.test.js +0 -43
- package/dist/tests/bench/report.js +0 -1821
- package/dist/tests/bench/report.test.js +0 -989
- package/dist/tests/bench/runner.js +0 -536
- package/dist/tests/bench/runner.test.js +0 -958
- package/dist/tests/bench/search-bridge.test.js +0 -331
- package/dist/tests/bench/tmp.js +0 -41
- package/dist/tests/bench/trajectory.js +0 -116
- package/dist/tests/bench/trajectory.test.js +0 -127
- package/dist/tests/bench/verifier.js +0 -109
- package/dist/tests/bench/verifier.test.js +0 -118
- package/dist/tests/bench/workflow-evaluator.js +0 -557
- package/dist/tests/bench/workflow-evaluator.test.js +0 -421
- package/dist/tests/bench/workflow-spec.js +0 -358
- package/dist/tests/bench/workflow-spec.test.js +0 -363
- package/dist/tests/bench/workflow-trace.js +0 -438
- package/dist/tests/bench/workflow-trace.test.js +0 -254
- package/dist/tests/benchmark-search-quality.js +0 -536
- package/dist/tests/benchmark-suite.js +0 -1441
- package/dist/tests/capture-cli.test.js +0 -112
- package/dist/tests/cli-errors.test.js +0 -203
- package/dist/tests/commands/events.test.js +0 -370
- package/dist/tests/commands/history.test.js +0 -223
- package/dist/tests/commands/import.test.js +0 -103
- package/dist/tests/commands/proposal-cli.test.js +0 -209
- package/dist/tests/commands/reflect-propose-cli.test.js +0 -333
- package/dist/tests/commands/remember.test.js +0 -97
- package/dist/tests/commands/scope-flags.test.js +0 -300
- package/dist/tests/commands/search.test.js +0 -537
- package/dist/tests/commands/show-indexer-parity.test.js +0 -117
- package/dist/tests/commands/show.test.js +0 -294
- package/dist/tests/common.test.js +0 -266
- package/dist/tests/completions.test.js +0 -142
- package/dist/tests/config-cli.test.js +0 -193
- package/dist/tests/config-llm-features.test.js +0 -139
- package/dist/tests/config.test.js +0 -544
- package/dist/tests/contracts/migration-baseline.test.js +0 -43
- package/dist/tests/contracts/reflect-propose-envelope.test.js +0 -139
- package/dist/tests/contracts/spec-helpers.js +0 -46
- package/dist/tests/contracts/v1-spec-section-11-proposal-queue.test.js +0 -228
- package/dist/tests/contracts/v1-spec-section-12-agent-config.test.js +0 -56
- package/dist/tests/contracts/v1-spec-section-13-lesson-type.test.js +0 -34
- package/dist/tests/contracts/v1-spec-section-14-llm-features.test.js +0 -94
- package/dist/tests/contracts/v1-spec-section-4-1-asset-types.test.js +0 -39
- package/dist/tests/contracts/v1-spec-section-4-2-quality-rules.test.js +0 -44
- package/dist/tests/contracts/v1-spec-section-5-configuration.test.js +0 -47
- package/dist/tests/contracts/v1-spec-section-6-orchestration.test.js +0 -40
- package/dist/tests/contracts/v1-spec-section-7-module-layout.test.js +0 -58
- package/dist/tests/contracts/v1-spec-section-8-extension-points.test.js +0 -34
- package/dist/tests/contracts/v1-spec-section-9-4-cli-surface.test.js +0 -75
- package/dist/tests/contracts/v1-spec-section-9-7-llm-agent-boundary.test.js +0 -36
- package/dist/tests/core/write-source.test.js +0 -366
- package/dist/tests/curate-command.test.js +0 -87
- package/dist/tests/db-scoring.test.js +0 -201
- package/dist/tests/db.test.js +0 -654
- package/dist/tests/distill-cli-flag.test.js +0 -208
- package/dist/tests/distill.test.js +0 -515
- package/dist/tests/docker-install.test.js +0 -120
- package/dist/tests/e2e.test.js +0 -1398
- package/dist/tests/embedder.test.js +0 -340
- package/dist/tests/embedding-model-config.test.js +0 -379
- package/dist/tests/feedback-command.test.js +0 -172
- package/dist/tests/file-context.test.js +0 -552
- package/dist/tests/fixtures/scripts/git/summarize-diff.js +0 -9
- package/dist/tests/fixtures/scripts/lint/eslint-check.js +0 -7
- package/dist/tests/fixtures/stashes/load.js +0 -166
- package/dist/tests/fixtures/stashes/load.test.js +0 -88
- package/dist/tests/fixtures/stashes/ranking-baseline/scripts/mem0-search.js +0 -12
- package/dist/tests/frontmatter.test.js +0 -190
- package/dist/tests/fts-field-weighting.test.js +0 -254
- package/dist/tests/fuzzy-search.test.js +0 -230
- package/dist/tests/git-provider-clone.test.js +0 -45
- package/dist/tests/github.test.js +0 -161
- package/dist/tests/graph-boost-ranking.test.js +0 -305
- package/dist/tests/graph-extraction.test.js +0 -282
- package/dist/tests/helpers/usage-events.js +0 -8
- package/dist/tests/index-pass-llm.test.js +0 -161
- package/dist/tests/indexer.test.js +0 -559
- package/dist/tests/info-command.test.js +0 -166
- package/dist/tests/init.test.js +0 -69
- package/dist/tests/install-script.test.js +0 -246
- package/dist/tests/integration/agent-real-profile.test.js +0 -94
- package/dist/tests/issue-36-repro.test.js +0 -304
- package/dist/tests/issues-191-194.test.js +0 -160
- package/dist/tests/lesson-lint.test.js +0 -111
- package/dist/tests/llm-client.test.js +0 -115
- package/dist/tests/llm-feature-gate.test.js +0 -151
- package/dist/tests/llm.test.js +0 -139
- package/dist/tests/lockfile.test.js +0 -216
- package/dist/tests/manifest.test.js +0 -205
- package/dist/tests/markdown.test.js +0 -126
- package/dist/tests/matchers-unit.test.js +0 -189
- package/dist/tests/memory-inference.test.js +0 -299
- package/dist/tests/merge-scoring.test.js +0 -136
- package/dist/tests/metadata.test.js +0 -313
- package/dist/tests/migration-help.test.js +0 -89
- package/dist/tests/origin-resolve.test.js +0 -124
- package/dist/tests/output-baseline.test.js +0 -217
- package/dist/tests/output-shapes-unit.test.js +0 -476
- package/dist/tests/parallel-search.test.js +0 -272
- package/dist/tests/parameter-metadata.test.js +0 -365
- package/dist/tests/paths.test.js +0 -177
- package/dist/tests/progressive-disclosure.test.js +0 -280
- package/dist/tests/proposals.test.js +0 -279
- package/dist/tests/proposed-quality.test.js +0 -271
- package/dist/tests/provider-registry.test.js +0 -32
- package/dist/tests/ranking-regression.test.js +0 -548
- package/dist/tests/reflect-propose.test.js +0 -455
- package/dist/tests/registry-build-index.test.js +0 -378
- package/dist/tests/registry-cli.test.js +0 -290
- package/dist/tests/registry-index-v2.test.js +0 -430
- package/dist/tests/registry-install.test.js +0 -728
- package/dist/tests/registry-providers/parity.test.js +0 -189
- package/dist/tests/registry-providers/skills-sh.test.js +0 -309
- package/dist/tests/registry-providers/static-index.test.js +0 -204
- package/dist/tests/registry-resolve.test.js +0 -126
- package/dist/tests/registry-search.test.js +0 -723
- package/dist/tests/remember-frontmatter.test.js +0 -380
- package/dist/tests/remember-unit.test.js +0 -123
- package/dist/tests/ripgrep-install.test.js +0 -251
- package/dist/tests/ripgrep-resolve.test.js +0 -108
- package/dist/tests/ripgrep.test.js +0 -163
- package/dist/tests/save-command.test.js +0 -94
- package/dist/tests/save-trust-qa-fixes.test.js +0 -270
- package/dist/tests/scoring-pipeline.test.js +0 -648
- package/dist/tests/search-include-proposed-cli.test.js +0 -118
- package/dist/tests/self-update.test.js +0 -442
- package/dist/tests/semantic-search-e2e.test.js +0 -512
- package/dist/tests/semantic-status.test.js +0 -471
- package/dist/tests/setup-run.integration.js +0 -877
- package/dist/tests/setup-wizard.test.js +0 -198
- package/dist/tests/setup.test.js +0 -131
- package/dist/tests/source-add.test.js +0 -11
- package/dist/tests/source-clone.test.js +0 -254
- package/dist/tests/source-manage.test.js +0 -366
- package/dist/tests/source-providers/filesystem.test.js +0 -82
- package/dist/tests/source-providers/git.test.js +0 -252
- package/dist/tests/source-providers/website.test.js +0 -128
- package/dist/tests/source-qa-fixes.test.js +0 -268
- package/dist/tests/source-registry.test.js +0 -350
- package/dist/tests/source-resolve.test.js +0 -100
- package/dist/tests/source-source.test.js +0 -221
- package/dist/tests/source.test.js +0 -533
- package/dist/tests/tar-utils-scan.test.js +0 -73
- package/dist/tests/toggle-components.test.js +0 -73
- package/dist/tests/usage-telemetry.test.js +0 -265
- package/dist/tests/utility-scoring.test.js +0 -558
- package/dist/tests/vault-load-error.test.js +0 -78
- package/dist/tests/vault-qa-fixes.test.js +0 -194
- package/dist/tests/vault.test.js +0 -429
- package/dist/tests/vector-search.test.js +0 -608
- package/dist/tests/walker.test.js +0 -252
- package/dist/tests/wave2-cluster-bc.test.js +0 -228
- package/dist/tests/wave2-cluster-d.test.js +0 -180
- package/dist/tests/wave2-cluster-e.test.js +0 -179
- package/dist/tests/wiki-qa-fixes.test.js +0 -270
- package/dist/tests/wiki.test.js +0 -529
- package/dist/tests/workflow-cli.test.js +0 -271
- package/dist/tests/workflow-markdown.test.js +0 -171
- package/dist/tests/workflow-path-escape.test.js +0 -132
- package/dist/tests/workflow-qa-fixes.test.js +0 -377
- package/dist/tests/workflows/indexer-rejection.test.js +0 -213
- /package/dist/{src/commands → commands}/completions.js +0 -0
- /package/dist/{src/commands → commands}/curate.js +0 -0
- /package/dist/{src/commands → commands}/distill.js +0 -0
- /package/dist/{src/commands → commands}/events.js +0 -0
- /package/dist/{src/commands → commands}/info.js +0 -0
- /package/dist/{src/commands → commands}/init.js +0 -0
- /package/dist/{src/commands → commands}/install-audit.js +0 -0
- /package/dist/{src/commands → commands}/installed-stashes.js +0 -0
- /package/dist/{src/commands → commands}/migration-help.js +0 -0
- /package/dist/{src/commands → commands}/proposal.js +0 -0
- /package/dist/{src/commands → commands}/propose.js +0 -0
- /package/dist/{src/commands → commands}/reflect.js +0 -0
- /package/dist/{src/commands → commands}/remember.js +0 -0
- /package/dist/{src/commands → commands}/self-update.js +0 -0
- /package/dist/{src/commands → commands}/source-clone.js +0 -0
- /package/dist/{src/commands → commands}/vault.js +0 -0
- /package/dist/{src/core → core}/asset-ref.js +0 -0
- /package/dist/{src/core → core}/asset-registry.js +0 -0
- /package/dist/{src/core → core}/asset-spec.js +0 -0
- /package/dist/{src/core → core}/common.js +0 -0
- /package/dist/{src/core → core}/errors.js +0 -0
- /package/dist/{src/core → core}/events.js +0 -0
- /package/dist/{src/core → core}/frontmatter.js +0 -0
- /package/dist/{src/core → core}/lesson-lint.js +0 -0
- /package/dist/{src/core → core}/markdown.js +0 -0
- /package/dist/{src/core → core}/paths.js +0 -0
- /package/dist/{src/core → core}/proposals.js +0 -0
- /package/dist/{src/core → core}/warn.js +0 -0
- /package/dist/{src/core → core}/write-source.js +0 -0
- /package/dist/{src/indexer → indexer}/db.js +0 -0
- /package/dist/{src/indexer → indexer}/file-context.js +0 -0
- /package/dist/{src/indexer → indexer}/graph-boost.js +0 -0
- /package/dist/{src/indexer → indexer}/graph-extraction.js +0 -0
- /package/dist/{src/indexer → indexer}/manifest.js +0 -0
- /package/dist/{src/indexer → indexer}/memory-inference.js +0 -0
- /package/dist/{src/indexer → indexer}/metadata.js +0 -0
- /package/dist/{src/indexer → indexer}/search-fields.js +0 -0
- /package/dist/{src/indexer → indexer}/semantic-status.js +0 -0
- /package/dist/{src/indexer → indexer}/usage-events.js +0 -0
- /package/dist/{src/indexer → indexer}/walker.js +0 -0
- /package/dist/{src/integrations → integrations}/agent/config.js +0 -0
- /package/dist/{src/integrations → integrations}/agent/detect.js +0 -0
- /package/dist/{src/integrations → integrations}/agent/index.js +0 -0
- /package/dist/{src/integrations → integrations}/agent/prompts.js +0 -0
- /package/dist/{src/integrations → integrations}/lockfile.js +0 -0
- /package/dist/{src/llm → llm}/client.js +0 -0
- /package/dist/{src/llm → llm}/embedder.js +0 -0
- /package/dist/{src/llm → llm}/embedders/cache.js +0 -0
- /package/dist/{src/llm → llm}/embedders/local.js +0 -0
- /package/dist/{src/llm → llm}/embedders/types.js +0 -0
- /package/dist/{src/llm → llm}/feature-gate.js +0 -0
- /package/dist/{src/llm → llm}/graph-extract.js +0 -0
- /package/dist/{src/llm → llm}/index-passes.js +0 -0
- /package/dist/{src/llm → llm}/memory-infer.js +0 -0
- /package/dist/{src/llm → llm}/metadata-enhance.js +0 -0
- /package/dist/{src/output → output}/context.js +0 -0
- /package/dist/{src/registry → registry}/create-provider-registry.js +0 -0
- /package/dist/{src/registry → registry}/factory.js +0 -0
- /package/dist/{src/registry → registry}/origin-resolve.js +0 -0
- /package/dist/{src/registry → registry}/providers/index.js +0 -0
- /package/dist/{src/registry → registry}/providers/skills-sh.js +0 -0
- /package/dist/{src/registry → registry}/providers/types.js +0 -0
- /package/dist/{src/registry → registry}/resolve.js +0 -0
- /package/dist/{src/registry → registry}/types.js +0 -0
- /package/dist/{src/setup → setup}/detect.js +0 -0
- /package/dist/{src/setup → setup}/ripgrep-install.js +0 -0
- /package/dist/{src/setup → setup}/ripgrep-resolve.js +0 -0
- /package/dist/{src/setup → setup}/steps.js +0 -0
- /package/dist/{src/sources → sources}/include.js +0 -0
- /package/dist/{src/sources → sources}/provider-factory.js +0 -0
- /package/dist/{src/sources → sources}/provider.js +0 -0
- /package/dist/{src/sources → sources}/providers/filesystem.js +0 -0
- /package/dist/{src/sources → sources}/providers/git.js +0 -0
- /package/dist/{src/sources → sources}/providers/index.js +0 -0
- /package/dist/{src/sources → sources}/providers/install-types.js +0 -0
- /package/dist/{src/sources → sources}/providers/npm.js +0 -0
- /package/dist/{src/sources → sources}/providers/provider-utils.js +0 -0
- /package/dist/{src/sources → sources}/providers/sync-from-ref.js +0 -0
- /package/dist/{src/sources → sources}/providers/tar-utils.js +0 -0
- /package/dist/{src/sources → sources}/providers/website.js +0 -0
- /package/dist/{src/sources → sources}/resolve.js +0 -0
- /package/dist/{src/sources → sources}/types.js +0 -0
- /package/dist/{src/templates → templates}/wiki-templates.js +0 -0
- /package/dist/{src/version.js → version.js} +0 -0
- /package/dist/{src/workflows → workflows}/authoring.js +0 -0
- /package/dist/{src/workflows → workflows}/cli.js +0 -0
- /package/dist/{src/workflows → workflows}/db.js +0 -0
- /package/dist/{src/workflows → workflows}/document-cache.js +0 -0
- /package/dist/{src/workflows → workflows}/parser.js +0 -0
- /package/dist/{src/workflows → workflows}/renderer.js +0 -0
- /package/dist/{src/workflows → workflows}/schema.js +0 -0
- /package/dist/{src/workflows → workflows}/validator.js +0 -0
|
@@ -1,1144 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Unit tests for outcome / per-task / corpus / trajectory aggregation.
|
|
3
|
-
*/
|
|
4
|
-
import { describe, expect, test } from "bun:test";
|
|
5
|
-
import fs from "node:fs";
|
|
6
|
-
import path from "node:path";
|
|
7
|
-
import { aggregateAkmOverhead, aggregateByMemoryAbility, aggregateByTaskFamily, aggregateCorpus, aggregatePerTask, aggregateTrajectory, computeAkmOverhead, computeAssetRegressionCandidates, computeCorpusCoverage, computeCorpusDelta, computeDomainAggregates, computeLearningCurve, computeNegativeTransfer, computeOutcomeAggregate, computePerTaskDelta, computeWorkflowReliability, domainOfTaskId, isPathContained, LEARNING_IMPROVEMENT_THRESHOLD, materialiseMaskedStash, } from "./metrics";
|
|
8
|
-
import { benchMkdtemp } from "./tmp";
|
|
9
|
-
function ptm(overrides = {}) {
|
|
10
|
-
return {
|
|
11
|
-
passRate: 0,
|
|
12
|
-
passAt1: 0,
|
|
13
|
-
tokensPerPass: null,
|
|
14
|
-
wallclockMs: 0,
|
|
15
|
-
passRateStdev: 0,
|
|
16
|
-
budgetExceededCount: 0,
|
|
17
|
-
harnessErrorCount: 0,
|
|
18
|
-
count: 1,
|
|
19
|
-
runsWithMeasuredTokens: 0,
|
|
20
|
-
...overrides,
|
|
21
|
-
};
|
|
22
|
-
}
|
|
23
|
-
function fakeResult(overrides) {
|
|
24
|
-
return {
|
|
25
|
-
schemaVersion: 1,
|
|
26
|
-
taskId: "t",
|
|
27
|
-
arm: "akm",
|
|
28
|
-
seed: 0,
|
|
29
|
-
model: "m",
|
|
30
|
-
outcome: "pass",
|
|
31
|
-
tokens: { input: 0, output: 0 },
|
|
32
|
-
wallclockMs: 0,
|
|
33
|
-
trajectory: { correctAssetLoaded: null, feedbackRecorded: null },
|
|
34
|
-
events: [],
|
|
35
|
-
verifierStdout: "",
|
|
36
|
-
verifierExitCode: 0,
|
|
37
|
-
assetsLoaded: [],
|
|
38
|
-
...overrides,
|
|
39
|
-
};
|
|
40
|
-
}
|
|
41
|
-
describe("computeOutcomeAggregate", () => {
|
|
42
|
-
test("returns zeros on empty input", () => {
|
|
43
|
-
expect(computeOutcomeAggregate([])).toEqual({
|
|
44
|
-
passRate: 0,
|
|
45
|
-
tokensPerPass: 0,
|
|
46
|
-
wallclockMs: 0,
|
|
47
|
-
budgetExceeded: 0,
|
|
48
|
-
runsWithMeasuredTokens: 0,
|
|
49
|
-
});
|
|
50
|
-
});
|
|
51
|
-
test("computes passRate, tokensPerPass, wallclockMs across mixed outcomes", () => {
|
|
52
|
-
const results = [
|
|
53
|
-
fakeResult({ outcome: "pass", tokens: { input: 1000, output: 500 }, wallclockMs: 1000 }),
|
|
54
|
-
fakeResult({ outcome: "pass", tokens: { input: 2000, output: 1000 }, wallclockMs: 2000 }),
|
|
55
|
-
fakeResult({ outcome: "fail", tokens: { input: 500, output: 200 }, wallclockMs: 1500 }),
|
|
56
|
-
fakeResult({ outcome: "budget_exceeded", tokens: { input: 100, output: 50 }, wallclockMs: 500 }),
|
|
57
|
-
];
|
|
58
|
-
const agg = computeOutcomeAggregate(results);
|
|
59
|
-
expect(agg.passRate).toBeCloseTo(0.5);
|
|
60
|
-
expect(agg.tokensPerPass).toBeCloseTo((1500 + 3000) / 2);
|
|
61
|
-
expect(agg.wallclockMs).toBeCloseTo((1000 + 2000 + 1500 + 500) / 4);
|
|
62
|
-
expect(agg.budgetExceeded).toBe(1);
|
|
63
|
-
});
|
|
64
|
-
test("tokensPerPass is 0 (not NaN) when no runs passed", () => {
|
|
65
|
-
const results = [fakeResult({ outcome: "fail", wallclockMs: 100 })];
|
|
66
|
-
const agg = computeOutcomeAggregate(results);
|
|
67
|
-
expect(agg.passRate).toBe(0);
|
|
68
|
-
expect(agg.tokensPerPass).toBe(0);
|
|
69
|
-
});
|
|
70
|
-
test("missing token measurement is NOT silently treated as zero (issue #252)", () => {
|
|
71
|
-
// Two passes: one parsed at 1000, one missing measurement. The mean must
|
|
72
|
-
// be 1000 (the measured pass), not (1000+0)/2 = 500.
|
|
73
|
-
const results = [
|
|
74
|
-
fakeResult({
|
|
75
|
-
outcome: "pass",
|
|
76
|
-
tokens: { input: 700, output: 300 },
|
|
77
|
-
tokenMeasurement: "parsed",
|
|
78
|
-
}),
|
|
79
|
-
fakeResult({
|
|
80
|
-
outcome: "pass",
|
|
81
|
-
tokens: { input: 0, output: 0 },
|
|
82
|
-
tokenMeasurement: "missing",
|
|
83
|
-
}),
|
|
84
|
-
];
|
|
85
|
-
const agg = computeOutcomeAggregate(results);
|
|
86
|
-
expect(agg.passRate).toBeCloseTo(1);
|
|
87
|
-
expect(agg.tokensPerPass).toBeCloseTo(1000);
|
|
88
|
-
expect(agg.runsWithMeasuredTokens).toBe(1);
|
|
89
|
-
});
|
|
90
|
-
test("unsupported token measurement is also skipped from token aggregation", () => {
|
|
91
|
-
const results = [
|
|
92
|
-
fakeResult({
|
|
93
|
-
outcome: "pass",
|
|
94
|
-
tokens: { input: 0, output: 0 },
|
|
95
|
-
tokenMeasurement: "unsupported",
|
|
96
|
-
}),
|
|
97
|
-
];
|
|
98
|
-
const agg = computeOutcomeAggregate(results);
|
|
99
|
-
// No measured passes → tokensPerPass collapses to 0, but runsWithMeasuredTokens=0
|
|
100
|
-
// signals that the 0 is "unknown", not "free".
|
|
101
|
-
expect(agg.tokensPerPass).toBe(0);
|
|
102
|
-
expect(agg.runsWithMeasuredTokens).toBe(0);
|
|
103
|
-
});
|
|
104
|
-
});
|
|
105
|
-
describe("aggregatePerTask", () => {
|
|
106
|
-
test("0 of K passes — tokensPerPass is null, passRate is 0", () => {
|
|
107
|
-
const runs = [
|
|
108
|
-
fakeResult({ seed: 0, outcome: "fail", wallclockMs: 1000 }),
|
|
109
|
-
fakeResult({ seed: 1, outcome: "fail", wallclockMs: 2000 }),
|
|
110
|
-
fakeResult({ seed: 2, outcome: "harness_error", wallclockMs: 3000 }),
|
|
111
|
-
];
|
|
112
|
-
const m = aggregatePerTask(runs);
|
|
113
|
-
expect(m.passRate).toBe(0);
|
|
114
|
-
expect(m.passAt1).toBe(0);
|
|
115
|
-
expect(m.tokensPerPass).toBeNull();
|
|
116
|
-
expect(m.wallclockMs).toBe(2000);
|
|
117
|
-
expect(m.harnessErrorCount).toBe(1);
|
|
118
|
-
expect(m.budgetExceededCount).toBe(0);
|
|
119
|
-
expect(m.count).toBe(3);
|
|
120
|
-
});
|
|
121
|
-
test("K of K passes — passRate is 1, stdev is 0", () => {
|
|
122
|
-
const runs = Array.from({ length: 5 }, (_, i) => fakeResult({ seed: i, outcome: "pass", tokens: { input: 1000, output: 0 }, wallclockMs: 1000 }));
|
|
123
|
-
const m = aggregatePerTask(runs);
|
|
124
|
-
expect(m.passRate).toBe(1);
|
|
125
|
-
expect(m.passAt1).toBe(1);
|
|
126
|
-
expect(m.tokensPerPass).toBe(1000);
|
|
127
|
-
expect(m.passRateStdev).toBe(0);
|
|
128
|
-
});
|
|
129
|
-
test("partial passes — passRate, stdev, and budget_exceeded count are computed", () => {
|
|
130
|
-
const runs = [
|
|
131
|
-
fakeResult({ seed: 0, outcome: "pass", tokens: { input: 800, output: 200 }, wallclockMs: 1000 }),
|
|
132
|
-
fakeResult({ seed: 1, outcome: "pass", tokens: { input: 1200, output: 300 }, wallclockMs: 1500 }),
|
|
133
|
-
fakeResult({ seed: 2, outcome: "fail", wallclockMs: 2000 }),
|
|
134
|
-
fakeResult({ seed: 3, outcome: "budget_exceeded", wallclockMs: 3000 }),
|
|
135
|
-
];
|
|
136
|
-
const m = aggregatePerTask(runs);
|
|
137
|
-
expect(m.passRate).toBeCloseTo(0.5);
|
|
138
|
-
expect(m.passAt1).toBe(1);
|
|
139
|
-
expect(m.tokensPerPass).toBeCloseTo((1000 + 1500) / 2);
|
|
140
|
-
expect(m.budgetExceededCount).toBe(1);
|
|
141
|
-
// Sample stdev of [1, 1, 0, 0] over 4 samples = sqrt(4/3 * 0.25) — non-zero.
|
|
142
|
-
expect(m.passRateStdev).toBeGreaterThan(0);
|
|
143
|
-
});
|
|
144
|
-
test("passAt1 honours seed=0 specifically when present", () => {
|
|
145
|
-
const runs = [
|
|
146
|
-
fakeResult({ seed: 1, outcome: "pass" }),
|
|
147
|
-
fakeResult({ seed: 0, outcome: "fail" }),
|
|
148
|
-
fakeResult({ seed: 2, outcome: "pass" }),
|
|
149
|
-
];
|
|
150
|
-
const m = aggregatePerTask(runs);
|
|
151
|
-
expect(m.passAt1).toBe(0);
|
|
152
|
-
});
|
|
153
|
-
test("empty input returns a zeroed envelope", () => {
|
|
154
|
-
const m = aggregatePerTask([]);
|
|
155
|
-
expect(m.count).toBe(0);
|
|
156
|
-
expect(m.passRate).toBe(0);
|
|
157
|
-
expect(m.tokensPerPass).toBeNull();
|
|
158
|
-
expect(m.runsWithMeasuredTokens).toBe(0);
|
|
159
|
-
});
|
|
160
|
-
test("aggregatePerTask: passes with missing measurement do NOT pull tokensPerPass to zero", () => {
|
|
161
|
-
const runs = [
|
|
162
|
-
fakeResult({
|
|
163
|
-
seed: 0,
|
|
164
|
-
outcome: "pass",
|
|
165
|
-
tokens: { input: 800, output: 200 },
|
|
166
|
-
tokenMeasurement: "parsed",
|
|
167
|
-
wallclockMs: 1000,
|
|
168
|
-
}),
|
|
169
|
-
fakeResult({
|
|
170
|
-
seed: 1,
|
|
171
|
-
outcome: "pass",
|
|
172
|
-
tokens: { input: 0, output: 0 },
|
|
173
|
-
tokenMeasurement: "missing",
|
|
174
|
-
wallclockMs: 1000,
|
|
175
|
-
}),
|
|
176
|
-
];
|
|
177
|
-
const m = aggregatePerTask(runs);
|
|
178
|
-
expect(m.passRate).toBe(1);
|
|
179
|
-
// Mean is over the single measured pass, not (1000 + 0) / 2.
|
|
180
|
-
expect(m.tokensPerPass).toBeCloseTo(1000);
|
|
181
|
-
expect(m.runsWithMeasuredTokens).toBe(1);
|
|
182
|
-
expect(m.count).toBe(2);
|
|
183
|
-
});
|
|
184
|
-
test("aggregatePerTask: tokensPerPass is null when every pass has missing measurement", () => {
|
|
185
|
-
const runs = [
|
|
186
|
-
fakeResult({
|
|
187
|
-
seed: 0,
|
|
188
|
-
outcome: "pass",
|
|
189
|
-
tokens: { input: 0, output: 0 },
|
|
190
|
-
tokenMeasurement: "missing",
|
|
191
|
-
}),
|
|
192
|
-
fakeResult({
|
|
193
|
-
seed: 1,
|
|
194
|
-
outcome: "pass",
|
|
195
|
-
tokens: { input: 0, output: 0 },
|
|
196
|
-
tokenMeasurement: "unsupported",
|
|
197
|
-
}),
|
|
198
|
-
];
|
|
199
|
-
const m = aggregatePerTask(runs);
|
|
200
|
-
expect(m.passRate).toBe(1);
|
|
201
|
-
expect(m.tokensPerPass).toBeNull();
|
|
202
|
-
expect(m.runsWithMeasuredTokens).toBe(0);
|
|
203
|
-
});
|
|
204
|
-
});
|
|
205
|
-
describe("aggregateCorpus", () => {
|
|
206
|
-
test("weights every task equally regardless of seed count", () => {
|
|
207
|
-
const perTask = {
|
|
208
|
-
a: {
|
|
209
|
-
passRate: 1,
|
|
210
|
-
passAt1: 1,
|
|
211
|
-
tokensPerPass: 1000,
|
|
212
|
-
wallclockMs: 1000,
|
|
213
|
-
passRateStdev: 0,
|
|
214
|
-
budgetExceededCount: 0,
|
|
215
|
-
harnessErrorCount: 0,
|
|
216
|
-
count: 5,
|
|
217
|
-
runsWithMeasuredTokens: 5,
|
|
218
|
-
},
|
|
219
|
-
b: {
|
|
220
|
-
passRate: 0,
|
|
221
|
-
passAt1: 0,
|
|
222
|
-
tokensPerPass: null,
|
|
223
|
-
wallclockMs: 2000,
|
|
224
|
-
passRateStdev: 0,
|
|
225
|
-
budgetExceededCount: 0,
|
|
226
|
-
harnessErrorCount: 0,
|
|
227
|
-
count: 1,
|
|
228
|
-
runsWithMeasuredTokens: 0,
|
|
229
|
-
},
|
|
230
|
-
};
|
|
231
|
-
const corpus = aggregateCorpus(perTask);
|
|
232
|
-
expect(corpus.passRate).toBeCloseTo(0.5);
|
|
233
|
-
expect(corpus.wallclockMs).toBeCloseTo(1500);
|
|
234
|
-
expect(corpus.tokensPerPass).toBeCloseTo(1000); // null is dropped
|
|
235
|
-
});
|
|
236
|
-
test("tokensPerPass is null when every task has null tokensPerPass", () => {
|
|
237
|
-
const perTask = {
|
|
238
|
-
a: {
|
|
239
|
-
passRate: 0,
|
|
240
|
-
passAt1: 0,
|
|
241
|
-
tokensPerPass: null,
|
|
242
|
-
wallclockMs: 1000,
|
|
243
|
-
passRateStdev: 0,
|
|
244
|
-
budgetExceededCount: 0,
|
|
245
|
-
harnessErrorCount: 0,
|
|
246
|
-
count: 1,
|
|
247
|
-
runsWithMeasuredTokens: 0,
|
|
248
|
-
},
|
|
249
|
-
};
|
|
250
|
-
const corpus = aggregateCorpus(perTask);
|
|
251
|
-
expect(corpus.tokensPerPass).toBeNull();
|
|
252
|
-
});
|
|
253
|
-
test("empty input returns zeros + null tokens", () => {
|
|
254
|
-
const corpus = aggregateCorpus({});
|
|
255
|
-
expect(corpus.passRate).toBe(0);
|
|
256
|
-
expect(corpus.tokensPerPass).toBeNull();
|
|
257
|
-
});
|
|
258
|
-
});
|
|
259
|
-
describe("delta helpers", () => {
|
|
260
|
-
test("computeCorpusDelta — akm − noakm", () => {
|
|
261
|
-
const noakm = { passRate: 0.3, tokensPerPass: 18000, wallclockMs: 4000 };
|
|
262
|
-
const akm = { passRate: 0.7, tokensPerPass: 14000, wallclockMs: 3000 };
|
|
263
|
-
const d = computeCorpusDelta(noakm, akm);
|
|
264
|
-
expect(d.passRate).toBeCloseTo(0.4);
|
|
265
|
-
expect(d.tokensPerPass).toBeCloseTo(-4000);
|
|
266
|
-
expect(d.wallclockMs).toBeCloseTo(-1000);
|
|
267
|
-
});
|
|
268
|
-
test("computeCorpusDelta — null tokensPerPass propagates", () => {
|
|
269
|
-
const noakm = { passRate: 0, tokensPerPass: null, wallclockMs: 1 };
|
|
270
|
-
const akm = { passRate: 1, tokensPerPass: 5, wallclockMs: 2 };
|
|
271
|
-
expect(computeCorpusDelta(noakm, akm).tokensPerPass).toBeNull();
|
|
272
|
-
});
|
|
273
|
-
test("computePerTaskDelta — same null-safety rule", () => {
|
|
274
|
-
const noakm = {
|
|
275
|
-
passRate: 0,
|
|
276
|
-
passAt1: 0,
|
|
277
|
-
tokensPerPass: null,
|
|
278
|
-
wallclockMs: 0,
|
|
279
|
-
passRateStdev: 0,
|
|
280
|
-
budgetExceededCount: 0,
|
|
281
|
-
harnessErrorCount: 0,
|
|
282
|
-
count: 1,
|
|
283
|
-
runsWithMeasuredTokens: 0,
|
|
284
|
-
};
|
|
285
|
-
const akm = {
|
|
286
|
-
passRate: 1,
|
|
287
|
-
passAt1: 1,
|
|
288
|
-
tokensPerPass: 1000,
|
|
289
|
-
wallclockMs: 100,
|
|
290
|
-
passRateStdev: 0,
|
|
291
|
-
budgetExceededCount: 0,
|
|
292
|
-
harnessErrorCount: 0,
|
|
293
|
-
count: 1,
|
|
294
|
-
runsWithMeasuredTokens: 1,
|
|
295
|
-
};
|
|
296
|
-
expect(computePerTaskDelta(noakm, akm).tokensPerPass).toBeNull();
|
|
297
|
-
});
|
|
298
|
-
});
|
|
299
|
-
describe("aggregateTrajectory", () => {
|
|
300
|
-
test("returns null/0 on empty input", () => {
|
|
301
|
-
const t = aggregateTrajectory([]);
|
|
302
|
-
expect(t.correctAssetLoaded).toBeNull();
|
|
303
|
-
expect(t.feedbackRecorded).toBe(0);
|
|
304
|
-
});
|
|
305
|
-
test("correctAssetLoaded is null when no run had a known goldRef", () => {
|
|
306
|
-
const runs = [
|
|
307
|
-
fakeResult({ trajectory: { correctAssetLoaded: null, feedbackRecorded: false } }),
|
|
308
|
-
fakeResult({ trajectory: { correctAssetLoaded: null, feedbackRecorded: true } }),
|
|
309
|
-
];
|
|
310
|
-
const t = aggregateTrajectory(runs);
|
|
311
|
-
expect(t.correctAssetLoaded).toBeNull();
|
|
312
|
-
expect(t.feedbackRecorded).toBeCloseTo(0.5);
|
|
313
|
-
});
|
|
314
|
-
test("correctAssetLoaded is fraction over runs with goldRef", () => {
|
|
315
|
-
const runs = [
|
|
316
|
-
fakeResult({ trajectory: { correctAssetLoaded: true, feedbackRecorded: false } }),
|
|
317
|
-
fakeResult({ trajectory: { correctAssetLoaded: false, feedbackRecorded: false } }),
|
|
318
|
-
fakeResult({ trajectory: { correctAssetLoaded: null, feedbackRecorded: false } }),
|
|
319
|
-
];
|
|
320
|
-
const t = aggregateTrajectory(runs);
|
|
321
|
-
expect(t.correctAssetLoaded).toBeCloseTo(0.5);
|
|
322
|
-
expect(t.feedbackRecorded).toBe(0);
|
|
323
|
-
});
|
|
324
|
-
});
|
|
325
|
-
describe("domainOfTaskId", () => {
|
|
326
|
-
test("returns the segment before the first slash", () => {
|
|
327
|
-
expect(domainOfTaskId("docker-homelab/redis-healthcheck")).toBe("docker-homelab");
|
|
328
|
-
});
|
|
329
|
-
test("falls back to 'unknown' when there is no slash", () => {
|
|
330
|
-
expect(domainOfTaskId("noslash")).toBe("unknown");
|
|
331
|
-
});
|
|
332
|
-
test("falls back to 'unknown' when the slash is at index 0", () => {
|
|
333
|
-
expect(domainOfTaskId("/leading")).toBe("unknown");
|
|
334
|
-
});
|
|
335
|
-
});
|
|
336
|
-
describe("computeNegativeTransfer", () => {
|
|
337
|
-
test("returns zero count and severity when no regressions are present", () => {
|
|
338
|
-
const tasks = [
|
|
339
|
-
{ id: "d/a", noakm: ptm({ passRate: 0.4 }), akm: ptm({ passRate: 0.8 }) },
|
|
340
|
-
{ id: "d/b", noakm: ptm({ passRate: 0.5 }), akm: ptm({ passRate: 0.5 }) },
|
|
341
|
-
];
|
|
342
|
-
const out = computeNegativeTransfer(tasks);
|
|
343
|
-
expect(out.count).toBe(0);
|
|
344
|
-
expect(out.severity).toBe(0);
|
|
345
|
-
expect(out.topRegressedTasks).toEqual([]);
|
|
346
|
-
});
|
|
347
|
-
test("captures a single regression with correct delta and severity", () => {
|
|
348
|
-
const tasks = [
|
|
349
|
-
{ id: "d/a", noakm: ptm({ passRate: 0.4 }), akm: ptm({ passRate: 0.8 }) },
|
|
350
|
-
{ id: "d/regressed", noakm: ptm({ passRate: 0.6 }), akm: ptm({ passRate: 0.2 }) },
|
|
351
|
-
];
|
|
352
|
-
const out = computeNegativeTransfer(tasks);
|
|
353
|
-
expect(out.count).toBe(1);
|
|
354
|
-
expect(out.severity).toBeCloseTo(0.4);
|
|
355
|
-
expect(out.topRegressedTasks).toHaveLength(1);
|
|
356
|
-
const row = out.topRegressedTasks[0];
|
|
357
|
-
if (!row)
|
|
358
|
-
throw new Error("expected row");
|
|
359
|
-
expect(row.taskId).toBe("d/regressed");
|
|
360
|
-
expect(row.domain).toBe("d");
|
|
361
|
-
expect(row.delta).toBeCloseTo(-0.4);
|
|
362
|
-
expect(row.severity).toBeCloseTo(0.4);
|
|
363
|
-
});
|
|
364
|
-
test("multiple regressions are sorted by severity desc with deterministic tiebreak", () => {
|
|
365
|
-
const tasks = [
|
|
366
|
-
// Mild regression -0.1.
|
|
367
|
-
{ id: "alpha/x", noakm: ptm({ passRate: 0.6 }), akm: ptm({ passRate: 0.5 }) },
|
|
368
|
-
// Tied severity -0.3 (first tiebreaks by taskId asc).
|
|
369
|
-
{ id: "beta/y", noakm: ptm({ passRate: 0.8 }), akm: ptm({ passRate: 0.5 }) },
|
|
370
|
-
{ id: "alpha/z", noakm: ptm({ passRate: 0.8 }), akm: ptm({ passRate: 0.5 }) },
|
|
371
|
-
// Improvement (no regression).
|
|
372
|
-
{ id: "alpha/w", noakm: ptm({ passRate: 0.1 }), akm: ptm({ passRate: 0.9 }) },
|
|
373
|
-
];
|
|
374
|
-
const out = computeNegativeTransfer(tasks);
|
|
375
|
-
expect(out.count).toBe(3);
|
|
376
|
-
expect(out.severity).toBeCloseTo(0.7);
|
|
377
|
-
expect(out.topRegressedTasks.map((r) => r.taskId)).toEqual(["alpha/z", "beta/y", "alpha/x"]);
|
|
378
|
-
});
|
|
379
|
-
test("a task with equal pass rate is not counted as regressed", () => {
|
|
380
|
-
const tasks = [{ id: "d/eq", noakm: ptm({ passRate: 0.5 }), akm: ptm({ passRate: 0.5 }) }];
|
|
381
|
-
expect(computeNegativeTransfer(tasks).count).toBe(0);
|
|
382
|
-
});
|
|
383
|
-
});
|
|
384
|
-
describe("computeDomainAggregates", () => {
|
|
385
|
-
test("groups tasks by domain prefix", () => {
|
|
386
|
-
const tasks = [
|
|
387
|
-
{
|
|
388
|
-
id: "alpha/a",
|
|
389
|
-
noakm: ptm({ passRate: 0.4, tokensPerPass: 10000, wallclockMs: 1000 }),
|
|
390
|
-
akm: ptm({ passRate: 0.8, tokensPerPass: 8000, wallclockMs: 900 }),
|
|
391
|
-
},
|
|
392
|
-
{
|
|
393
|
-
id: "alpha/b",
|
|
394
|
-
noakm: ptm({ passRate: 0.6, tokensPerPass: 12000, wallclockMs: 2000 }),
|
|
395
|
-
akm: ptm({ passRate: 0.4, tokensPerPass: 9000, wallclockMs: 1500 }),
|
|
396
|
-
},
|
|
397
|
-
{
|
|
398
|
-
id: "beta/c",
|
|
399
|
-
noakm: ptm({ passRate: 0.2, tokensPerPass: null, wallclockMs: 500 }),
|
|
400
|
-
akm: ptm({ passRate: 0.5, tokensPerPass: 5000, wallclockMs: 600 }),
|
|
401
|
-
},
|
|
402
|
-
];
|
|
403
|
-
const rows = computeDomainAggregates(tasks);
|
|
404
|
-
expect(rows.map((r) => r.domain)).toEqual(["alpha", "beta"]);
|
|
405
|
-
const alpha = rows.find((r) => r.domain === "alpha");
|
|
406
|
-
if (!alpha)
|
|
407
|
-
throw new Error("alpha missing");
|
|
408
|
-
expect(alpha.taskCount).toBe(2);
|
|
409
|
-
expect(alpha.regressionCount).toBe(1);
|
|
410
|
-
expect(alpha.passRateNoakm).toBeCloseTo(0.5);
|
|
411
|
-
expect(alpha.passRateAkm).toBeCloseTo(0.6);
|
|
412
|
-
expect(alpha.passRateDelta).toBeCloseTo(0.1);
|
|
413
|
-
expect(alpha.tokensPerPassDelta).toBeCloseTo(8500 - 11000);
|
|
414
|
-
expect(alpha.wallclockMsDelta).toBeCloseTo(1200 - 1500);
|
|
415
|
-
const beta = rows.find((r) => r.domain === "beta");
|
|
416
|
-
if (!beta)
|
|
417
|
-
throw new Error("beta missing");
|
|
418
|
-
expect(beta.regressionCount).toBe(0);
|
|
419
|
-
// Single-side null tokensPerPass yields null delta.
|
|
420
|
-
expect(beta.tokensPerPassDelta).toBeNull();
|
|
421
|
-
});
|
|
422
|
-
test("emits an empty array on no tasks", () => {
|
|
423
|
-
expect(computeDomainAggregates([])).toEqual([]);
|
|
424
|
-
});
|
|
425
|
-
});
|
|
426
|
-
describe("computeAssetRegressionCandidates", () => {
|
|
427
|
-
function fakeRun(taskId, assets) {
|
|
428
|
-
return {
|
|
429
|
-
schemaVersion: 1,
|
|
430
|
-
taskId,
|
|
431
|
-
arm: "akm",
|
|
432
|
-
seed: 0,
|
|
433
|
-
model: "m",
|
|
434
|
-
outcome: "pass",
|
|
435
|
-
tokens: { input: 0, output: 0 },
|
|
436
|
-
wallclockMs: 0,
|
|
437
|
-
trajectory: { correctAssetLoaded: null, feedbackRecorded: null },
|
|
438
|
-
events: [],
|
|
439
|
-
verifierStdout: "",
|
|
440
|
-
verifierExitCode: 0,
|
|
441
|
-
assetsLoaded: assets,
|
|
442
|
-
};
|
|
443
|
-
}
|
|
444
|
-
test("returns empty when no regressed tasks were provided", () => {
|
|
445
|
-
expect(computeAssetRegressionCandidates([], [fakeRun("d/a", ["skill:x"])])).toEqual([]);
|
|
446
|
-
});
|
|
447
|
-
test("counts distinct regressed tasks per asset and totals raw load volume", () => {
|
|
448
|
-
const akmRuns = [
|
|
449
|
-
// task d/r1 across two seeds, same asset.
|
|
450
|
-
fakeRun("d/r1", ["skill:foo", "skill:bar"]),
|
|
451
|
-
fakeRun("d/r1", ["skill:foo"]),
|
|
452
|
-
// task d/r2 loads skill:foo (again) plus skill:baz.
|
|
453
|
-
fakeRun("d/r2", ["skill:foo", "skill:baz"]),
|
|
454
|
-
// Non-regressed task is ignored entirely.
|
|
455
|
-
fakeRun("d/clean", ["skill:foo", "skill:bar", "skill:baz"]),
|
|
456
|
-
];
|
|
457
|
-
const rows = computeAssetRegressionCandidates(["d/r1", "d/r2"], akmRuns);
|
|
458
|
-
expect(rows.map((r) => r.assetRef)).toEqual(["skill:foo", "skill:bar", "skill:baz"]);
|
|
459
|
-
const foo = rows[0];
|
|
460
|
-
if (!foo)
|
|
461
|
-
throw new Error("foo missing");
|
|
462
|
-
expect(foo.regressedTaskCount).toBe(2);
|
|
463
|
-
expect(foo.regressedTaskIds).toEqual(["d/r1", "d/r2"]);
|
|
464
|
-
expect(foo.totalLoadCount).toBe(3);
|
|
465
|
-
const bar = rows[1];
|
|
466
|
-
if (!bar)
|
|
467
|
-
throw new Error("bar missing");
|
|
468
|
-
expect(bar.regressedTaskCount).toBe(1);
|
|
469
|
-
expect(bar.totalLoadCount).toBe(1);
|
|
470
|
-
});
|
|
471
|
-
});
|
|
472
|
-
// ── Memory-operation aggregations (#262) ───────────────────────────────────
|
|
473
|
-
describe("aggregateByMemoryAbility / aggregateByTaskFamily (#262)", () => {
|
|
474
|
-
function entry(id, noakmPass, akmPass, extras = {}) {
|
|
475
|
-
return {
|
|
476
|
-
id,
|
|
477
|
-
noakm: ptm({ passRate: noakmPass }),
|
|
478
|
-
akm: ptm({ passRate: akmPass }),
|
|
479
|
-
...extras,
|
|
480
|
-
};
|
|
481
|
-
}
|
|
482
|
-
test("returns empty when no entries carry the keying tag", () => {
|
|
483
|
-
const entries = [entry("d/a", 0.4, 0.6), entry("d/b", 0.5, 0.7)];
|
|
484
|
-
expect(aggregateByMemoryAbility(entries)).toEqual([]);
|
|
485
|
-
expect(aggregateByTaskFamily(entries)).toEqual([]);
|
|
486
|
-
});
|
|
487
|
-
test("aggregateByMemoryAbility groups tasks, computes deltas + negative transfer", () => {
|
|
488
|
-
const entries = [
|
|
489
|
-
entry("d/lookup-1", 0.4, 0.8, { memoryAbility: "procedural_lookup" }),
|
|
490
|
-
entry("d/lookup-2", 0.6, 0.4, { memoryAbility: "procedural_lookup" }),
|
|
491
|
-
entry("d/compose-1", 0.0, 1.0, { memoryAbility: "multi_asset_composition" }),
|
|
492
|
-
entry("d/no-tag", 0.5, 0.7),
|
|
493
|
-
];
|
|
494
|
-
const rows = aggregateByMemoryAbility(entries);
|
|
495
|
-
expect(rows.map((r) => r.category)).toEqual(["multi_asset_composition", "procedural_lookup"]);
|
|
496
|
-
const lookup = rows.find((r) => r.category === "procedural_lookup");
|
|
497
|
-
expect(lookup?.taskCount).toBe(2);
|
|
498
|
-
expect(lookup?.passRateNoakm).toBeCloseTo(0.5);
|
|
499
|
-
expect(lookup?.passRateAkm).toBeCloseTo(0.6);
|
|
500
|
-
expect(lookup?.passRateDelta).toBeCloseTo(0.1);
|
|
501
|
-
// d/lookup-2 regressed (akm < noakm).
|
|
502
|
-
expect(lookup?.negativeTransferCount).toBe(1);
|
|
503
|
-
expect(lookup?.workflowCompliance).toBeNull();
|
|
504
|
-
});
|
|
505
|
-
test("aggregateByMemoryAbility folds workflow_compliance when at least one task supplies it", () => {
|
|
506
|
-
const entries = [
|
|
507
|
-
entry("d/a", 0.5, 0.7, { memoryAbility: "procedural_lookup", workflowCompliance: 0.8 }),
|
|
508
|
-
entry("d/b", 0.5, 0.7, { memoryAbility: "procedural_lookup" }),
|
|
509
|
-
entry("d/c", 0.5, 0.7, { memoryAbility: "procedural_lookup", workflowCompliance: 0.6 }),
|
|
510
|
-
];
|
|
511
|
-
const [row] = aggregateByMemoryAbility(entries);
|
|
512
|
-
expect(row?.workflowCompliance).toBeCloseTo(0.7);
|
|
513
|
-
});
|
|
514
|
-
test("aggregateByTaskFamily groups by family", () => {
|
|
515
|
-
const entries = [
|
|
516
|
-
entry("d/a", 0.4, 0.6, { taskFamily: "d/group-1" }),
|
|
517
|
-
entry("d/b", 0.4, 0.4, { taskFamily: "d/group-1" }),
|
|
518
|
-
entry("d/c", 0.0, 1.0, { taskFamily: "d/group-2" }),
|
|
519
|
-
];
|
|
520
|
-
const rows = aggregateByTaskFamily(entries);
|
|
521
|
-
expect(rows.map((r) => r.category)).toEqual(["d/group-1", "d/group-2"]);
|
|
522
|
-
const g1 = rows.find((r) => r.category === "d/group-1");
|
|
523
|
-
expect(g1?.taskCount).toBe(2);
|
|
524
|
-
expect(g1?.passRateDelta).toBeCloseTo(0.1);
|
|
525
|
-
});
|
|
526
|
-
test("computeCorpusCoverage counts every closed-set ability + an untagged bucket", () => {
|
|
527
|
-
const cov = computeCorpusCoverage([
|
|
528
|
-
{ memoryAbility: "procedural_lookup", taskFamily: "d/family-a" },
|
|
529
|
-
{ memoryAbility: "procedural_lookup", taskFamily: "d/family-a" },
|
|
530
|
-
{ memoryAbility: "abstention", taskFamily: "d/family-b" },
|
|
531
|
-
{ taskFamily: "d/family-c" },
|
|
532
|
-
{},
|
|
533
|
-
]);
|
|
534
|
-
expect(cov.totalTasks).toBe(5);
|
|
535
|
-
expect(cov.memoryAbilityCounts.procedural_lookup).toBe(2);
|
|
536
|
-
expect(cov.memoryAbilityCounts.abstention).toBe(1);
|
|
537
|
-
expect(cov.memoryAbilityCounts.conflict_resolution).toBe(0);
|
|
538
|
-
expect(cov.memoryAbilityCounts.untagged).toBe(2);
|
|
539
|
-
expect(cov.taskFamilyCounts["d/family-a"]).toBe(2);
|
|
540
|
-
expect(cov.taskFamilyCounts.untagged).toBe(1);
|
|
541
|
-
});
|
|
542
|
-
});
|
|
543
|
-
// ── AKM overhead (#263) ────────────────────────────────────────────────────
|
|
544
|
-
function akmEvent(eventType, ts, ref, metadata) {
|
|
545
|
-
return {
|
|
546
|
-
schemaVersion: 1,
|
|
547
|
-
id: 0,
|
|
548
|
-
ts,
|
|
549
|
-
eventType,
|
|
550
|
-
...(ref ? { ref } : {}),
|
|
551
|
-
...(metadata ? { metadata } : {}),
|
|
552
|
-
};
|
|
553
|
-
}
|
|
554
|
-
function metaMap(entries) {
|
|
555
|
-
const m = new Map();
|
|
556
|
-
for (const e of entries)
|
|
557
|
-
m.set(e.id, { goldRef: e.goldRef, expectedTransferFrom: e.expectedTransferFrom });
|
|
558
|
-
return m;
|
|
559
|
-
}
|
|
560
|
-
describe("computeAkmOverhead — no AKM calls", () => {
|
|
561
|
-
test("zero counts and null timings when run had no AKM events", () => {
|
|
562
|
-
const run = fakeResult({ taskId: "demo/none", events: [] });
|
|
563
|
-
const rows = computeAkmOverhead([run]);
|
|
564
|
-
expect(rows).toHaveLength(1);
|
|
565
|
-
const r = rows[0];
|
|
566
|
-
expect(r.searchCount).toBe(0);
|
|
567
|
-
expect(r.showCount).toBe(0);
|
|
568
|
-
expect(r.feedbackCount).toBe(0);
|
|
569
|
-
expect(r.totalToolCalls).toBe(0);
|
|
570
|
-
expect(r.assetsLoadedCount).toBe(0);
|
|
571
|
-
expect(r.timeToFirstSearchMs).toBeNull();
|
|
572
|
-
expect(r.timeToFirstCorrectAssetMs).toBeNull();
|
|
573
|
-
expect(r.contextBytesLoaded).toBeNull();
|
|
574
|
-
expect(r.assetBytesLoaded).toBeNull();
|
|
575
|
-
// Without metadata, irrelevance is unjudgeable -> null.
|
|
576
|
-
expect(r.irrelevantAssetsLoadedCount).toBeNull();
|
|
577
|
-
});
|
|
578
|
-
test("aggregate over empty array is the zero envelope", () => {
|
|
579
|
-
const agg = aggregateAkmOverhead([]);
|
|
580
|
-
expect(agg.totalRuns).toBe(0);
|
|
581
|
-
expect(agg.passingRuns).toBe(0);
|
|
582
|
-
expect(agg.toolCallsPerSuccess).toBeNull();
|
|
583
|
-
expect(agg.costPerSuccess).toBeNull();
|
|
584
|
-
expect(agg.meanTimeToFirstSearchMs).toBeNull();
|
|
585
|
-
});
|
|
586
|
-
});
|
|
587
|
-
describe("computeAkmOverhead — successful AKM use", () => {
|
|
588
|
-
test("counts search/show/feedback, computes timings and relevance", () => {
|
|
589
|
-
const run = fakeResult({
|
|
590
|
-
taskId: "demo/ok",
|
|
591
|
-
outcome: "pass",
|
|
592
|
-
tokenMeasurement: "parsed",
|
|
593
|
-
tokens: { input: 100, output: 50 },
|
|
594
|
-
events: [
|
|
595
|
-
akmEvent("search", "2026-04-27T10:00:00.000Z", undefined, { query: "deploy" }),
|
|
596
|
-
akmEvent("show", "2026-04-27T10:00:00.500Z", "skill:deploy"),
|
|
597
|
-
akmEvent("feedback", "2026-04-27T10:00:01.000Z", "skill:deploy"),
|
|
598
|
-
],
|
|
599
|
-
});
|
|
600
|
-
const tasks = metaMap([{ id: "demo/ok", goldRef: "skill:deploy", expectedTransferFrom: [] }]);
|
|
601
|
-
const rows = computeAkmOverhead([run], { taskMetadata: tasks });
|
|
602
|
-
const r = rows[0];
|
|
603
|
-
expect(r.searchCount).toBe(1);
|
|
604
|
-
expect(r.showCount).toBe(1);
|
|
605
|
-
expect(r.feedbackCount).toBe(1);
|
|
606
|
-
expect(r.totalToolCalls).toBe(3);
|
|
607
|
-
expect(r.assetsLoadedCount).toBe(1);
|
|
608
|
-
expect(r.irrelevantAssetsLoadedCount).toBe(0);
|
|
609
|
-
expect(r.timeToFirstSearchMs).toBe(0); // first search IS the run-start anchor
|
|
610
|
-
expect(r.timeToFirstCorrectAssetMs).toBe(500);
|
|
611
|
-
const agg = aggregateAkmOverhead(rows, [run]);
|
|
612
|
-
expect(agg.passingRuns).toBe(1);
|
|
613
|
-
expect(agg.toolCallsPerSuccess).toBe(3);
|
|
614
|
-
expect(agg.costPerSuccess).toBe(150);
|
|
615
|
-
});
|
|
616
|
-
test("expected_transfer_from refs are not counted as irrelevant", () => {
|
|
617
|
-
const run = fakeResult({
|
|
618
|
-
taskId: "demo/transfer",
|
|
619
|
-
events: [
|
|
620
|
-
akmEvent("show", "2026-04-27T10:00:00.000Z", "skill:foo"),
|
|
621
|
-
akmEvent("show", "2026-04-27T10:00:01.000Z", "skill:helper"),
|
|
622
|
-
],
|
|
623
|
-
});
|
|
624
|
-
const tasks = metaMap([{ id: "demo/transfer", goldRef: "skill:foo", expectedTransferFrom: ["skill:helper"] }]);
|
|
625
|
-
const rows = computeAkmOverhead([run], { taskMetadata: tasks });
|
|
626
|
-
expect(rows[0].assetsLoadedCount).toBe(2);
|
|
627
|
-
expect(rows[0].irrelevantAssetsLoadedCount).toBe(0);
|
|
628
|
-
});
|
|
629
|
-
});
|
|
630
|
-
describe("computeAkmOverhead — excessive AKM calls", () => {
|
|
631
|
-
test("high counts and low calls-per-success are surfaced", () => {
|
|
632
|
-
const goldRef = "skill:gold";
|
|
633
|
-
const noisyRun = fakeResult({
|
|
634
|
-
taskId: "demo/noisy",
|
|
635
|
-
outcome: "fail",
|
|
636
|
-
events: [
|
|
637
|
-
akmEvent("search", "2026-04-27T10:00:00.000Z"),
|
|
638
|
-
akmEvent("search", "2026-04-27T10:00:00.100Z"),
|
|
639
|
-
akmEvent("search", "2026-04-27T10:00:00.200Z"),
|
|
640
|
-
akmEvent("show", "2026-04-27T10:00:00.300Z", "skill:other"),
|
|
641
|
-
akmEvent("show", "2026-04-27T10:00:00.400Z", "skill:other2"),
|
|
642
|
-
akmEvent("show", "2026-04-27T10:00:00.500Z", "skill:other3"),
|
|
643
|
-
akmEvent("show", "2026-04-27T10:00:00.600Z", goldRef),
|
|
644
|
-
],
|
|
645
|
-
});
|
|
646
|
-
const passingRun = fakeResult({
|
|
647
|
-
taskId: "demo/easy",
|
|
648
|
-
outcome: "pass",
|
|
649
|
-
tokenMeasurement: "parsed",
|
|
650
|
-
tokens: { input: 10, output: 10 },
|
|
651
|
-
events: [akmEvent("search", "2026-04-27T10:00:00.000Z"), akmEvent("show", "2026-04-27T10:00:00.100Z", goldRef)],
|
|
652
|
-
});
|
|
653
|
-
const tasks = metaMap([
|
|
654
|
-
{ id: "demo/noisy", goldRef, expectedTransferFrom: [] },
|
|
655
|
-
{ id: "demo/easy", goldRef, expectedTransferFrom: [] },
|
|
656
|
-
]);
|
|
657
|
-
const rows = computeAkmOverhead([noisyRun, passingRun], { taskMetadata: tasks });
|
|
658
|
-
expect(rows[0].totalToolCalls).toBe(7);
|
|
659
|
-
expect(rows[0].irrelevantAssetsLoadedCount).toBe(3);
|
|
660
|
-
expect(rows[0].timeToFirstCorrectAssetMs).toBe(600);
|
|
661
|
-
expect(rows[1].totalToolCalls).toBe(2);
|
|
662
|
-
const agg = aggregateAkmOverhead(rows, [noisyRun, passingRun]);
|
|
663
|
-
expect(agg.totalToolCalls).toBe(9);
|
|
664
|
-
expect(agg.passingRuns).toBe(1);
|
|
665
|
-
// 9 tool calls for one passing run = high overhead per success.
|
|
666
|
-
expect(agg.toolCallsPerSuccess).toBe(9);
|
|
667
|
-
});
|
|
668
|
-
});
|
|
669
|
-
describe("computeAkmOverhead — missing timing/byte data", () => {
|
|
670
|
-
test("event without ts -> null first-search timing (NOT zero)", () => {
|
|
671
|
-
const run = fakeResult({
|
|
672
|
-
taskId: "demo/notime",
|
|
673
|
-
events: [
|
|
674
|
-
// No ts on event — workflow-trace assigns a synthetic order hint but
|
|
675
|
-
// ts stays undefined, so we cannot anchor a real time-offset.
|
|
676
|
-
{ schemaVersion: 1, id: 0, eventType: "search" },
|
|
677
|
-
],
|
|
678
|
-
});
|
|
679
|
-
const rows = computeAkmOverhead([run]);
|
|
680
|
-
expect(rows[0].searchCount).toBe(1);
|
|
681
|
-
expect(rows[0].timeToFirstSearchMs).toBeNull();
|
|
682
|
-
expect(rows[0].timeToFirstCorrectAssetMs).toBeNull();
|
|
683
|
-
});
|
|
684
|
-
test("byte sizes are always null for now (NOT zero)", () => {
|
|
685
|
-
const run = fakeResult({
|
|
686
|
-
events: [akmEvent("show", "2026-04-27T10:00:00.000Z", "skill:foo")],
|
|
687
|
-
});
|
|
688
|
-
const rows = computeAkmOverhead([run]);
|
|
689
|
-
expect(rows[0].contextBytesLoaded).toBeNull();
|
|
690
|
-
expect(rows[0].assetBytesLoaded).toBeNull();
|
|
691
|
-
const agg = aggregateAkmOverhead(rows, [run]);
|
|
692
|
-
expect(agg.meanContextBytesLoaded).toBeNull();
|
|
693
|
-
expect(agg.meanAssetBytesLoaded).toBeNull();
|
|
694
|
-
});
|
|
695
|
-
test("cost_per_success is null when any passing run lacks parsed token measurement", () => {
|
|
696
|
-
const passParsed = fakeResult({
|
|
697
|
-
taskId: "t1",
|
|
698
|
-
outcome: "pass",
|
|
699
|
-
tokenMeasurement: "parsed",
|
|
700
|
-
tokens: { input: 10, output: 5 },
|
|
701
|
-
events: [akmEvent("search", "2026-04-27T10:00:00.000Z")],
|
|
702
|
-
});
|
|
703
|
-
const passMissing = fakeResult({
|
|
704
|
-
taskId: "t2",
|
|
705
|
-
outcome: "pass",
|
|
706
|
-
tokenMeasurement: "missing",
|
|
707
|
-
tokens: { input: 0, output: 0 },
|
|
708
|
-
events: [akmEvent("search", "2026-04-27T10:00:00.000Z")],
|
|
709
|
-
});
|
|
710
|
-
const rows = computeAkmOverhead([passParsed, passMissing]);
|
|
711
|
-
const agg = aggregateAkmOverhead(rows, [passParsed, passMissing]);
|
|
712
|
-
expect(agg.passingRuns).toBe(2);
|
|
713
|
-
expect(agg.costPerSuccess).toBeNull();
|
|
714
|
-
});
|
|
715
|
-
test("missing task metadata -> irrelevantAssetsLoadedCount is null (not 0)", () => {
|
|
716
|
-
const run = fakeResult({
|
|
717
|
-
taskId: "demo/unknown",
|
|
718
|
-
events: [akmEvent("show", "2026-04-27T10:00:00.000Z", "skill:foo")],
|
|
719
|
-
});
|
|
720
|
-
// No metadata supplied for this task.
|
|
721
|
-
const rows = computeAkmOverhead([run]);
|
|
722
|
-
expect(rows[0].assetsLoadedCount).toBe(1);
|
|
723
|
-
expect(rows[0].irrelevantAssetsLoadedCount).toBeNull();
|
|
724
|
-
});
|
|
725
|
-
test("aggregate skips null timings rather than zero-filling", () => {
|
|
726
|
-
const noTime = fakeResult({
|
|
727
|
-
taskId: "t1",
|
|
728
|
-
outcome: "fail",
|
|
729
|
-
events: [{ schemaVersion: 1, id: 0, eventType: "search" }],
|
|
730
|
-
});
|
|
731
|
-
const withTime = fakeResult({
|
|
732
|
-
taskId: "t2",
|
|
733
|
-
outcome: "fail",
|
|
734
|
-
events: [akmEvent("search", "2026-04-27T10:00:01.000Z")],
|
|
735
|
-
});
|
|
736
|
-
const rows = computeAkmOverhead([noTime, withTime]);
|
|
737
|
-
// First run: search event has no ts -> no run-start anchor, timing null.
|
|
738
|
-
// Second run: search event IS the only event with ts, so it's both the
|
|
739
|
-
// anchor and the first search -> offset 0.
|
|
740
|
-
expect(rows[0].timeToFirstSearchMs).toBeNull();
|
|
741
|
-
expect(rows[1].timeToFirstSearchMs).toBe(0);
|
|
742
|
-
const agg = aggregateAkmOverhead(rows, [noTime, withTime]);
|
|
743
|
-
// Mean honours only the parseable observation; the null is skipped, NOT
|
|
744
|
-
// treated as zero in the numerator.
|
|
745
|
-
expect(agg.meanTimeToFirstSearchMs).toBe(0);
|
|
746
|
-
// tool_calls_per_success is null because no run passed.
|
|
747
|
-
expect(agg.toolCallsPerSuccess).toBeNull();
|
|
748
|
-
});
|
|
749
|
-
});
|
|
750
|
-
// ── computeWorkflowReliability (#258) ──────────────────────────────────────
|
|
751
|
-
function wfCheck(overrides = {}) {
|
|
752
|
-
return {
|
|
753
|
-
schemaVersion: 1,
|
|
754
|
-
workflowId: "wf-1",
|
|
755
|
-
taskId: "t1",
|
|
756
|
-
arm: "akm",
|
|
757
|
-
seed: 0,
|
|
758
|
-
status: "pass",
|
|
759
|
-
score: 1,
|
|
760
|
-
requiredPassed: 1,
|
|
761
|
-
requiredTotal: 1,
|
|
762
|
-
violations: [],
|
|
763
|
-
evidence: {
|
|
764
|
-
matchedEvents: 1,
|
|
765
|
-
feedbackRecorded: false,
|
|
766
|
-
goldAssetLoaded: false,
|
|
767
|
-
traceTruncated: false,
|
|
768
|
-
},
|
|
769
|
-
...overrides,
|
|
770
|
-
};
|
|
771
|
-
}
|
|
772
|
-
// ── Learning curve across episodes (issue #265) ────────────────────────────
|
|
773
|
-
function ep(overrides) {
|
|
774
|
-
return {
|
|
775
|
-
delta_from_previous_episode: 0,
|
|
776
|
-
cumulative_feedback_events: 0,
|
|
777
|
-
cumulative_proposals_created: 0,
|
|
778
|
-
cumulative_proposals_accepted: 0,
|
|
779
|
-
cumulative_lessons_created: 0,
|
|
780
|
-
lesson_reuse_rate: null,
|
|
781
|
-
...overrides,
|
|
782
|
-
};
|
|
783
|
-
}
|
|
784
|
-
function statuses(workflowId, taskId, statusList) {
|
|
785
|
-
return statusList.map((status, seed) => wfCheck({ workflowId, taskId, seed, status }));
|
|
786
|
-
}
|
|
787
|
-
describe("computeWorkflowReliability (#258)", () => {
|
|
788
|
-
test("empty input yields zeroed corpus + empty by_workflow", () => {
|
|
789
|
-
const result = computeWorkflowReliability([]);
|
|
790
|
-
expect(result.byWorkflow).toEqual({});
|
|
791
|
-
expect(result.corpus).toEqual({ pass_at_k: 0, pass_all_k: 0, groups: 0, tasks: 0 });
|
|
792
|
-
});
|
|
793
|
-
test("all-pass: every (task, seed) is pass → pass@k=1, pass^k=1", () => {
|
|
794
|
-
const checks = [
|
|
795
|
-
...statuses("wf-1", "t1", ["pass", "pass", "pass"]),
|
|
796
|
-
...statuses("wf-1", "t2", ["pass", "pass", "pass"]),
|
|
797
|
-
];
|
|
798
|
-
const result = computeWorkflowReliability(checks);
|
|
799
|
-
expect(result.byWorkflow["wf-1"].pass_at_k).toBe(1);
|
|
800
|
-
expect(result.byWorkflow["wf-1"].pass_all_k).toBe(1);
|
|
801
|
-
expect(result.byWorkflow["wf-1"].tasks).toBe(2);
|
|
802
|
-
expect(result.byWorkflow["wf-1"].k).toBe(3);
|
|
803
|
-
expect(result.corpus.pass_at_k).toBe(1);
|
|
804
|
-
expect(result.corpus.pass_all_k).toBe(1);
|
|
805
|
-
expect(result.corpus.groups).toBe(2);
|
|
806
|
-
expect(result.corpus.tasks).toBe(2);
|
|
807
|
-
});
|
|
808
|
-
test("none-pass: no seed is pass → pass@k=0, pass^k=0", () => {
|
|
809
|
-
const checks = [
|
|
810
|
-
...statuses("wf-1", "t1", ["fail", "fail", "fail"]),
|
|
811
|
-
...statuses("wf-1", "t2", ["partial", "fail", "harness_error"]),
|
|
812
|
-
];
|
|
813
|
-
const result = computeWorkflowReliability(checks);
|
|
814
|
-
expect(result.byWorkflow["wf-1"].pass_at_k).toBe(0);
|
|
815
|
-
expect(result.byWorkflow["wf-1"].pass_all_k).toBe(0);
|
|
816
|
-
expect(result.corpus.pass_at_k).toBe(0);
|
|
817
|
-
expect(result.corpus.pass_all_k).toBe(0);
|
|
818
|
-
});
|
|
819
|
-
test("some-pass: pass@k > 0, pass^k < pass@k when seeds disagree per task", () => {
|
|
820
|
-
// t1: 1 pass, 2 fail → counts toward pass@k (anyPass) but NOT pass^k
|
|
821
|
-
// t2: 3 pass → counts toward both
|
|
822
|
-
const checks = [
|
|
823
|
-
...statuses("wf-1", "t1", ["pass", "fail", "fail"]),
|
|
824
|
-
...statuses("wf-1", "t2", ["pass", "pass", "pass"]),
|
|
825
|
-
];
|
|
826
|
-
const result = computeWorkflowReliability(checks);
|
|
827
|
-
expect(result.byWorkflow["wf-1"].pass_at_k).toBeCloseTo(1); // both tasks have at least one pass
|
|
828
|
-
expect(result.byWorkflow["wf-1"].pass_all_k).toBeCloseTo(0.5); // only t2 is all-pass
|
|
829
|
-
expect(result.corpus.pass_at_k).toBeCloseTo(1);
|
|
830
|
-
expect(result.corpus.pass_all_k).toBeCloseTo(0.5);
|
|
831
|
-
});
|
|
832
|
-
test("mixed partial/fail: partial does NOT count as pass for reliability", () => {
|
|
833
|
-
// partial is non-pass per the strict reliability bucketing.
|
|
834
|
-
const checks = [...statuses("wf-1", "t1", ["partial", "partial", "partial"])];
|
|
835
|
-
const result = computeWorkflowReliability(checks);
|
|
836
|
-
expect(result.byWorkflow["wf-1"].pass_at_k).toBe(0);
|
|
837
|
-
expect(result.byWorkflow["wf-1"].pass_all_k).toBe(0);
|
|
838
|
-
});
|
|
839
|
-
test("not_applicable seeds are excluded from numerator and denominator", () => {
|
|
840
|
-
// Only the 2 applicable seeds matter; both are pass → 1 task all-pass.
|
|
841
|
-
const checks = [...statuses("wf-1", "t1", ["not_applicable", "pass", "pass", "not_applicable"])];
|
|
842
|
-
const result = computeWorkflowReliability(checks);
|
|
843
|
-
expect(result.byWorkflow["wf-1"].pass_at_k).toBe(1);
|
|
844
|
-
expect(result.byWorkflow["wf-1"].pass_all_k).toBe(1);
|
|
845
|
-
expect(result.byWorkflow["wf-1"].tasks).toBe(1);
|
|
846
|
-
expect(result.byWorkflow["wf-1"].k).toBe(2);
|
|
847
|
-
});
|
|
848
|
-
test("workflow with every check not_applicable is omitted (no group counted)", () => {
|
|
849
|
-
const checks = [...statuses("wf-skips", "t1", ["not_applicable", "not_applicable"])];
|
|
850
|
-
const result = computeWorkflowReliability(checks);
|
|
851
|
-
expect(result.byWorkflow["wf-skips"]).toBeUndefined();
|
|
852
|
-
expect(result.corpus.groups).toBe(0);
|
|
853
|
-
expect(result.corpus.tasks).toBe(0);
|
|
854
|
-
});
|
|
855
|
-
test("multiple workflows compute independently; corpus weights groups equally", () => {
|
|
856
|
-
// wf-a: t1 all pass (1/1 = 1, 1/1 = 1)
|
|
857
|
-
// wf-b: t2 mixed (anyPass=1, allPass=0); t3 none (0, 0)
|
|
858
|
-
// per-workflow: pass@k=0.5, pass^k=0
|
|
859
|
-
// corpus: 3 groups → pass@k = (1+1+0)/3 = 2/3; pass^k = (1+0+0)/3 = 1/3
|
|
860
|
-
const checks = [
|
|
861
|
-
...statuses("wf-a", "t1", ["pass", "pass"]),
|
|
862
|
-
...statuses("wf-b", "t2", ["pass", "fail"]),
|
|
863
|
-
...statuses("wf-b", "t3", ["fail", "fail"]),
|
|
864
|
-
];
|
|
865
|
-
const result = computeWorkflowReliability(checks);
|
|
866
|
-
expect(result.byWorkflow["wf-a"].pass_at_k).toBe(1);
|
|
867
|
-
expect(result.byWorkflow["wf-a"].pass_all_k).toBe(1);
|
|
868
|
-
expect(result.byWorkflow["wf-b"].pass_at_k).toBeCloseTo(0.5);
|
|
869
|
-
expect(result.byWorkflow["wf-b"].pass_all_k).toBe(0);
|
|
870
|
-
expect(result.corpus.pass_at_k).toBeCloseTo(2 / 3);
|
|
871
|
-
expect(result.corpus.pass_all_k).toBeCloseTo(1 / 3);
|
|
872
|
-
expect(result.corpus.groups).toBe(3);
|
|
873
|
-
expect(result.corpus.tasks).toBe(3);
|
|
874
|
-
});
|
|
875
|
-
test("harness_error is treated as non-pass (consistent with #257 bucketing)", () => {
|
|
876
|
-
const checks = [...statuses("wf-1", "t1", ["pass", "harness_error", "pass"])];
|
|
877
|
-
const result = computeWorkflowReliability(checks);
|
|
878
|
-
expect(result.byWorkflow["wf-1"].pass_at_k).toBe(1);
|
|
879
|
-
expect(result.byWorkflow["wf-1"].pass_all_k).toBe(0);
|
|
880
|
-
});
|
|
881
|
-
});
|
|
882
|
-
describe("computeLearningCurve", () => {
|
|
883
|
-
test("monotonic improvement: positive slope, time_to_improvement at first crossing", () => {
|
|
884
|
-
const episodes = [
|
|
885
|
-
ep({ episode_index: 0, pass_rate: 0.4, cumulative_feedback_events: 10 }),
|
|
886
|
-
ep({ episode_index: 1, pass_rate: 0.5, cumulative_feedback_events: 22 }),
|
|
887
|
-
ep({ episode_index: 2, pass_rate: 0.6, cumulative_feedback_events: 35 }),
|
|
888
|
-
ep({ episode_index: 3, pass_rate: 0.7, cumulative_feedback_events: 48 }),
|
|
889
|
-
];
|
|
890
|
-
const curve = computeLearningCurve(episodes);
|
|
891
|
-
expect(curve.pass_rate_by_episode).toEqual([0.4, 0.5, 0.6, 0.7]);
|
|
892
|
-
// Slope is exactly 0.1 per episode for evenly spaced 0.1 increments.
|
|
893
|
-
expect(curve.learning_slope).toBeCloseTo(0.1, 6);
|
|
894
|
-
// Episode 1 first exceeds 0.4 + 0.05 = 0.45 (0.5 > 0.45).
|
|
895
|
-
expect(curve.time_to_improvement).toBe(1);
|
|
896
|
-
// Deltas: 0, 0.1, 0.1, 0.1
|
|
897
|
-
expect(curve.episodes[0].delta_from_previous_episode).toBe(0);
|
|
898
|
-
expect(curve.episodes[1].delta_from_previous_episode).toBeCloseTo(0.1);
|
|
899
|
-
expect(curve.episodes[3].delta_from_previous_episode).toBeCloseTo(0.1);
|
|
900
|
-
});
|
|
901
|
-
test("no improvement: flat pass rate yields zero slope and null time_to_improvement", () => {
|
|
902
|
-
const episodes = [
|
|
903
|
-
ep({ episode_index: 0, pass_rate: 0.5 }),
|
|
904
|
-
ep({ episode_index: 1, pass_rate: 0.5 }),
|
|
905
|
-
ep({ episode_index: 2, pass_rate: 0.51 }),
|
|
906
|
-
ep({ episode_index: 3, pass_rate: 0.52 }),
|
|
907
|
-
];
|
|
908
|
-
const curve = computeLearningCurve(episodes);
|
|
909
|
-
expect(curve.learning_slope).toBeCloseTo(0.0073, 3);
|
|
910
|
-
// Never crosses 0.5 + 0.05 = 0.55.
|
|
911
|
-
expect(curve.time_to_improvement).toBeNull();
|
|
912
|
-
});
|
|
913
|
-
test("regression mid-episode: slope still computed, time_to_improvement honours first qualifying episode", () => {
|
|
914
|
-
// Pass rate climbs, then regresses below baseline+threshold, then recovers.
|
|
915
|
-
const episodes = [
|
|
916
|
-
ep({ episode_index: 0, pass_rate: 0.4 }),
|
|
917
|
-
ep({ episode_index: 1, pass_rate: 0.6 }), // > 0.45 → first crossing
|
|
918
|
-
ep({ episode_index: 2, pass_rate: 0.42 }), // mid-episode regression
|
|
919
|
-
ep({ episode_index: 3, pass_rate: 0.55 }),
|
|
920
|
-
];
|
|
921
|
-
const curve = computeLearningCurve(episodes);
|
|
922
|
-
// First crossing wins, even though episode 2 regresses below threshold.
|
|
923
|
-
expect(curve.time_to_improvement).toBe(1);
|
|
924
|
-
expect(curve.episodes[2].delta_from_previous_episode).toBeCloseTo(-0.18);
|
|
925
|
-
// Slope is computed across all four points; should be positive overall.
|
|
926
|
-
expect(curve.learning_slope).toBeGreaterThan(0);
|
|
927
|
-
});
|
|
928
|
-
test("single-episode degenerate input: slope is 0, time_to_improvement is null", () => {
|
|
929
|
-
const curve = computeLearningCurve([ep({ episode_index: 0, pass_rate: 0.7 })]);
|
|
930
|
-
expect(curve.pass_rate_by_episode).toEqual([0.7]);
|
|
931
|
-
expect(curve.learning_slope).toBe(0);
|
|
932
|
-
expect(curve.time_to_improvement).toBeNull();
|
|
933
|
-
// Episode 0's delta is always 0 by definition.
|
|
934
|
-
expect(curve.episodes[0].delta_from_previous_episode).toBe(0);
|
|
935
|
-
});
|
|
936
|
-
test("empty input is degenerate: empty arrays, zero slope, null time", () => {
  const curve = computeLearningCurve([]);
  // Everything collapses to its neutral value with no episodes at all.
  expect(curve.time_to_improvement).toBeNull();
  expect(curve.learning_slope).toBe(0);
  expect(curve.episodes).toEqual([]);
  expect(curve.pass_rate_by_episode).toEqual([]);
});
test("unsorted input is sorted by episode_index before processing", () => {
  // Deliberately out-of-order: [index, rate] pairs fed in shuffled order.
  const shuffled = [
    [2, 0.6],
    [0, 0.4],
    [1, 0.5],
  ].map(([episode_index, pass_rate]) => ep({ episode_index, pass_rate }));
  const curve = computeLearningCurve(shuffled);
  expect(curve.episodes.map((e) => e.episode_index)).toEqual([0, 1, 2]);
  expect(curve.pass_rate_by_episode).toEqual([0.4, 0.5, 0.6]);
  expect(curve.time_to_improvement).toBe(1);
});
test("delta_from_previous_episode is recomputed defensively from sorted pass_rates", () => {
  // Bogus caller-stamped deltas: the function must overwrite both of them.
  const stamped = [
    ep({ episode_index: 0, pass_rate: 0.4, delta_from_previous_episode: 99 }),
    ep({ episode_index: 1, pass_rate: 0.6, delta_from_previous_episode: -42 }),
  ];
  const curve = computeLearningCurve(stamped);
  expect(curve.episodes[0].delta_from_previous_episode).toBe(0);
  expect(curve.episodes[1].delta_from_previous_episode).toBeCloseTo(0.2);
});
test("threshold is strictly greater-than (exact baseline+threshold does not count)", () => {
  // With baseline 0.5 and threshold 0.05, only a rate strictly above 0.55
  // qualifies; landing on 0.55 exactly must not.
  const atThreshold = 0.5 + LEARNING_IMPROVEMENT_THRESHOLD;
  const curve = computeLearningCurve([
    ep({ episode_index: 0, pass_rate: 0.5 }),
    ep({ episode_index: 1, pass_rate: atThreshold }),
    ep({ episode_index: 2, pass_rate: atThreshold + 0.001 }),
  ]);
  expect(curve.time_to_improvement).toBe(2);
});
test("cumulative counters are echoed verbatim (caller-provided)", () => {
  const first = ep({
    episode_index: 0,
    pass_rate: 0.4,
    cumulative_feedback_events: 10,
    cumulative_proposals_created: 0,
    cumulative_proposals_accepted: 0,
    cumulative_lessons_created: 0,
    lesson_reuse_rate: null,
  });
  const second = ep({
    episode_index: 1,
    pass_rate: 0.55,
    cumulative_feedback_events: 25,
    cumulative_proposals_created: 4,
    cumulative_proposals_accepted: 3,
    cumulative_lessons_created: 3,
    lesson_reuse_rate: 0.42,
  });
  const curve = computeLearningCurve([first, second]);
  // The counters are pass-through: whatever the caller stamped comes back.
  expect(curve.episodes[1].cumulative_feedback_events).toBe(25);
  expect(curve.episodes[1].cumulative_proposals_accepted).toBe(3);
  expect(curve.episodes[1].cumulative_lessons_created).toBe(3);
  expect(curve.episodes[1].lesson_reuse_rate).toBeCloseTo(0.42);
});
});
|
|
1002
|
-
// ── #271: masked-stash path-traversal hardening ─────────────────────────────
|
|
1003
|
-
describe("materialiseMaskedStash stashName containment (#271)", () => {
  // Builds a fixtures root plus a sibling directory OUTSIDE it that carries a
  // MANIFEST.json. A traversal-shaped stashName could reach that sibling, and
  // the manifest-existence gate would then pass unless containment is
  // enforced first.
  function makeFixturesRoot() {
    const fixturesRoot = benchMkdtemp("akm-bench-fixtures-");
    const sibling = path.join(path.dirname(fixturesRoot), `sibling-${path.basename(fixturesRoot)}`);
    fs.mkdirSync(sibling, { recursive: true });
    fs.writeFileSync(path.join(sibling, "MANIFEST.json"), "{}");
    const cleanup = () => {
      fs.rmSync(fixturesRoot, { recursive: true, force: true });
      fs.rmSync(sibling, { recursive: true, force: true });
    };
    return { fixturesRoot, cleanup };
  }

  test("rejects stashName starting with '..' (relative traversal)", () => {
    const { fixturesRoot, cleanup } = makeFixturesRoot();
    try {
      const traversal = `../sibling-${path.basename(fixturesRoot)}`;
      expect(materialiseMaskedStash(fixturesRoot, traversal, "skill:foo")).toBeNull();
    } finally {
      cleanup();
    }
  });

  test("rejects absolute stashName", () => {
    const { fixturesRoot, cleanup } = makeFixturesRoot();
    try {
      expect(materialiseMaskedStash(fixturesRoot, "/etc", "skill:foo")).toBeNull();
    } finally {
      cleanup();
    }
  });

  test("rejects nested traversal that would escape fixturesRoot", () => {
    const { fixturesRoot, cleanup } = makeFixturesRoot();
    try {
      // path.resolve(fixturesRoot, "a/../../sibling-xyz") lands on the
      // planted sibling directory when containment is not enforced.
      const nested = `a/../../sibling-${path.basename(fixturesRoot)}`;
      expect(materialiseMaskedStash(fixturesRoot, nested, "skill:foo")).toBeNull();
    } finally {
      cleanup();
    }
  });

  test("returns null (not a crash) for a contained stashName with no MANIFEST", () => {
    const { fixturesRoot, cleanup } = makeFixturesRoot();
    try {
      // "inner" sits inside fixturesRoot but has no MANIFEST.json, so
      // containment passes and the pre-existing manifest gate yields null —
      // confirming the new check does not reject the happy path.
      fs.mkdirSync(path.join(fixturesRoot, "inner"));
      expect(materialiseMaskedStash(fixturesRoot, "inner", "skill:foo")).toBeNull();
    } finally {
      cleanup();
    }
  });
});
describe("isPathContained symlink resolution (#271)", () => {
  test("rejects a symlink inside root that points outside (alignment with isWithin)", () => {
    const tmpRoot = benchMkdtemp("akm-bench-contain-root-");
    const outside = benchMkdtemp("akm-bench-contain-outside-");
    try {
      // The real file lives outside tmpRoot. Without realpath alignment the
      // link path looks contained ("escape" is just a basename under root)
      // and the masking heuristic would happily rmSync it.
      const victim = path.join(outside, "victim");
      fs.writeFileSync(victim, "do-not-delete");
      const link = path.join(tmpRoot, "escape");
      try {
        fs.symlinkSync(victim, link);
      } catch (err) {
        // Symlink creation may be denied (e.g. Windows without dev mode) —
        // skip in those environments rather than failing.
        if (process.platform === "win32") return;
        throw err;
      }
      // Pre-#271: rel === "escape" → looks contained. With safeRealpath the
      // link resolves to outside/victim, so rel begins with "..".
      expect(isPathContained(tmpRoot, link)).toBe(false);
      // Fixture sanity (not the function under test): the target must still
      // exist after the rejection.
      expect(fs.existsSync(victim)).toBe(true);
    } finally {
      fs.rmSync(tmpRoot, { recursive: true, force: true });
      fs.rmSync(outside, { recursive: true, force: true });
    }
  });

  test("accepts a symlink inside root that points back inside root", () => {
    const tmpRoot = benchMkdtemp("akm-bench-contain-inside-");
    try {
      const target = path.join(tmpRoot, "real");
      fs.writeFileSync(target, "ok");
      const link = path.join(tmpRoot, "link");
      try {
        fs.symlinkSync(target, link);
      } catch (err) {
        if (process.platform === "win32") return;
        throw err;
      }
      expect(isPathContained(tmpRoot, link)).toBe(true);
    } finally {
      fs.rmSync(tmpRoot, { recursive: true, force: true });
    }
  });

  test("accepts a non-existent child path under root (covers safeRealpath ancestor walk)", () => {
    const tmpRoot = benchMkdtemp("akm-bench-contain-pending-");
    try {
      const pending = path.join(tmpRoot, "not-yet-created", "child.md");
      expect(isPathContained(tmpRoot, pending)).toBe(true);
    } finally {
      fs.rmSync(tmpRoot, { recursive: true, force: true });
    }
  });

  test("rejects an absolute target outside root", () => {
    const tmpRoot = benchMkdtemp("akm-bench-contain-abs-");
    const outside = benchMkdtemp("akm-bench-contain-abs-outside-");
    try {
      expect(isPathContained(tmpRoot, path.join(outside, "x"))).toBe(false);
    } finally {
      fs.rmSync(tmpRoot, { recursive: true, force: true });
      fs.rmSync(outside, { recursive: true, force: true });
    }
  });
});