@nuggetslife/vc 0.0.28 → 0.0.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,47 @@
1
+ # W3C Conformance Gate
2
+
3
+ The CI runs the W3C JSON-LD 1.1 API and RDF Dataset Canonicalization (RDFC-1.0)
4
+ test suites against this binding via `test_w3c_conformance.mjs`. The job fails
5
+ only when a test that **previously passed** starts failing — never on tests
6
+ that were already failing in the committed baseline.
7
+
8
+ ## Files
9
+
10
+ | File | Purpose |
11
+ |------|---------|
12
+ | `test_w3c_conformance.mjs` | Runner. Modes: `--check`, `--update`, `--verbose`, `--only <algo>`. |
13
+ | `w3c-baseline.json` | Committed baseline. The "passing" set per algo is what CI gates against. |
14
+ | `w3c-denylist.json` | Tests skipped because they trigger Rust-level deadlocks in clientffi. Each entry is a real bug — remove as fixed. |
15
+ | `scripts/fetch-w3c-tests.sh` | Clones the W3C test suites at pinned SHAs into `tmp-w3c-tests/` (gitignored). |
16
+
17
+ ## Local usage
18
+
19
+ ```bash
20
+ # Fetch test fixtures (one-off, ~50 MB)
21
+ yarn fetch:w3c-tests
22
+
23
+ # Verify nothing has regressed vs baseline
24
+ yarn test:w3c
25
+
26
+ # Re-run the suite and overwrite the baseline. Add `--runs 3` for
27
+ # fast algos (canonize, fromRdf) to take the intersection of 3 runs
28
+ # and filter out flaky tests.
29
+ yarn test:w3c:update
30
+ ```
31
+
32
+ ## Adding a denylist entry
33
+
34
+ If a new clientffi-side bug causes the runner to hang on a particular test,
35
+ add the test ID to the relevant `algo` array in `w3c-denylist.json` and open
36
+ an issue for the underlying bug.
37
+
38
+ ## Coverage caveats
39
+
40
+ - `flatten` and `toRdf` algos are **not yet** in the gated baseline. Each has
41
+ multiple deep deadlocks in clientffi that the JS-level per-test timeout
42
+ cannot unblock. They will be added once the underlying issues are mapped
43
+ (or after the migration to a mature JSON-LD crate).
44
+ - `expand` and `compact` use single-run baselines because intersection-of-3
45
+ runs exhausts the NAPI worker pool on these large suites. Minor flakes
46
+ (1-3 tests) may produce false regressions; rerun the CI job once before
47
+ investigating.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nuggetslife/vc",
3
- "version": "0.0.28",
3
+ "version": "0.0.30",
4
4
  "main": "index.js",
5
5
  "types": "index.d.ts",
6
6
  "napi": {
@@ -34,15 +34,18 @@
34
34
  "prepublishOnly": "napi prepublish -t npm",
35
35
  "test": "node test.mjs && node test_jose.mjs && node test_sd_jwt.mjs && node test_bbs_ietf.mjs && node test_bbs_2023.mjs && node test_jsonld_crossverify.mjs && node test_backward_compat.mjs",
36
36
  "universal": "napi universal",
37
- "version": "napi version"
37
+ "version": "napi version",
38
+ "test:w3c": "node test_w3c_conformance.mjs --check",
39
+ "test:w3c:update": "node test_w3c_conformance.mjs --update",
40
+ "fetch:w3c-tests": "bash scripts/fetch-w3c-tests.sh"
38
41
  },
39
42
  "packageManager": "yarn@4.3.1",
40
43
  "optionalDependencies": {
41
- "@nuggetslife/vc-darwin-arm64": "0.0.28",
42
- "@nuggetslife/vc-linux-arm64-gnu": "0.0.28",
43
- "@nuggetslife/vc-linux-arm64-musl": "0.0.28",
44
- "@nuggetslife/vc-linux-x64-gnu": "0.0.28",
45
- "@nuggetslife/vc-linux-x64-musl": "0.0.28"
44
+ "@nuggetslife/vc-darwin-arm64": "0.0.30",
45
+ "@nuggetslife/vc-linux-arm64-gnu": "0.0.30",
46
+ "@nuggetslife/vc-linux-arm64-musl": "0.0.30",
47
+ "@nuggetslife/vc-linux-x64-gnu": "0.0.30",
48
+ "@nuggetslife/vc-linux-x64-musl": "0.0.30"
46
49
  },
47
50
  "dependencies": {}
48
51
  }
@@ -0,0 +1,28 @@
1
+ #!/usr/bin/env bash
2
+ # Clone the W3C JSON-LD 1.1 API and RDF Dataset Canonicalization test suites
3
+ # into vc/js/tmp-w3c-tests/. Pinned to specific commits for reproducibility.
4
+
5
+ set -euo pipefail
6
+
7
+ cd "$(dirname "$0")/.."
8
+
9
+ JLD_API_REPO="https://github.com/w3c/json-ld-api.git"
10
+ JLD_API_SHA="04a4eb7dc7cbc313f3f5be7ad9a3b06e87741693"
11
+
12
+ RDF_CANON_REPO="https://github.com/w3c/rdf-canon.git"
13
+ RDF_CANON_SHA="15619df2fda7a4ca88308733789b6774517f9638"
14
+
15
+ mkdir -p tmp-w3c-tests
16
+ cd tmp-w3c-tests
17
+
18
+ if [ ! -d json-ld-api ]; then
19
+ git clone --filter=blob:none "$JLD_API_REPO"
20
+ fi
21
+ (cd json-ld-api && git fetch --depth=1 origin "$JLD_API_SHA" && git checkout --detach "$JLD_API_SHA")
22
+
23
+ if [ ! -d rdf-canon ]; then
24
+ git clone --filter=blob:none "$RDF_CANON_REPO"
25
+ fi
26
+ (cd rdf-canon && git fetch --depth=1 origin "$RDF_CANON_SHA" && git checkout --detach "$RDF_CANON_SHA")
27
+
28
+ echo "W3C suites fetched at pinned SHAs."
@@ -254,3 +254,179 @@ test('cross-verify: expand → toRDF → fromRDF round-trip matches JS reference
254
254
 
255
255
  assert.deepStrictEqual(napiResult, jsResult);
256
256
  });
257
+
258
+ //
259
+ // Regression: subjects without @id must get blank node identifiers
260
+ // (https://github.com/NuggetsLtd/montpellier/issues/4642)
261
+ //
262
+
263
+ test('cross-verify: toRDF preserves nested typed objects without @id', async () => {
264
+ const doc = {
265
+ "@context": {
266
+ "schema": "http://schema.org/",
267
+ "metrics": "schema:metrics",
268
+ "name": "schema:name",
269
+ "count": "schema:count",
270
+ "MyType": {
271
+ "@id": "schema:MyType",
272
+ "@context": {
273
+ "type": "@type",
274
+ "count": { "@id": "schema:count", "@type": "http://www.w3.org/2001/XMLSchema#integer" },
275
+ },
276
+ },
277
+ },
278
+ "@id": "http://example.org/thing",
279
+ "name": "Test",
280
+ "metrics": {
281
+ "type": "MyType",
282
+ "count": 42,
283
+ },
284
+ };
285
+
286
+ const proc = new JsonLd();
287
+
288
+ const napiResult = await proc.toRDF(doc, { format: 'application/n-quads' });
289
+ const jsResult = await jsonld.toRDF(doc, { format: 'application/n-quads' });
290
+
291
+ const napiStmts = napiResult.split('\n').filter(s => s.trim()).sort();
292
+ const jsStmts = jsResult.split('\n').filter(s => s.trim()).sort();
293
+
294
+ assert.strictEqual(napiStmts.length, jsStmts.length,
295
+ `Statement count mismatch: NAPI=${napiStmts.length}, JS=${jsStmts.length}`);
296
+ });
297
+
298
+ test('cross-verify: canonize preserves nested typed objects without @id', async () => {
299
+ const doc = {
300
+ "@context": {
301
+ "schema": "http://schema.org/",
302
+ "metrics": "schema:metrics",
303
+ "name": "schema:name",
304
+ "MyType": {
305
+ "@id": "schema:MyType",
306
+ "@context": {
307
+ "type": "@type",
308
+ "count": { "@id": "schema:count", "@type": "http://www.w3.org/2001/XMLSchema#integer" },
309
+ },
310
+ },
311
+ },
312
+ "@id": "http://example.org/thing",
313
+ "name": "Test",
314
+ "metrics": {
315
+ "type": "MyType",
316
+ "count": 42,
317
+ },
318
+ };
319
+
320
+ const proc = new JsonLd();
321
+
322
+ const napiResult = await proc.canonize(doc);
323
+ const jsResult = await jsonld.canonize(doc, {
324
+ algorithm: 'URDNA2015',
325
+ format: 'application/n-quads',
326
+ safe: false,
327
+ });
328
+
329
+ assert.strictEqual(napiResult, jsResult);
330
+ });
331
+
332
+ //
333
+ // Regression: terms with @type: "@json" must produce JSON literal quads
334
+ // (parity with jsonld@3.1.0 / jsonld@8.3.3 — the libs that sign Nuggets VCs)
335
+ //
336
+
337
+ test('cross-verify: expand emits JSON literal for term with @type: @json (flat context)', async () => {
338
+ const doc = {
339
+ '@context': {
340
+ '@version': 1.1,
341
+ inner: { '@id': 'https://example.com/inner', '@type': '@json' }
342
+ },
343
+ inner: { hello: 'world' }
344
+ };
345
+
346
+ const proc = new JsonLd();
347
+ const napiResult = await proc.expand(doc);
348
+ const jsResult = await jsonld.expand(doc);
349
+
350
+ assert.deepStrictEqual(napiResult, jsResult);
351
+ });
352
+
353
+ test('cross-verify: expand emits JSON literal for term with @type: @json (term-scoped context)', async () => {
354
+ // Mirrors the KYB v1 shape where `dnbData` is defined inside `result`'s
355
+ // term-scoped context. See packages/document-loader/context/kybV1.json.
356
+ const doc = {
357
+ '@context': {
358
+ '@version': 1.1,
359
+ outer: {
360
+ '@id': 'https://example.com/outer',
361
+ '@context': {
362
+ '@version': 1.1,
363
+ inner: { '@id': 'https://example.com/inner', '@type': '@json' }
364
+ }
365
+ }
366
+ },
367
+ outer: { inner: { organization: { primaryName: 'Acme' } } }
368
+ };
369
+
370
+ const proc = new JsonLd();
371
+ const napiResult = await proc.expand(doc);
372
+ const jsResult = await jsonld.expand(doc);
373
+
374
+ assert.deepStrictEqual(napiResult, jsResult);
375
+ });
376
+
377
+ test('cross-verify: canonize emits JSON literal quad for term with @type: @json', async () => {
378
+ const doc = {
379
+ '@context': {
380
+ '@version': 1.1,
381
+ inner: { '@id': 'https://example.com/inner', '@type': '@json' }
382
+ },
383
+ inner: { b: 2, a: 1 }
384
+ };
385
+
386
+ const proc = new JsonLd();
387
+ const napiResult = await proc.canonize(doc);
388
+ const jsResult = await jsonld.canonize(doc, {
389
+ algorithm: 'URDNA2015',
390
+ format: 'application/n-quads',
391
+ safe: false,
392
+ });
393
+
394
+ assert.strictEqual(napiResult, jsResult);
395
+ });
396
+
397
+ //
398
+ // Regression: blank-node objects (no @id) in arrays must each produce a
399
+ // distinct quad. Earlier `add_value` deduped equal serde Values, collapsing
400
+ // `[{}, {}, {}]` to a single blank node and silently shrinking signed
401
+ // message sets — breaking any BBS+ proof over arrays that contain repeats.
402
+ //
403
+
404
+ test('cross-verify: array of empty objects keeps each blank node distinct', async () => {
405
+ const doc = {
406
+ '@context': { items: { '@id': 'https://example.com/items' } },
407
+ items: [{}, {}, {}]
408
+ };
409
+
410
+ const proc = new JsonLd();
411
+ const napiResult = await proc.expand(doc);
412
+ const jsResult = await jsonld.expand(doc);
413
+
414
+ assert.deepStrictEqual(napiResult, jsResult);
415
+ });
416
+
417
+ test('cross-verify: toRDF with array of empty objects emits one quad per element', async () => {
418
+ const doc = {
419
+ '@context': { items: { '@id': 'https://example.com/items' } },
420
+ items: [{}, {}, 'a', {}]
421
+ };
422
+
423
+ const proc = new JsonLd();
424
+ const napiNquads = await proc.toRDF(doc, { format: 'application/n-quads' });
425
+ const jsNquads = await jsonld.toRDF(doc, { format: 'application/n-quads' });
426
+
427
+ const napiCount = napiNquads.split('\n').filter(Boolean).length;
428
+ const jsCount = jsNquads.split('\n').filter(Boolean).length;
429
+
430
+ assert.strictEqual(napiCount, jsCount,
431
+ `expected ${jsCount} quads (one per array element), got ${napiCount}`);
432
+ });
@@ -0,0 +1,346 @@
1
+ // W3C JSON-LD 1.1 API + RDF Dataset Canonicalization (RDFC-1.0) conformance
2
+ // runner for the clientffi NAPI JsonLd binding.
3
+ //
4
+ // Two modes:
5
+ // node test_w3c_conformance.mjs --check (CI default)
6
+ // run all suites, compare against w3c-baseline.json, exit non-zero
7
+ // if any test that previously passed now fails (a regression).
8
+ //
9
+ // node test_w3c_conformance.mjs --update
10
+ // run all suites, overwrite w3c-baseline.json with the current
11
+ // pass set. Use when intentionally widening conformance.
12
+ //
13
+ // Without flags, runs and prints a summary table without baseline comparison.
14
+ //
15
+ // Other flags:
16
+ // --only <algo> limit to one of: expand|compact|flatten|toRdf|fromRdf|canonize
17
+ // --verbose print PASS/FAIL/ERR per test
18
+
19
+ import { readFileSync, writeFileSync, existsSync, readdirSync, statSync } from 'node:fs';
20
+ import { resolve, dirname, join } from 'node:path';
21
+ import { fileURLToPath } from 'node:url';
22
+ import { JsonLd } from './index.js';
23
+
24
+ const __dirname = dirname(fileURLToPath(import.meta.url));
25
+ const args = process.argv.slice(2);
26
+ const flag = (n) => args.includes(n);
27
+ const valOf = (n) => { const i = args.indexOf(n); return i >= 0 ? args[i + 1] : null; };
28
+ const verbose = flag('--verbose');
29
+ const onlyArg = valOf('--only');
30
+ const checkBaseline = flag('--check');
31
+ const updateBaseline = flag('--update');
32
+
33
+ const TESTS_ROOT = resolve(__dirname, 'tmp-w3c-tests');
34
+ const JLD_TESTS = join(TESTS_ROOT, 'json-ld-api', 'tests');
35
+ const RDFC_TESTS = join(TESTS_ROOT, 'rdf-canon', 'tests');
36
+ const JLD_BASE = 'https://w3c.github.io/json-ld-api/tests/';
37
+ const BASELINE_PATH = join(__dirname, 'w3c-baseline.json');
38
+
39
+ // Per-test wall-clock cap. clientffi has known hangs in a few hot paths;
40
+ // without this they freeze the runner.
41
+ const TIMEOUT_MS = 30000;
42
+ const PROGRESS_EVERY = 25; // emit a heartbeat every N tests so CI's
43
+ // no_output_timeout doesn't kill the runner
44
+
45
+ function withTimeout(promiseOrValue, label) {
46
+ // proc.fromRDF returns synchronously; everything else is async.
47
+ const p = Promise.resolve(promiseOrValue);
48
+ let t;
49
+ const timeoutPromise = new Promise((_, reject) => {
50
+ t = setTimeout(() => reject(new Error('timeout: ' + label)), TIMEOUT_MS);
51
+ });
52
+ // If the main op settles first, the timeout-rejection still fires later;
53
+ // consume that specific rejection rather than suppressing process-wide.
54
+ timeoutPromise.catch(() => {});
55
+ return Promise.race([p.finally(() => clearTimeout(t)), timeoutPromise]);
56
+ }
57
+
58
+ const loadJson = (p) => JSON.parse(readFileSync(p, 'utf8'));
59
+ const loadText = (p) => readFileSync(p, 'utf8');
60
+
61
+ function preloadJldContexts() {
62
+ const ctxs = {};
63
+ function walk(dir, prefix) {
64
+ for (const name of readdirSync(dir)) {
65
+ const full = join(dir, name);
66
+ if (statSync(full).isDirectory()) walk(full, prefix + name + '/');
67
+ else if (name.endsWith('.jsonld') || name.endsWith('.json')) {
68
+ try { ctxs[JLD_BASE + prefix + name] = loadJson(full); } catch {}
69
+ }
70
+ }
71
+ }
72
+ walk(JLD_TESTS, '');
73
+ return ctxs;
74
+ }
75
+
76
+ function canonicalStringify(v) {
77
+ if (v === null || typeof v !== 'object') return JSON.stringify(v);
78
+ if (Array.isArray(v)) return '[' + v.map(canonicalStringify).join(',') + ']';
79
+ const keys = Object.keys(v).sort();
80
+ return '{' + keys.map(k => JSON.stringify(k) + ':' + canonicalStringify(v[k])).join(',') + '}';
81
+ }
82
+ const jsonEqual = (a, b) => canonicalStringify(a) === canonicalStringify(b);
83
+ const normNQuads = (s) => typeof s === 'string'
84
+ ? s.split('\n').map(l => l.trim()).filter(Boolean).sort().join('\n')
85
+ : s;
86
+
87
+ const DENYLIST_PATH = join(__dirname, 'w3c-denylist.json');
88
+ let DENYLIST = {};
89
+ if (existsSync(DENYLIST_PATH)) {
90
+ DENYLIST = loadJson(DENYLIST_PATH);
91
+ }
92
+ const isDenied = (algo, id) => (DENYLIST[algo] || []).includes(id);
93
+
94
+ // In --check mode, restrict the run to baseline-passing tests only — the
95
+ // gate's purpose is regression detection, not full re-evaluation. Cuts
96
+ // CI runtime ~70% (skips ~450 baseline-failing tests).
97
+ let CHECK_ALLOW = null;
98
+ if (checkBaseline && existsSync(BASELINE_PATH)) {
99
+ const b = loadJson(BASELINE_PATH);
100
+ CHECK_ALLOW = {};
101
+ for (const [algo, r] of Object.entries(b)) {
102
+ CHECK_ALLOW[algo] = new Set(r.passing || []);
103
+ }
104
+ }
105
+ const isInCheckScope = (algo, id) =>
106
+ !CHECK_ALLOW || (CHECK_ALLOW[algo] && CHECK_ALLOW[algo].has(id));
107
+
108
+ const RESULTS = {}; // { algo: { passing: Set, failing: Set, erroring: Set } }
109
+ function record(algo, id, status) {
110
+ const r = RESULTS[algo] ||= { passing: new Set(), failing: new Set(), erroring: new Set() };
111
+ if (status === 'pass') r.passing.add(id);
112
+ else if (status === 'fail') r.failing.add(id);
113
+ else r.erroring.add(id);
114
+ if (verbose) {
115
+ const tag = { pass: 'PASS', fail: 'FAIL', err: 'ERR ' }[status];
116
+ process.stderr.write(`[${tag}] ${algo} ${id}\n`);
117
+ }
118
+ }
119
+
120
+ async function runJld(algo, methodName) {
121
+ const manifestPath = join(JLD_TESTS, `${algo}-manifest.jsonld`);
122
+ if (!existsSync(manifestPath)) return;
123
+ const manifest = loadJson(manifestPath);
124
+ const contexts = preloadJldContexts();
125
+ const proc = new JsonLd({ contexts });
126
+ let seen = 0;
127
+
128
+ for (const entry of manifest.sequence) {
129
+ const types = Array.isArray(entry['@type']) ? entry['@type'] : [entry['@type']];
130
+ const isPositive = types.includes('jld:PositiveEvaluationTest');
131
+ const isNegative = types.includes('jld:NegativeEvaluationTest');
132
+ const id = entry['@id'];
133
+ if (!isPositive && !isNegative) continue;
134
+ if (isDenied(algo, id)) continue;
135
+ if (!isInCheckScope(algo, id)) continue;
136
+ seen += 1;
137
+ if (seen % PROGRESS_EVERY === 0) {
138
+ process.stderr.write(` ${algo}: ${seen} tests processed\n`);
139
+ }
140
+ const inputUrl = JLD_BASE + entry.input;
141
+ const inputPath = join(JLD_TESTS, entry.input);
142
+ if (!existsSync(inputPath)) { record(algo, id, 'err'); continue; }
143
+
144
+ const opt = entry.option || {};
145
+ const callOpts = { base: opt.base || inputUrl };
146
+ if (opt.processingMode) callOpts.processingMode = opt.processingMode;
147
+ if (opt.expandContext) {
148
+ const ctxUrl = JLD_BASE + opt.expandContext;
149
+ callOpts.expandContext = contexts[ctxUrl] || ctxUrl;
150
+ }
151
+
152
+ try {
153
+ const input = methodName === 'fromRDF' ? loadText(inputPath) : loadJson(inputPath);
154
+ let napiResult;
155
+ if (methodName === 'expand') napiResult = await withTimeout(proc.expand(input, callOpts), id);
156
+ else if (methodName === 'compact') {
157
+ const ctxUrl = JLD_BASE + entry.context;
158
+ const ctx = contexts[ctxUrl] || ctxUrl;
159
+ napiResult = await withTimeout(proc.compact(input, ctx, callOpts), id);
160
+ } else if (methodName === 'flatten') {
161
+ const ctxUrl = entry.context ? (JLD_BASE + entry.context) : null;
162
+ const ctx = ctxUrl ? (contexts[ctxUrl] || ctxUrl) : null;
163
+ napiResult = await withTimeout(proc.flatten(input, ctx, callOpts), id);
164
+ } else if (methodName === 'toRDF') {
165
+ napiResult = await withTimeout(proc.toRDF(input, { ...callOpts, format: 'application/n-quads' }), id);
166
+ } else if (methodName === 'fromRDF') napiResult = await withTimeout(proc.fromRDF(input, callOpts), id);
167
+
168
+ if (isNegative) { record(algo, id, 'fail'); continue; }
169
+
170
+ const expectPath = join(JLD_TESTS, entry.expect);
171
+ if (!existsSync(expectPath)) { record(algo, id, 'err'); continue; }
172
+ const expected = methodName === 'toRDF' ? loadText(expectPath) : loadJson(expectPath);
173
+
174
+ const ok = methodName === 'toRDF'
175
+ ? normNQuads(napiResult) === normNQuads(expected)
176
+ : jsonEqual(napiResult, expected);
177
+ record(algo, id, ok ? 'pass' : 'fail');
178
+ } catch (e) {
179
+ // Negative tests expect a processor error; only count as pass if it
180
+ // genuinely came from the processor, not from a timeout or harness bug.
181
+ const isTimeout = e && /^timeout:/.test(e.message || '');
182
+ record(algo, id, isNegative && !isTimeout ? 'pass' : 'err');
183
+ }
184
+ }
185
+ }
186
+
187
+ async function runRdfc() {
188
+ const manifestPath = join(RDFC_TESTS, 'manifest.jsonld');
189
+ if (!existsSync(manifestPath)) return;
190
+ const manifest = loadJson(manifestPath);
191
+ const algo = 'canonize';
192
+ const proc = new JsonLd();
193
+ let seen = 0;
194
+
195
+ for (const entry of manifest.entries) {
196
+ if (entry.type !== 'rdfc:RDFC10EvalTest') continue;
197
+ const id = entry.id;
198
+ if (isDenied('canonize', id)) continue;
199
+ if (!isInCheckScope('canonize', id)) continue;
200
+ seen += 1;
201
+ if (seen % PROGRESS_EVERY === 0) {
202
+ process.stderr.write(` canonize: ${seen} tests processed\n`);
203
+ }
204
+ const inputPath = join(RDFC_TESTS, entry.action);
205
+ const expectedPath = join(RDFC_TESTS, entry.result);
206
+ if (!existsSync(inputPath) || !existsSync(expectedPath)) { record(algo, id, 'err'); continue; }
207
+ try {
208
+ const input = loadText(inputPath);
209
+ const expected = loadText(expectedPath);
210
+ const opts = {
211
+ inputFormat: 'application/n-quads',
212
+ format: 'application/n-quads',
213
+ algorithm: 'RDFC-1.0',
214
+ };
215
+ if (entry.hashAlgorithm) opts.hashAlgorithm = entry.hashAlgorithm;
216
+ const got = await withTimeout(proc.canonize(input, opts), id);
217
+ record(algo, id, got === expected ? 'pass' : 'fail');
218
+ } catch (e) {
219
+ record(algo, id, 'err');
220
+ }
221
+ }
222
+ }
223
+
224
+ async function runOnce(algo) {
225
+ process.stderr.write(`Running ${algo}...\n`);
226
+ if (algo === 'canonize') await runRdfc();
227
+ else if (algo === 'expand') await runJld('expand', 'expand');
228
+ else if (algo === 'compact') await runJld('compact', 'compact');
229
+ else if (algo === 'flatten') await runJld('flatten', 'flatten');
230
+ else if (algo === 'toRdf') await runJld('toRdf', 'toRDF');
231
+ else if (algo === 'fromRdf') await runJld('fromRdf', 'fromRDF');
232
+ }
233
+
234
+ async function main() {
235
+ if (!existsSync(JLD_TESTS) || !existsSync(RDFC_TESTS)) {
236
+ console.error('W3C test suites not found at ' + TESTS_ROOT);
237
+ console.error('Run: vc/js/scripts/fetch-w3c-tests.sh');
238
+ process.exit(2);
239
+ }
240
+ // flatten and toRdf are not in the default run because clientffi has
241
+ // Rust-level deadlocks in those algos. Pass --only flatten or --only toRdf
242
+ // to opt in when investigating. They'll move into the default list once
243
+ // their hangs are fixed.
244
+ const algos = ['canonize', 'fromRdf', 'expand', 'compact'];
245
+ const allAlgos = ['expand', 'compact', 'flatten', 'toRdf', 'fromRdf', 'canonize'];
246
+ if (onlyArg && !allAlgos.includes(onlyArg)) {
247
+ console.error(`Invalid --only value: ${onlyArg}. Expected one of: ${allAlgos.join(', ')}`);
248
+ process.exit(2);
249
+ }
250
+ const filtered = onlyArg ? [onlyArg] : algos;
251
+
252
+ // --runs N: run each algo N times and intersect the passing sets so
253
+ // flaky tests don't end up in the baseline. Default 1.
254
+ // Note: slow algos (expand, compact) hang on N>1 because clientffi's
255
+ // NAPI worker pool fills with hung threads from earlier passes. Use
256
+ // --runs 3 only for fast algos like canonize/fromRdf.
257
+ const runsArg = valOf('--runs');
258
+ const repeats = runsArg == null ? 1 : Number.parseInt(runsArg, 10);
259
+ if (!Number.isInteger(repeats) || repeats <= 0 || String(repeats) !== runsArg) {
260
+ if (runsArg != null) {
261
+ console.error(`Invalid value for --runs: ${runsArg}. Expected a positive integer.`);
262
+ process.exit(2);
263
+ }
264
+ }
265
+ const passingPerRun = []; // array of { algo: Set<id> }, length = repeats
266
+
267
+ for (let r = 0; r < repeats; r++) {
268
+ if (repeats > 1) process.stderr.write(`-- pass ${r + 1}/${repeats} --\n`);
269
+ // Fresh RESULTS for this pass
270
+ for (const k of Object.keys(RESULTS)) delete RESULTS[k];
271
+ for (const algo of filtered) await runOnce(algo);
272
+ const snapshot = {};
273
+ for (const [algo, r2] of Object.entries(RESULTS)) snapshot[algo] = new Set(r2.passing);
274
+ passingPerRun.push(snapshot);
275
+ }
276
+
277
+ // Build current state. For --update with repeats=3, "passing" is the
278
+ // intersection across all runs. For --check (single run), use the last run.
279
+ const current = {};
280
+ for (const [algo, r] of Object.entries(RESULTS)) {
281
+ let passing;
282
+ if (repeats > 1) {
283
+ passing = [...passingPerRun[0][algo] || []].filter(id =>
284
+ passingPerRun.every(snap => snap[algo]?.has(id))
285
+ );
286
+ } else {
287
+ passing = [...r.passing];
288
+ }
289
+ current[algo] = {
290
+ passing: passing.sort(),
291
+ failing: [...r.failing].sort(),
292
+ erroring: [...r.erroring].sort(),
293
+ };
294
+ }
295
+
296
+ // Print summary
297
+ console.log('\n========== W3C CONFORMANCE ==========');
298
+ console.log(['Algo'.padEnd(10), 'Total'.padEnd(8), 'Pass'.padEnd(8), 'Fail'.padEnd(8), 'Error'.padEnd(8), 'Pass%'].join(''));
299
+ for (const [algo, r] of Object.entries(current)) {
300
+ const total = r.passing.length + r.failing.length + r.erroring.length;
301
+ const pct = total ? ((r.passing.length / total) * 100).toFixed(1) + '%' : '-';
302
+ console.log([algo.padEnd(10), String(total).padEnd(8), String(r.passing.length).padEnd(8), String(r.failing.length).padEnd(8), String(r.erroring.length).padEnd(8), pct].join(''));
303
+ }
304
+
305
+ if (updateBaseline) {
306
+ // Merge with existing baseline so single-algo runs accumulate.
307
+ const merged = existsSync(BASELINE_PATH) ? loadJson(BASELINE_PATH) : {};
308
+ for (const [algo, r] of Object.entries(current)) merged[algo] = r;
309
+ writeFileSync(BASELINE_PATH, JSON.stringify(merged, null, 2) + '\n');
310
+ console.log(`\nBaseline ${onlyArg ? 'updated for ' + onlyArg : 'written'} at ${BASELINE_PATH}`);
311
+ return;
312
+ }
313
+
314
+ if (checkBaseline) {
315
+ if (!existsSync(BASELINE_PATH)) {
316
+ console.error(`\nNo baseline at ${BASELINE_PATH}. Run with --update to create one.`);
317
+ process.exit(2);
318
+ }
319
+ const baseline = loadJson(BASELINE_PATH);
320
+ const regressions = [];
321
+ const improvements = [];
322
+ const algosToCheck = onlyArg ? [onlyArg] : Object.keys(baseline);
323
+ for (const algo of algosToCheck) {
324
+ if (!baseline[algo]) continue;
325
+ const wasPassing = new Set(baseline[algo].passing);
326
+ const nowPassing = new Set(current[algo]?.passing || []);
327
+ for (const id of wasPassing) if (!nowPassing.has(id)) regressions.push({ algo, id });
328
+ for (const id of nowPassing) if (!wasPassing.has(id)) improvements.push({ algo, id });
329
+ }
330
+
331
+ if (improvements.length) {
332
+ console.log(`\n${improvements.length} test(s) newly passing — please run --update to ratchet the baseline:`);
333
+ for (const { algo, id } of improvements.slice(0, 20)) console.log(` + ${algo} ${id}`);
334
+ if (improvements.length > 20) console.log(` ... ${improvements.length - 20} more`);
335
+ }
336
+
337
+ if (regressions.length) {
338
+ console.log(`\n${regressions.length} REGRESSION(S) — these tests passed in baseline but now fail:`);
339
+ for (const { algo, id } of regressions) console.log(` - ${algo} ${id}`);
340
+ process.exit(1);
341
+ }
342
+ console.log('\nNo regressions vs baseline.');
343
+ }
344
+ }
345
+
346
+ main().catch(e => { console.error(e); process.exit(1); });