wauldo 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -174,6 +174,7 @@ const followUp = await conv.say('Give me an example');
174
174
  - **Post-generation grounding check** — every answer verified against sources
175
175
  - **Citation validation** — detects phantom references
176
176
  - **Analytics & Insights** — track token savings, cache performance, cost per hour, and per-tenant traffic
177
+ - **Guard method** — one-call hallucination firewall (`client.guard(text, sourceContext)` → `GuardResponse` with verdict/action)
177
178
  - **Fact-check API** — verify any claim against any source (3 modes)
178
179
  - **Native PDF/DOCX upload** — server-side extraction with quality scoring
179
180
  - **Smart model routing** — auto-selects cheapest model that meets quality
package/dist/index.d.mts CHANGED
@@ -380,6 +380,7 @@ interface VerifyCitationResponse {
380
380
  phantom_count?: number;
381
381
  processing_time_ms: number;
382
382
  }
383
+ /** @deprecated Use GuardResponse instead */
383
384
  interface GuardResult {
384
385
  safe: boolean;
385
386
  verdict: string;
@@ -387,6 +388,30 @@ interface GuardResult {
387
388
  reason: string | null;
388
389
  confidence: number;
389
390
  }
391
+ interface GuardClaim {
392
+ text: string;
393
+ claim_type?: string;
394
+ supported: boolean;
395
+ confidence: number;
396
+ confidence_label?: string;
397
+ verdict: string;
398
+ action: string;
399
+ reason?: string | null;
400
+ evidence?: string | null;
401
+ }
402
+ interface GuardResponse {
403
+ verdict: string;
404
+ action: string;
405
+ hallucination_rate: number;
406
+ mode: string;
407
+ total_claims: number;
408
+ supported_claims: number;
409
+ confidence: number;
410
+ claims: GuardClaim[];
411
+ mode_warning?: string;
412
+ processing_time_ms?: number;
413
+ }
414
+ type GuardMode = 'lexical' | 'hybrid' | 'semantic';
390
415
  interface InsightsResponse {
391
416
  tig_key: string;
392
417
  total_requests: number;
@@ -633,10 +658,27 @@ declare class HttpClient {
633
658
  */
634
659
  verifyCitation(request: VerifyCitationRequest): Promise<VerifyCitationResponse>;
635
660
  /**
636
- * Verify an LLM output against a source document.
637
- * Convenience wrapper around factCheck(). Returns a simple safe/unsafe result.
661
+ * POST /v1/fact-check — Verify text claims against source context.
662
+ *
663
+ * Guard is a hallucination firewall: checks whether LLM output is
664
+ * supported by source documents. Blocks wrong answers before users see them.
665
+ *
666
+ * @param text - The LLM-generated text to verify
667
+ * @param sourceContext - The ground-truth source document(s)
668
+ * @param mode - "lexical" (<1ms), "hybrid" (~50ms), or "semantic" (~500ms)
669
+ *
670
+ * @example
671
+ * ```typescript
672
+ * const result = await client.guard(
673
+ * 'Returns accepted within 60 days',
674
+ * 'Our return policy: 14 days.',
675
+ * );
676
+ * if (guardIsBlocked(result)) {
677
+ * console.log('Hallucination caught:', result.claims[0]?.reason);
678
+ * }
679
+ * ```
638
680
  */
639
- guard(text: string, source: string, mode?: 'lexical' | 'hybrid' | 'semantic'): Promise<GuardResult>;
681
+ guard(text: string, sourceContext: string, mode?: GuardMode): Promise<GuardResponse>;
640
682
  /**
641
683
  * GET /v1/insights — ROI metrics for your API key
642
684
  */
@@ -725,7 +767,7 @@ declare class MockHttpClient {
725
767
  timeoutMs?: number;
726
768
  }): Promise<UploadFileResponse>;
727
769
  factCheck(request: FactCheckRequest): Promise<FactCheckResponse>;
728
- guard(text: string, source: string, mode?: string): Promise<GuardResult>;
770
+ guard(text: string, sourceContext: string, mode?: 'lexical' | 'hybrid' | 'semantic'): Promise<GuardResponse>;
729
771
  verifyCitation(request: VerifyCitationRequest): Promise<VerifyCitationResponse>;
730
772
  getInsights(): Promise<InsightsResponse>;
731
773
  getAnalytics(minutes?: number): Promise<AnalyticsResponse>;
package/dist/index.d.ts CHANGED
@@ -380,6 +380,7 @@ interface VerifyCitationResponse {
380
380
  phantom_count?: number;
381
381
  processing_time_ms: number;
382
382
  }
383
+ /** @deprecated Use GuardResponse instead */
383
384
  interface GuardResult {
384
385
  safe: boolean;
385
386
  verdict: string;
@@ -387,6 +388,30 @@ interface GuardResult {
387
388
  reason: string | null;
388
389
  confidence: number;
389
390
  }
391
+ interface GuardClaim {
392
+ text: string;
393
+ claim_type?: string;
394
+ supported: boolean;
395
+ confidence: number;
396
+ confidence_label?: string;
397
+ verdict: string;
398
+ action: string;
399
+ reason?: string | null;
400
+ evidence?: string | null;
401
+ }
402
+ interface GuardResponse {
403
+ verdict: string;
404
+ action: string;
405
+ hallucination_rate: number;
406
+ mode: string;
407
+ total_claims: number;
408
+ supported_claims: number;
409
+ confidence: number;
410
+ claims: GuardClaim[];
411
+ mode_warning?: string;
412
+ processing_time_ms?: number;
413
+ }
414
+ type GuardMode = 'lexical' | 'hybrid' | 'semantic';
390
415
  interface InsightsResponse {
391
416
  tig_key: string;
392
417
  total_requests: number;
@@ -633,10 +658,27 @@ declare class HttpClient {
633
658
  */
634
659
  verifyCitation(request: VerifyCitationRequest): Promise<VerifyCitationResponse>;
635
660
  /**
636
- * Verify an LLM output against a source document.
637
- * Convenience wrapper around factCheck(). Returns a simple safe/unsafe result.
661
+ * POST /v1/fact-check — Verify text claims against source context.
662
+ *
663
+ * Guard is a hallucination firewall: checks whether LLM output is
664
+ * supported by source documents. Blocks wrong answers before users see them.
665
+ *
666
+ * @param text - The LLM-generated text to verify
667
+ * @param sourceContext - The ground-truth source document(s)
668
+ * @param mode - "lexical" (<1ms), "hybrid" (~50ms), or "semantic" (~500ms)
669
+ *
670
+ * @example
671
+ * ```typescript
672
+ * const result = await client.guard(
673
+ * 'Returns accepted within 60 days',
674
+ * 'Our return policy: 14 days.',
675
+ * );
676
+ * if (guardIsBlocked(result)) {
677
+ * console.log('Hallucination caught:', result.claims[0]?.reason);
678
+ * }
679
+ * ```
638
680
  */
639
- guard(text: string, source: string, mode?: 'lexical' | 'hybrid' | 'semantic'): Promise<GuardResult>;
681
+ guard(text: string, sourceContext: string, mode?: GuardMode): Promise<GuardResponse>;
640
682
  /**
641
683
  * GET /v1/insights — ROI metrics for your API key
642
684
  */
@@ -725,7 +767,7 @@ declare class MockHttpClient {
725
767
  timeoutMs?: number;
726
768
  }): Promise<UploadFileResponse>;
727
769
  factCheck(request: FactCheckRequest): Promise<FactCheckResponse>;
728
- guard(text: string, source: string, mode?: string): Promise<GuardResult>;
770
+ guard(text: string, sourceContext: string, mode?: 'lexical' | 'hybrid' | 'semantic'): Promise<GuardResponse>;
729
771
  verifyCitation(request: VerifyCitationRequest): Promise<VerifyCitationResponse>;
730
772
  getInsights(): Promise<InsightsResponse>;
731
773
  getAnalytics(minutes?: number): Promise<AnalyticsResponse>;
package/dist/index.js CHANGED
@@ -1200,19 +1200,34 @@ ${options.tags}\r
1200
1200
  return validateResponse(data, "VerifyCitationResponse");
1201
1201
  }
1202
1202
  /**
1203
- * Verify an LLM output against a source document.
1204
- * Convenience wrapper around factCheck(). Returns a simple safe/unsafe result.
1203
+ * POST /v1/fact-check — Verify text claims against source context.
1204
+ *
1205
+ * Guard is a hallucination firewall: checks whether LLM output is
1206
+ * supported by source documents. Blocks wrong answers before users see them.
1207
+ *
1208
+ * @param text - The LLM-generated text to verify
1209
+ * @param sourceContext - The ground-truth source document(s)
1210
+ * @param mode - "lexical" (<1ms), "hybrid" (~50ms), or "semantic" (~500ms)
1211
+ *
1212
+ * @example
1213
+ * ```typescript
1214
+ * const result = await client.guard(
1215
+ * 'Returns accepted within 60 days',
1216
+ * 'Our return policy: 14 days.',
1217
+ * );
1218
+ * if (guardIsBlocked(result)) {
1219
+ * console.log('Hallucination caught:', result.claims[0]?.reason);
1220
+ * }
1221
+ * ```
1205
1222
  */
1206
- async guard(text, source, mode = "lexical") {
1207
- const result = await this.factCheck({ text, source_context: source, mode });
1208
- const claim = result.claims?.[0];
1209
- return {
1210
- safe: claim?.verdict === "verified",
1211
- verdict: claim?.verdict ?? "rejected",
1212
- action: claim?.action ?? "block",
1213
- reason: claim?.reason ?? "no_claims",
1214
- confidence: claim?.confidence ?? 0
1215
- };
1223
+ async guard(text, sourceContext, mode = "lexical") {
1224
+ const data = await fetchWithRetry(
1225
+ this.retryConfig,
1226
+ "POST",
1227
+ "/v1/fact-check",
1228
+ { text, source_context: sourceContext, mode }
1229
+ );
1230
+ return validateResponse(data, "GuardResponse");
1216
1231
  }
1217
1232
  // ── Analytics & Insights endpoints ───────────────────────────────────
1218
1233
  /**
@@ -1394,14 +1409,28 @@ var MockHttpClient = class {
1394
1409
  processing_time_ms: 1
1395
1410
  };
1396
1411
  }
1397
- async guard(text, source, mode = "lexical") {
1398
- this.record("guard", text, source, mode);
1412
+ async guard(text, sourceContext, mode = "lexical") {
1413
+ this.record("guard", text, sourceContext, mode);
1399
1414
  return {
1400
- safe: true,
1401
1415
  verdict: "verified",
1402
1416
  action: "allow",
1403
- reason: null,
1404
- confidence: 0.95
1417
+ hallucination_rate: 0,
1418
+ mode,
1419
+ total_claims: 1,
1420
+ supported_claims: 1,
1421
+ confidence: 0.95,
1422
+ claims: [{
1423
+ text,
1424
+ claim_type: "Fact",
1425
+ supported: true,
1426
+ confidence: 0.95,
1427
+ confidence_label: "high",
1428
+ verdict: "verified",
1429
+ action: "allow",
1430
+ reason: null,
1431
+ evidence: sourceContext
1432
+ }],
1433
+ processing_time_ms: 0
1405
1434
  };
1406
1435
  }
1407
1436
  async verifyCitation(request) {
package/dist/index.mjs CHANGED
@@ -1164,19 +1164,34 @@ ${options.tags}\r
1164
1164
  return validateResponse(data, "VerifyCitationResponse");
1165
1165
  }
1166
1166
  /**
1167
- * Verify an LLM output against a source document.
1168
- * Convenience wrapper around factCheck(). Returns a simple safe/unsafe result.
1167
+ * POST /v1/fact-check — Verify text claims against source context.
1168
+ *
1169
+ * Guard is a hallucination firewall: checks whether LLM output is
1170
+ * supported by source documents. Blocks wrong answers before users see them.
1171
+ *
1172
+ * @param text - The LLM-generated text to verify
1173
+ * @param sourceContext - The ground-truth source document(s)
1174
+ * @param mode - "lexical" (<1ms), "hybrid" (~50ms), or "semantic" (~500ms)
1175
+ *
1176
+ * @example
1177
+ * ```typescript
1178
+ * const result = await client.guard(
1179
+ * 'Returns accepted within 60 days',
1180
+ * 'Our return policy: 14 days.',
1181
+ * );
1182
+ * if (guardIsBlocked(result)) {
1183
+ * console.log('Hallucination caught:', result.claims[0]?.reason);
1184
+ * }
1185
+ * ```
1169
1186
  */
1170
- async guard(text, source, mode = "lexical") {
1171
- const result = await this.factCheck({ text, source_context: source, mode });
1172
- const claim = result.claims?.[0];
1173
- return {
1174
- safe: claim?.verdict === "verified",
1175
- verdict: claim?.verdict ?? "rejected",
1176
- action: claim?.action ?? "block",
1177
- reason: claim?.reason ?? "no_claims",
1178
- confidence: claim?.confidence ?? 0
1179
- };
1187
+ async guard(text, sourceContext, mode = "lexical") {
1188
+ const data = await fetchWithRetry(
1189
+ this.retryConfig,
1190
+ "POST",
1191
+ "/v1/fact-check",
1192
+ { text, source_context: sourceContext, mode }
1193
+ );
1194
+ return validateResponse(data, "GuardResponse");
1180
1195
  }
1181
1196
  // ── Analytics & Insights endpoints ───────────────────────────────────
1182
1197
  /**
@@ -1358,14 +1373,28 @@ var MockHttpClient = class {
1358
1373
  processing_time_ms: 1
1359
1374
  };
1360
1375
  }
1361
- async guard(text, source, mode = "lexical") {
1362
- this.record("guard", text, source, mode);
1376
+ async guard(text, sourceContext, mode = "lexical") {
1377
+ this.record("guard", text, sourceContext, mode);
1363
1378
  return {
1364
- safe: true,
1365
1379
  verdict: "verified",
1366
1380
  action: "allow",
1367
- reason: null,
1368
- confidence: 0.95
1381
+ hallucination_rate: 0,
1382
+ mode,
1383
+ total_claims: 1,
1384
+ supported_claims: 1,
1385
+ confidence: 0.95,
1386
+ claims: [{
1387
+ text,
1388
+ claim_type: "Fact",
1389
+ supported: true,
1390
+ confidence: 0.95,
1391
+ confidence_label: "high",
1392
+ verdict: "verified",
1393
+ action: "allow",
1394
+ reason: null,
1395
+ evidence: sourceContext
1396
+ }],
1397
+ processing_time_ms: 0
1369
1398
  };
1370
1399
  }
1371
1400
  async verifyCitation(request) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wauldo",
3
- "version": "0.6.0",
3
+ "version": "0.7.0",
4
4
  "description": "Official TypeScript SDK for Wauldo — Verified AI answers from your documents",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",