wauldo 0.7.0 → 0.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -30,7 +30,9 @@ __export(index_exports, {
30
30
  ToolNotFoundError: () => ToolNotFoundError,
31
31
  ValidationError: () => ValidationError,
32
32
  WauldoError: () => WauldoError,
33
- chatContent: () => chatContent
33
+ chatContent: () => chatContent,
34
+ guardIsBlocked: () => guardIsBlocked,
35
+ guardIsSafe: () => guardIsSafe
34
36
  });
35
37
  module.exports = __toCommonJS(index_exports);
36
38
 
@@ -900,16 +902,6 @@ async function* parseSSEStream(body) {
900
902
  }
901
903
 
902
904
  // src/http_client.ts
903
- function concatUint8Arrays(arrays) {
904
- const total = arrays.reduce((n, a) => n + a.length, 0);
905
- const result = new Uint8Array(total);
906
- let offset = 0;
907
- for (const a of arrays) {
908
- result.set(a, offset);
909
- offset += a.length;
910
- }
911
- return result;
912
- }
913
905
  function validateResponse(data, typeName) {
914
906
  if (data === null || data === void 0) {
915
907
  throw new ServerError(`Invalid ${typeName}: response is null`, 0);
@@ -1044,51 +1036,6 @@ var HttpClient = class {
1044
1036
  );
1045
1037
  return validateResponse(data, "RagUploadResponse");
1046
1038
  }
1047
- /**
1048
- * POST /v1/upload-file — Upload a file (PDF, DOCX, text, image) for RAG indexing.
1049
- *
1050
- * @param file - File content as Buffer/Uint8Array
1051
- * @param filename - The filename (determines content type detection)
1052
- * @param options - Optional title, tags, timeoutMs
1053
- * @returns Upload confirmation with quality scoring
1054
- */
1055
- async uploadFile(file, filename, options) {
1056
- const boundary = "----WauldoSDKBoundary";
1057
- const parts = [];
1058
- const enc = new TextEncoder();
1059
- parts.push(enc.encode(`--${boundary}\r
1060
- Content-Disposition: form-data; name="file"; filename="${filename}"\r
1061
- Content-Type: application/octet-stream\r
1062
- \r
1063
- `));
1064
- parts.push(file instanceof Uint8Array ? file : new Uint8Array(file));
1065
- parts.push(enc.encode("\r\n"));
1066
- if (options?.title) {
1067
- parts.push(enc.encode(`--${boundary}\r
1068
- Content-Disposition: form-data; name="title"\r
1069
- \r
1070
- ${options.title}\r
1071
- `));
1072
- }
1073
- if (options?.tags) {
1074
- parts.push(enc.encode(`--${boundary}\r
1075
- Content-Disposition: form-data; name="tags"\r
1076
- \r
1077
- ${options.tags}\r
1078
- `));
1079
- }
1080
- parts.push(enc.encode(`--${boundary}--\r
1081
- `));
1082
- const body = concatUint8Arrays(parts);
1083
- const data = await fetchWithRetry(
1084
- { ...this.retryConfig, headers: { ...this.retryConfig.headers, "Content-Type": `multipart/form-data; boundary=${boundary}` } },
1085
- "POST",
1086
- "/v1/upload-file",
1087
- body,
1088
- options?.timeoutMs
1089
- );
1090
- return validateResponse(data, "UploadFileResponse");
1091
- }
1092
1039
  /** POST /v1/query — Query RAG knowledge base */
1093
1040
  async ragQuery(query, topK = 5, options) {
1094
1041
  const body = { query, top_k: topK };
@@ -1131,83 +1078,17 @@ ${options.tags}\r
1131
1078
  const result = await this.ragQuery(question, 3);
1132
1079
  return result.answer ?? JSON.stringify(result.sources);
1133
1080
  }
1134
- // ── Orchestrator endpoints ───────────────────────────────────────────
1135
- /** POST /v1/orchestrator/execute — Route to best specialist agent */
1136
- async orchestrate(prompt) {
1137
- const data = await fetchWithRetry(
1138
- this.retryConfig,
1139
- "POST",
1140
- "/v1/orchestrator/execute",
1141
- { prompt }
1142
- );
1143
- return validateResponse(data, "OrchestratorResponse");
1144
- }
1145
- /** POST /v1/orchestrator/parallel — Run all 4 specialists in parallel */
1146
- async orchestrateParallel(prompt) {
1147
- const data = await fetchWithRetry(
1148
- this.retryConfig,
1149
- "POST",
1150
- "/v1/orchestrator/parallel",
1151
- { prompt }
1152
- );
1153
- return validateResponse(data, "OrchestratorResponse");
1154
- }
1155
- // ── Fact-Check endpoints ──────────────────────────────────────────────
1156
- /**
1157
- * POST /v1/fact-check — Verify claims against source context.
1158
- *
1159
- * @param request - Text and source context to verify
1160
- * @returns FactCheckResponse with verdict, action, and per-claim results
1161
- *
1162
- * @example
1163
- * ```typescript
1164
- * const result = await client.factCheck({
1165
- * text: 'Returns accepted within 60 days.',
1166
- * source_context: 'Our policy allows returns within 14 days.',
1167
- * mode: 'lexical',
1168
- * });
1169
- * console.log(result.verdict); // "rejected"
1170
- * ```
1171
- */
1172
- async factCheck(request) {
1173
- const data = await fetchWithRetry(
1174
- this.retryConfig,
1175
- "POST",
1176
- "/v1/fact-check",
1177
- request
1178
- );
1179
- return validateResponse(data, "FactCheckResponse");
1180
- }
1181
- /**
1182
- * POST /v1/verify — Verify citations in AI-generated text.
1183
- *
1184
- * @example
1185
- * ```ts
1186
- * const result = await client.verifyCitation({
1187
- * text: 'Rust was released in 2010 [Source: rust_book].',
1188
- * sources: [{ name: 'rust_book', content: 'Rust was first released in 2010.' }],
1189
- * });
1190
- * console.log(result.phantom_count); // 0
1191
- * ```
1192
- */
1193
- async verifyCitation(request) {
1194
- const data = await fetchWithRetry(
1195
- this.retryConfig,
1196
- "POST",
1197
- "/v1/verify",
1198
- request
1199
- );
1200
- return validateResponse(data, "VerifyCitationResponse");
1201
- }
1081
+ // ── Guard (Fact-Check) ─────────────────────────────────────────────
1202
1082
  /**
1203
1083
  * POST /v1/fact-check — Verify text claims against source context.
1204
1084
  *
1205
- * Guard is a hallucination firewall: checks whether LLM output is
1206
- * supported by source documents. Blocks wrong answers before users see them.
1085
+ * Guard is a hallucination firewall: checks whether LLM output is supported
1086
+ * by source documents. Blocks wrong answers before they reach users.
1207
1087
  *
1208
1088
  * @param text - The LLM-generated text to verify
1209
1089
  * @param sourceContext - The ground-truth source document(s)
1210
1090
  * @param mode - "lexical" (<1ms), "hybrid" (~50ms), or "semantic" (~500ms)
1091
+ * @param options - Optional per-request overrides
1211
1092
  *
1212
1093
  * @example
1213
1094
  * ```typescript
@@ -1215,53 +1096,41 @@ ${options.tags}\r
1215
1096
  * 'Returns accepted within 60 days',
1216
1097
  * 'Our return policy: 14 days.',
1217
1098
  * );
1218
- * if (guardIsBlocked(result)) {
1099
+ * if (result.action === 'block') {
1219
1100
  * console.log('Hallucination caught:', result.claims[0]?.reason);
1220
1101
  * }
1221
1102
  * ```
1222
1103
  */
1223
- async guard(text, sourceContext, mode = "lexical") {
1104
+ async guard(text, sourceContext, mode = "lexical", options) {
1224
1105
  const data = await fetchWithRetry(
1225
1106
  this.retryConfig,
1226
1107
  "POST",
1227
1108
  "/v1/fact-check",
1228
- { text, source_context: sourceContext, mode }
1109
+ { text, source_context: sourceContext, mode },
1110
+ options?.timeoutMs
1229
1111
  );
1230
1112
  return validateResponse(data, "GuardResponse");
1231
1113
  }
1232
- // ── Analytics & Insights endpoints ───────────────────────────────────
1233
- /**
1234
- * GET /v1/insights — ROI metrics for your API key
1235
- */
1236
- async getInsights() {
1237
- const data = await fetchWithRetry(
1238
- this.retryConfig,
1239
- "GET",
1240
- "/v1/insights"
1241
- );
1242
- return validateResponse(data, "InsightsResponse");
1243
- }
1244
- /**
1245
- * GET /v1/analytics — Usage analytics and cache performance
1246
- */
1247
- async getAnalytics(minutes = 60) {
1114
+ // ── Orchestrator endpoints ───────────────────────────────────────────
1115
+ /** POST /v1/orchestrator/execute — Route to best specialist agent */
1116
+ async orchestrate(prompt) {
1248
1117
  const data = await fetchWithRetry(
1249
1118
  this.retryConfig,
1250
- "GET",
1251
- `/v1/analytics?minutes=${minutes}`
1119
+ "POST",
1120
+ "/v1/orchestrator/execute",
1121
+ { prompt }
1252
1122
  );
1253
- return validateResponse(data, "AnalyticsResponse");
1123
+ return validateResponse(data, "OrchestratorResponse");
1254
1124
  }
1255
- /**
1256
- * GET /v1/analytics/traffic — Per-tenant traffic monitoring
1257
- */
1258
- async getAnalyticsTraffic() {
1125
+ /** POST /v1/orchestrator/parallel — Run all 4 specialists in parallel */
1126
+ async orchestrateParallel(prompt) {
1259
1127
  const data = await fetchWithRetry(
1260
1128
  this.retryConfig,
1261
- "GET",
1262
- "/v1/analytics/traffic"
1129
+ "POST",
1130
+ "/v1/orchestrator/parallel",
1131
+ { prompt }
1263
1132
  );
1264
- return validateResponse(data, "TrafficSummary");
1133
+ return validateResponse(data, "OrchestratorResponse");
1265
1134
  }
1266
1135
  };
1267
1136
 
@@ -1366,51 +1235,23 @@ var MockHttpClient = class {
1366
1235
  this.record("conversation", options);
1367
1236
  return new Conversation(this, options);
1368
1237
  }
1369
- async uploadFile(_file, filename, options) {
1370
- this.record("uploadFile", filename, options);
1371
- return {
1372
- document_id: "mock-doc-file-1",
1373
- chunks_count: 5,
1374
- indexed_at: (/* @__PURE__ */ new Date()).toISOString(),
1375
- content_type: "application/pdf",
1376
- trace_id: "mock-trace-1",
1377
- quality: {
1378
- score: 0.85,
1379
- label: "good",
1380
- word_count: 1200,
1381
- line_density: 8.5,
1382
- avg_line_length: 72,
1383
- paragraph_count: 15
1384
- }
1385
- };
1386
- }
1387
- async factCheck(request) {
1388
- this.record("factCheck", request);
1389
- const hasConflict = request.text !== request.source_context;
1390
- return {
1391
- verdict: hasConflict ? "rejected" : "verified",
1392
- action: hasConflict ? "block" : "allow",
1393
- hallucination_rate: hasConflict ? 1 : 0,
1394
- mode: request.mode ?? "lexical",
1395
- total_claims: 1,
1396
- supported_claims: hasConflict ? 0 : 1,
1397
- confidence: hasConflict ? 0.25 : 0.92,
1398
- claims: [{
1399
- text: request.text,
1400
- claim_type: "factual",
1401
- supported: !hasConflict,
1402
- confidence: hasConflict ? 0.25 : 0.92,
1403
- confidence_label: hasConflict ? "low" : "high",
1404
- verdict: hasConflict ? "rejected" : "verified",
1405
- action: hasConflict ? "block" : "allow",
1406
- reason: hasConflict ? "numerical_mismatch" : null,
1407
- evidence: request.source_context
1408
- }],
1409
- processing_time_ms: 1
1410
- };
1411
- }
1412
- async guard(text, sourceContext, mode = "lexical") {
1238
+ async guard(text, sourceContext, mode = "lexical", _options) {
1413
1239
  this.record("guard", text, sourceContext, mode);
1240
+ const textNums = new Set(text.match(/\b\d+(?:\.\d+)?\b/g) ?? []);
1241
+ const srcNums = new Set(sourceContext.match(/\b\d+(?:\.\d+)?\b/g) ?? []);
1242
+ const mismatch = textNums.size > 0 && srcNums.size > 0 && [...textNums].some((n) => !srcNums.has(n));
1243
+ if (mismatch) {
1244
+ return {
1245
+ verdict: "rejected",
1246
+ action: "block",
1247
+ hallucination_rate: 1,
1248
+ mode,
1249
+ total_claims: 1,
1250
+ supported_claims: 0,
1251
+ confidence: 0,
1252
+ claims: [{ text, supported: false, confidence: 0.3, verdict: "rejected", action: "block", reason: "numerical_mismatch" }]
1253
+ };
1254
+ }
1414
1255
  return {
1415
1256
  verdict: "verified",
1416
1257
  action: "allow",
@@ -1419,91 +1260,7 @@ var MockHttpClient = class {
1419
1260
  total_claims: 1,
1420
1261
  supported_claims: 1,
1421
1262
  confidence: 0.95,
1422
- claims: [{
1423
- text,
1424
- claim_type: "Fact",
1425
- supported: true,
1426
- confidence: 0.95,
1427
- confidence_label: "high",
1428
- verdict: "verified",
1429
- action: "allow",
1430
- reason: null,
1431
- evidence: sourceContext
1432
- }],
1433
- processing_time_ms: 0
1434
- };
1435
- }
1436
- async verifyCitation(request) {
1437
- this.record("verifyCitation", request);
1438
- const citations = request.text.match(/\[(?:Source:\s*[^\]]+|\d+|Ref:\s*[^\]]+)\]/g) ?? [];
1439
- const sentences = request.text.split(/[.!?]+/).filter((s) => s.trim().length > 0);
1440
- const citedSentences = sentences.filter((s) => /\[(?:Source:\s*[^\]]+|\d+|Ref:\s*[^\]]+)\]/.test(s));
1441
- const ratio = sentences.length > 0 ? citedSentences.length / sentences.length : 0;
1442
- return {
1443
- citation_ratio: ratio,
1444
- has_sufficient_citations: ratio >= (request.threshold ?? 0.5),
1445
- sentence_count: sentences.length,
1446
- citation_count: citations.length,
1447
- uncited_sentences: sentences.filter((s) => !/\[(?:Source:\s*[^\]]+|\d+|Ref:\s*[^\]]+)\]/.test(s)).map((s) => s.trim()),
1448
- citations: citations.map((c) => ({
1449
- citation: c,
1450
- source_name: c.replace(/[\[\]]/g, "").replace("Source: ", ""),
1451
- is_valid: (request.sources ?? []).some((src) => c.includes(src.name))
1452
- })),
1453
- phantom_count: 0,
1454
- processing_time_ms: 1
1455
- };
1456
- }
1457
- async getInsights() {
1458
- this.record("getInsights");
1459
- return {
1460
- tig_key: "mock-tig-key",
1461
- total_requests: 1250,
1462
- intelligence_requests: 980,
1463
- fallback_requests: 270,
1464
- tokens: {
1465
- baseline_total: 5e5,
1466
- real_total: 325e3,
1467
- saved_total: 175e3,
1468
- saved_percent_avg: 35
1469
- },
1470
- cost: {
1471
- estimated_usd_saved: 12.5
1472
- }
1473
- };
1474
- }
1475
- async getAnalytics(minutes = 60) {
1476
- this.record("getAnalytics", minutes);
1477
- return {
1478
- cache: {
1479
- total_requests: 450,
1480
- cache_hit_rate: 0.42,
1481
- avg_latency_ms: 180,
1482
- p95_latency_ms: 850
1483
- },
1484
- tokens: {
1485
- total_baseline: 12e4,
1486
- total_real: 78e3,
1487
- total_saved: 42e3,
1488
- avg_savings_percent: 35
1489
- },
1490
- uptime_secs: 86400
1491
- };
1492
- }
1493
- async getAnalyticsTraffic() {
1494
- this.record("getAnalyticsTraffic");
1495
- return {
1496
- total_requests_today: 3200,
1497
- total_tokens_today: 15e5,
1498
- top_tenants: [
1499
- { tenant_id: "tenant-alpha", requests_today: 1200, tokens_used: 58e4, success_rate: 0.98, avg_latency_ms: 220 },
1500
- { tenant_id: "tenant-beta", requests_today: 850, tokens_used: 42e4, success_rate: 0.96, avg_latency_ms: 310 },
1501
- { tenant_id: "tenant-gamma", requests_today: 600, tokens_used: 28e4, success_rate: 0.99, avg_latency_ms: 150 }
1502
- ],
1503
- error_rate: 0.02,
1504
- avg_latency_ms: 240,
1505
- p95_latency_ms: 890,
1506
- uptime_secs: 86400
1263
+ claims: [{ text, supported: true, confidence: 0.95, verdict: "verified", action: "allow" }]
1507
1264
  };
1508
1265
  }
1509
1266
  async ragAsk(question, text, source = "document") {
@@ -1521,6 +1278,12 @@ var MockHttpClient = class {
1521
1278
  function chatContent(response) {
1522
1279
  return response.choices[0]?.message?.content ?? "";
1523
1280
  }
1281
+ function guardIsSafe(response) {
1282
+ return response.verdict === "verified";
1283
+ }
1284
+ function guardIsBlocked(response) {
1285
+ return response.action === "block";
1286
+ }
1524
1287
  // Annotate the CommonJS export names for ESM import in node:
1525
1288
  0 && (module.exports = {
1526
1289
  AgentClient,
@@ -1533,5 +1296,7 @@ function chatContent(response) {
1533
1296
  ToolNotFoundError,
1534
1297
  ValidationError,
1535
1298
  WauldoError,
1536
- chatContent
1299
+ chatContent,
1300
+ guardIsBlocked,
1301
+ guardIsSafe
1537
1302
  });