@tenova/swt3-ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +246 -0
- package/dist/adapters/anthropic.d.ts +32 -0
- package/dist/adapters/anthropic.d.ts.map +1 -0
- package/dist/adapters/anthropic.js +267 -0
- package/dist/adapters/anthropic.js.map +1 -0
- package/dist/adapters/openai.d.ts +19 -0
- package/dist/adapters/openai.d.ts.map +1 -0
- package/dist/adapters/openai.js +252 -0
- package/dist/adapters/openai.js.map +1 -0
- package/dist/adapters/vercel-ai.d.ts +64 -0
- package/dist/adapters/vercel-ai.d.ts.map +1 -0
- package/dist/adapters/vercel-ai.js +68 -0
- package/dist/adapters/vercel-ai.js.map +1 -0
- package/dist/buffer.d.ts +41 -0
- package/dist/buffer.d.ts.map +1 -0
- package/dist/buffer.js +154 -0
- package/dist/buffer.js.map +1 -0
- package/dist/clearing.d.ts +20 -0
- package/dist/clearing.d.ts.map +1 -0
- package/dist/clearing.js +145 -0
- package/dist/clearing.js.map +1 -0
- package/dist/fingerprint.d.ts +29 -0
- package/dist/fingerprint.d.ts.map +1 -0
- package/dist/fingerprint.js +57 -0
- package/dist/fingerprint.js.map +1 -0
- package/dist/index.d.ts +21 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +19 -0
- package/dist/index.js.map +1 -0
- package/dist/types.d.ts +75 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +14 -0
- package/dist/types.js.map +1 -0
- package/dist/witness.d.ts +76 -0
- package/dist/witness.d.ts.map +1 -0
- package/dist/witness.js +121 -0
- package/dist/witness.js.map +1 -0
- package/package.json +55 -0
package/README.md
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
Cryptographic AI Governance for the Edge. Zero Latency. Zero Data Retention.
|
|
2
|
+
|
|
3
|
+
# @tenova/swt3-ai
|
|
4
|
+
|
|
5
|
+
**SWT3 AI Witness SDK for TypeScript** — continuous, cryptographic attestation for AI systems. Prove your models are running approved weights, safety guardrails are active, and inferences are traceable. All without a single prompt or response ever leaving your infrastructure.
|
|
6
|
+
|
|
7
|
+
Works with OpenAI, Anthropic, Vercel AI SDK, and any OpenAI-compatible endpoint (vLLM, Ollama, Azure, Llama.cpp).
|
|
8
|
+
|
|
9
|
+
## Three Lines of Code
|
|
10
|
+
|
|
11
|
+
### OpenAI
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
import { Witness } from "@tenova/swt3-ai";
|
|
15
|
+
import OpenAI from "openai";
|
|
16
|
+
|
|
17
|
+
const witness = new Witness({
|
|
18
|
+
endpoint: "https://sovereign.tenova.io",
|
|
19
|
+
apiKey: "axm_live_...",
|
|
20
|
+
tenantId: "YOUR_ENCLAVE",
|
|
21
|
+
});
|
|
22
|
+
|
|
23
|
+
const client = witness.wrap(new OpenAI()) as OpenAI;
|
|
24
|
+
|
|
25
|
+
// Non-streaming — works exactly as before
|
|
26
|
+
const response = await client.chat.completions.create({
|
|
27
|
+
model: "gpt-4o",
|
|
28
|
+
messages: [{ role: "user", content: "Summarize this contract..." }],
|
|
29
|
+
});
|
|
30
|
+
console.log(response.choices[0].message.content);
|
|
31
|
+
|
|
32
|
+
// Streaming — also works. Chunks arrive in real-time, witnessing happens after.
|
|
33
|
+
const stream = await client.chat.completions.create({
|
|
34
|
+
model: "gpt-4o",
|
|
35
|
+
messages: [{ role: "user", content: "Explain quantum computing" }],
|
|
36
|
+
stream: true,
|
|
37
|
+
});
|
|
38
|
+
for await (const chunk of stream) {
|
|
39
|
+
process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
|
|
40
|
+
}
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
### Anthropic
|
|
44
|
+
|
|
45
|
+
```typescript
|
|
46
|
+
import { Witness } from "@tenova/swt3-ai";
|
|
47
|
+
import Anthropic from "@anthropic-ai/sdk";
|
|
48
|
+
|
|
49
|
+
const witness = new Witness({
|
|
50
|
+
endpoint: "https://sovereign.tenova.io",
|
|
51
|
+
apiKey: "axm_live_...",
|
|
52
|
+
tenantId: "YOUR_ENCLAVE",
|
|
53
|
+
});
|
|
54
|
+
|
|
55
|
+
const client = witness.wrap(new Anthropic()) as Anthropic;
|
|
56
|
+
|
|
57
|
+
const message = await client.messages.create({
|
|
58
|
+
model: "claude-sonnet-4-20250514",
|
|
59
|
+
max_tokens: 1024,
|
|
60
|
+
messages: [{ role: "user", content: "Draft a compliance memo" }],
|
|
61
|
+
});
|
|
62
|
+
|
|
63
|
+
// Streaming with Anthropic
|
|
64
|
+
const stream = await client.messages.create({
|
|
65
|
+
model: "claude-sonnet-4-20250514",
|
|
66
|
+
max_tokens: 1024,
|
|
67
|
+
messages: [{ role: "user", content: "Analyze this dataset" }],
|
|
68
|
+
stream: true,
|
|
69
|
+
});
|
|
70
|
+
for await (const event of stream) {
|
|
71
|
+
// events arrive in real-time, witnessing happens after stream ends
|
|
72
|
+
}
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
### Vercel AI SDK (Next.js / React)
|
|
76
|
+
|
|
77
|
+
```typescript
|
|
78
|
+
import { Witness } from "@tenova/swt3-ai";
|
|
79
|
+
import { streamText } from "ai";
|
|
80
|
+
import { openai } from "@ai-sdk/openai";
|
|
81
|
+
|
|
82
|
+
const witness = new Witness({
|
|
83
|
+
endpoint: "https://sovereign.tenova.io",
|
|
84
|
+
apiKey: "axm_live_...",
|
|
85
|
+
tenantId: "YOUR_ENCLAVE",
|
|
86
|
+
});
|
|
87
|
+
|
|
88
|
+
const prompt = "Summarize this contract for the board";
|
|
89
|
+
|
|
90
|
+
const result = await streamText({
|
|
91
|
+
model: openai("gpt-4o"),
|
|
92
|
+
prompt,
|
|
93
|
+
onFinish: witness.vercelOnFinish({ promptText: prompt }),
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
// Works with any Vercel AI SDK provider — OpenAI, Anthropic, Google, Mistral, custom
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
The `onFinish` hook is framework-idiomatic: no wrapping, no proxying, no monkey-patching. It fires after the stream completes and receives a normalized result regardless of provider.
|
|
100
|
+
|
|
101
|
+
## Sovereign Cloud Support
|
|
102
|
+
|
|
103
|
+
The SDK works out-of-the-box with any OpenAI-compatible endpoint. Run Llama 3 on vLLM, Mistral on Ollama, or any model behind an OpenAI-compatible API — every inference is witnessed identically.
|
|
104
|
+
|
|
105
|
+
```typescript
|
|
106
|
+
// vLLM with Llama 3 — sovereign cloud, your hardware
|
|
107
|
+
const client = witness.wrap(
|
|
108
|
+
new OpenAI({ baseURL: "http://gpu-cluster.internal:8000/v1" }),
|
|
109
|
+
) as OpenAI;
|
|
110
|
+
|
|
111
|
+
const response = await client.chat.completions.create({
|
|
112
|
+
model: "meta-llama/Meta-Llama-3-70B-Instruct",
|
|
113
|
+
messages: [{ role: "user", content: "Classify this threat indicator" }],
|
|
114
|
+
});
|
|
115
|
+
// Same SWT3 anchor, same ledger, same audit trail — regardless of where the model runs
|
|
116
|
+
|
|
117
|
+
// Ollama (local development)
|
|
118
|
+
const localClient = witness.wrap(
|
|
119
|
+
new OpenAI({ baseURL: "http://localhost:11434/v1" }),
|
|
120
|
+
) as OpenAI;
|
|
121
|
+
|
|
122
|
+
// Azure OpenAI (enterprise deployment)
|
|
123
|
+
const azureClient = witness.wrap(
|
|
124
|
+
new OpenAI({
|
|
125
|
+
apiKey: process.env.AZURE_OPENAI_KEY,
|
|
126
|
+
baseURL: "https://your-resource.openai.azure.com/openai/deployments/gpt-4o",
|
|
127
|
+
}),
|
|
128
|
+
) as OpenAI;
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
## What Happens Per Inference
|
|
132
|
+
|
|
133
|
+
1. **Intercept** — ES6 Proxy wraps your AI client transparently
|
|
134
|
+
2. **Hash** — Prompts and responses are SHA-256 hashed in-process
|
|
135
|
+
3. **Extract** — Model hash, latency, token count, refusal status → numeric factors
|
|
136
|
+
4. **Clear** — Raw text is purged from the wire payload (configurable clearing level)
|
|
137
|
+
5. **Buffer** — Factors queued in background, flushed to the SWT3 ledger asynchronously
|
|
138
|
+
6. **Return** — Your original response returns untouched, zero added latency
|
|
139
|
+
|
|
140
|
+
For streaming: chunks arrive to the developer in real-time. The SDK accumulates content in the background and witnesses after the stream completes.
|
|
141
|
+
|
|
142
|
+
## Clearing Levels
|
|
143
|
+
|
|
144
|
+
| Level | Name | On the Wire | Use Case |
|
|
145
|
+
|-------|------|------------|----------|
|
|
146
|
+
| 0 | Analytics | Hashes + factors + model + provider + guardrails | Internal analytics |
|
|
147
|
+
| 1 | Standard | Hashes + factors + model + provider | **Default.** Production apps |
|
|
148
|
+
| 2 | Sensitive | Hashes + factors + model only | Healthcare, legal, PII |
|
|
149
|
+
| 3 | Classified | Numeric factors only. Model ID hashed. | Defense, air-gapped |
|
|
150
|
+
|
|
151
|
+
At Level 1+, raw prompts and responses **never leave your infrastructure**. The witness endpoint is a "Blind Registrar" — it stores cryptographic proofs, not data.
|
|
152
|
+
|
|
153
|
+
```typescript
|
|
154
|
+
// Healthcare deployment: Level 2
|
|
155
|
+
const witness = new Witness({
|
|
156
|
+
endpoint: "https://sovereign.tenova.io",
|
|
157
|
+
apiKey: "axm_live_...",
|
|
158
|
+
tenantId: "HOSPITAL_ENCLAVE",
|
|
159
|
+
clearingLevel: 2,
|
|
160
|
+
});
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
## What Gets Witnessed
|
|
164
|
+
|
|
165
|
+
| Procedure | What It Proves | Regulatory Mapping |
|
|
166
|
+
|-----------|---------------|-------------------|
|
|
167
|
+
| AI-INF.1 | Inference provenance (prompt + response hashed) | EU AI Act Art. 12 |
|
|
168
|
+
| AI-INF.2 | Latency within threshold (detects model swaps) | NIST AI RMF MEASURE 2.6 |
|
|
169
|
+
| AI-MDL.1 | Model hash matches approved version | EU AI Act Art. 9 |
|
|
170
|
+
| AI-MDL.2 | Model version identifier recorded | EU AI Act Art. 72 |
|
|
171
|
+
| AI-GRD.1 | Required guardrails were active | NIST AI RMF MANAGE 4.1 |
|
|
172
|
+
| AI-GRD.2 | No content filter / refusal triggered | EU AI Act Art. 9 |
|
|
173
|
+
|
|
174
|
+
## Resilience (Flight Recorder)
|
|
175
|
+
|
|
176
|
+
If the witness endpoint is unreachable, payloads move to a dead-letter queue instead of being dropped. When connectivity is restored, the backlog drains automatically.
|
|
177
|
+
|
|
178
|
+
```typescript
|
|
179
|
+
const witness = new Witness({
|
|
180
|
+
endpoint: "...",
|
|
181
|
+
apiKey: "axm_...",
|
|
182
|
+
tenantId: "...",
|
|
183
|
+
bufferSize: 50, // flush every 50 anchors
|
|
184
|
+
flushInterval: 10, // or every 10 seconds
|
|
185
|
+
maxRetries: 5, // retry before dead-lettering
|
|
186
|
+
});
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
## Cross-Language Parity
|
|
190
|
+
|
|
191
|
+
This SDK produces **identical SWT3 fingerprints** to the Python SDK (`swt3-ai`) and the Axiom ingestion endpoint. A unified audit trail across your entire stack:
|
|
192
|
+
|
|
193
|
+
| Layer | Language | Package |
|
|
194
|
+
|-------|----------|---------|
|
|
195
|
+
| Backend services | Python | `swt3-ai` |
|
|
196
|
+
| API routes / Edge | TypeScript | `@tenova/swt3-ai` |
|
|
197
|
+
| Frontend (Next.js) | TypeScript | `@tenova/swt3-ai` + Vercel AI SDK |
|
|
198
|
+
| CLI / scripts | Python | `swt3-ai` |
|
|
199
|
+
|
|
200
|
+
10 cross-language test vectors validated at build time.
|
|
201
|
+
|
|
202
|
+
## Installation
|
|
203
|
+
|
|
204
|
+
```bash
|
|
205
|
+
npm install @tenova/swt3-ai
|
|
206
|
+
|
|
207
|
+
# Peer dependencies (install whichever you use)
|
|
208
|
+
npm install openai # for OpenAI adapter
|
|
209
|
+
npm install @anthropic-ai/sdk # for Anthropic adapter
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
## API Reference
|
|
213
|
+
|
|
214
|
+
### `new Witness(options)`
|
|
215
|
+
|
|
216
|
+
| Option | Default | Description |
|
|
217
|
+
|--------|---------|-------------|
|
|
218
|
+
| `endpoint` | *required* | Witness endpoint URL |
|
|
219
|
+
| `apiKey` | *required* | API key (`axm_*`) |
|
|
220
|
+
| `tenantId` | *required* | Enclave identifier |
|
|
221
|
+
| `clearingLevel` | `1` | Clearing level (0-3) |
|
|
222
|
+
| `bufferSize` | `10` | Flush after N anchors |
|
|
223
|
+
| `flushInterval` | `5` | Flush after N seconds |
|
|
224
|
+
| `timeout` | `10000` | HTTP timeout (ms) |
|
|
225
|
+
| `maxRetries` | `3` | Retries before dead-letter |
|
|
226
|
+
| `guardrailNames` | `[]` | Active guardrail names |
|
|
227
|
+
|
|
228
|
+
### `witness.wrap(client)`
|
|
229
|
+
|
|
230
|
+
Returns a Proxy that behaves identically to the original client. Supports OpenAI and Anthropic.
|
|
231
|
+
|
|
232
|
+
### `witness.vercelOnFinish(options?)`
|
|
233
|
+
|
|
234
|
+
Returns an `onFinish` callback for `streamText()` / `generateText()`. Pass `{ promptText }` for full provenance hashing.
|
|
235
|
+
|
|
236
|
+
### `witness.flush()`
|
|
237
|
+
|
|
238
|
+
Force-flush all buffered payloads. Returns receipts.
|
|
239
|
+
|
|
240
|
+
### `witness.stop()`
|
|
241
|
+
|
|
242
|
+
Stop the witness and flush remaining payloads.
|
|
243
|
+
|
|
244
|
+
---
|
|
245
|
+
|
|
246
|
+
*SWT3: Sovereign Witness Traceability. We don't run your models. We witness them.*
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SWT3 AI Witness SDK — Anthropic Adapter (ES6 Proxy).
|
|
3
|
+
*
|
|
4
|
+
* Wraps the Anthropic client so that `client.messages.create()` is
|
|
5
|
+
* intercepted for witnessing. Two levels deep (simpler than OpenAI).
|
|
6
|
+
*
|
|
7
|
+
* Handles both:
|
|
8
|
+
* - Non-streaming: Message response object
|
|
9
|
+
* - Streaming: MessageStream — accumulates content_block_delta events
|
|
10
|
+
* for hashing, witnesses after message_stop
|
|
11
|
+
*
|
|
12
|
+
* Anthropic response structure:
|
|
13
|
+
* response.content → ContentBlock[] (text, tool_use, etc.)
|
|
14
|
+
* response.model → string
|
|
15
|
+
* response.stop_reason → "end_turn" | "max_tokens" | "stop_sequence" | "tool_use"
|
|
16
|
+
* response.usage → { input_tokens, output_tokens }
|
|
17
|
+
*
|
|
18
|
+
* Anthropic streaming:
|
|
19
|
+
* The SDK returns a MessageStream with:
|
|
20
|
+
* - Symbol.asyncIterator → yields MessageStreamEvent objects
|
|
21
|
+
* - .on("message", cb) → fires when complete message is assembled
|
|
22
|
+
* - .finalMessage() → Promise<Message> (the assembled message)
|
|
23
|
+
*
|
|
24
|
+
* Events: message_start, content_block_start, content_block_delta,
|
|
25
|
+
* content_block_stop, message_delta, message_stop
|
|
26
|
+
*/
|
|
27
|
+
import type { Witness } from "../witness.js";
|
|
28
|
+
/**
|
|
29
|
+
* Wrap an Anthropic client with an ES6 Proxy for transparent witnessing.
|
|
30
|
+
*/
|
|
31
|
+
export declare function wrapAnthropic(client: unknown, witness: Witness): unknown;
|
|
32
|
+
//# sourceMappingURL=anthropic.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../src/adapters/anthropic.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AAIH,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,eAAe,CAAC;AAE7C;;GAEG;AACH,wBAAgB,aAAa,CAAC,MAAM,EAAE,OAAO,EAAE,OAAO,EAAE,OAAO,GAAG,OAAO,CAYxE"}
|
|
@@ -0,0 +1,267 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SWT3 AI Witness SDK — Anthropic Adapter (ES6 Proxy).
|
|
3
|
+
*
|
|
4
|
+
* Wraps the Anthropic client so that `client.messages.create()` is
|
|
5
|
+
* intercepted for witnessing. Two levels deep (simpler than OpenAI).
|
|
6
|
+
*
|
|
7
|
+
* Handles both:
|
|
8
|
+
* - Non-streaming: Message response object
|
|
9
|
+
* - Streaming: MessageStream — accumulates content_block_delta events
|
|
10
|
+
* for hashing, witnesses after message_stop
|
|
11
|
+
*
|
|
12
|
+
* Anthropic response structure:
|
|
13
|
+
* response.content → ContentBlock[] (text, tool_use, etc.)
|
|
14
|
+
* response.model → string
|
|
15
|
+
* response.stop_reason → "end_turn" | "max_tokens" | "stop_sequence" | "tool_use"
|
|
16
|
+
* response.usage → { input_tokens, output_tokens }
|
|
17
|
+
*
|
|
18
|
+
* Anthropic streaming:
|
|
19
|
+
* The SDK returns a MessageStream with:
|
|
20
|
+
* - Symbol.asyncIterator → yields MessageStreamEvent objects
|
|
21
|
+
* - .on("message", cb) → fires when complete message is assembled
|
|
22
|
+
* - .finalMessage() → Promise<Message> (the assembled message)
|
|
23
|
+
*
|
|
24
|
+
* Events: message_start, content_block_start, content_block_delta,
|
|
25
|
+
* content_block_stop, message_delta, message_stop
|
|
26
|
+
*/
|
|
27
|
+
import { sha256Truncated } from "../fingerprint.js";
|
|
28
|
+
/**
|
|
29
|
+
* Wrap an Anthropic client with an ES6 Proxy for transparent witnessing.
|
|
30
|
+
*/
|
|
31
|
+
export function wrapAnthropic(client, witness) {
  const handler = {
    get(target, prop) {
      // Symbols (inspect, asyncIterator, …) are never ours — forward untouched.
      if (typeof prop === "symbol") return Reflect.get(target, prop);
      const value = Reflect.get(target, prop);
      // Only the `messages` namespace is intercepted; everything else
      // passes straight through to the real client.
      return prop === "messages" ? createMessagesProxy(value, witness) : value;
    },
  };
  return new Proxy(client, handler);
}
|
|
44
|
+
/**
 * Proxy the `messages` namespace so that both `messages.create()` and
 * `messages.stream()` are intercepted for witnessing; every other member
 * is forwarded unchanged.
 */
function createMessagesProxy(messages, witness) {
  return new Proxy(messages, {
    get(target, prop) {
      if (typeof prop === "symbol") return Reflect.get(target, prop);
      const member = Reflect.get(target, prop);
      switch (prop) {
        case "create":
          return createInterceptor(member, witness);
        case "stream":
          // Anthropic also exposes messages.stream() — witness that too.
          return createStreamInterceptor(member, witness);
        default:
          return member;
      }
    },
  });
}
|
|
61
|
+
// ── Non-streaming / auto-detect interceptor ─────────────────────────
|
|
62
|
+
/**
 * Intercept `messages.create()`. Detects `stream: true` and routes to the
 * streaming handler; otherwise witnesses the resolved Message and returns
 * the caller's response untouched.
 *
 * @param realMethod - the original `create` function from the Anthropic SDK
 * @param witness    - Witness instance that buffers/ships witness records
 */
function createInterceptor(realMethod, witness) {
  return function interceptedCreate(...args) {
    const kwargs = args[0] ?? {};
    const model = kwargs.model ?? "unknown";
    const isStreaming = kwargs.stream === true;

    // Hash the prompt before the call so the hash reflects what was sent.
    const promptHash = sha256Truncated(extractPromptText(kwargs.messages, kwargs.system));
    const start = performance.now();

    const result = realMethod.call(this, ...args);

    if (isStreaming) {
      // create({ stream: true }) — result is (a Promise of) a Stream.
      return handleStreaming(result, witness, model, promptHash, start);
    }

    // Non-streaming — result is Promise<Message>.
    return result.then((response) => {
      const elapsedMs = Math.round(performance.now() - start);
      // Fix: never let a witnessing failure reject the caller's promise.
      // The documented contract (README: "Your original response returns
      // untouched") means telemetry errors must not break the inference path.
      try {
        witness.record(extractRecord(response, model, promptHash, elapsedMs));
      } catch {
        // Swallowed deliberately — see contract note above.
      }
      return response;
    });
  };
}
|
|
86
|
+
// ── Explicit .stream() interceptor ──────────────────────────────────
|
|
87
|
+
/**
 * Intercept the explicit `messages.stream()` helper. This entry point is
 * always streaming, so the result goes straight to the streaming handler.
 */
function createStreamInterceptor(realMethod, witness) {
  return function interceptedStream(...args) {
    const params = args[0] ?? {};
    const modelName = params.model ?? "unknown";
    const promptHash = sha256Truncated(extractPromptText(params.messages, params.system));
    const startedAt = performance.now();
    const rawStream = realMethod.call(this, ...args);
    return handleStreaming(rawStream, witness, modelName, promptHash, startedAt);
  };
}
|
|
100
|
+
// ── Streaming Handler ───────────────────────────────────────────────
|
|
101
|
+
/**
 * Async generator that forwards every stream event to the consumer
 * unchanged while accumulating text deltas, the actual model id, the stop
 * reason, and token usage. When the stream ends, a witness record is
 * emitted. Events are yielded BEFORE any bookkeeping so the consumer sees
 * them with no added latency.
 */
async function* streamAccumulator(stream, witness, model, promptHash, startTime) {
  const textParts = [];
  let actualModel = model;
  let stopReason = "";
  let inputTokens;
  let outputTokens;

  for await (const event of stream) {
    // Hand the event to the developer first; witnessing is bookkeeping only.
    yield event;

    const e = event;
    switch (e.type) {
      case "message_start": {
        const msg = e.message;
        if (msg?.model) actualModel = msg.model;
        // Fix: use `!= null` so a legitimate zero token count is kept
        // (the previous truthiness check silently dropped 0).
        if (msg?.usage?.input_tokens != null) inputTokens = msg.usage.input_tokens;
        break;
      }
      case "content_block_delta": {
        const delta = e.delta;
        if (delta?.type === "text_delta" && delta?.text) {
          textParts.push(delta.text);
        }
        break;
      }
      case "message_delta": {
        if (e.delta?.stop_reason) stopReason = e.delta.stop_reason;
        // Same `!= null` fix as input_tokens above.
        if (e.usage?.output_tokens != null) outputTokens = e.usage.output_tokens;
        break;
      }
    }
  }

  // Stream complete — assemble and record the witness payload.
  const elapsedMs = Math.round(performance.now() - startTime);
  const responseText = textParts.join("");
  // Any stop reason outside Anthropic's documented set counts as a refusal.
  const hasRefusal = !["end_turn", "max_tokens", "stop_sequence", "tool_use"].includes(stopReason);

  const record = {
    modelId: actualModel,
    modelHash: sha256Truncated(actualModel),
    promptHash,
    responseHash: sha256Truncated(responseText),
    latencyMs: elapsedMs,
    inputTokens,
    outputTokens,
    guardrailsActive: 0,
    guardrailsRequired: 0,
    guardrailPassed: true,
    hasRefusal,
    provider: "anthropic",
    guardrailNames: [],
  };

  // Fix: a failure inside the witness pipeline must never surface to the
  // consumer's for-await loop — the documented contract is that the
  // original response path is untouched.
  try {
    witness.record(record);
  } catch {
    // Swallowed deliberately — see contract note above.
  }
}
|
|
156
|
+
/**
 * Normalize the two streaming entry points:
 * - messages.stream() returns a MessageStream synchronously
 * - create({ stream: true }) may return a Promise resolving to a Stream
 * Either way, the resulting stream is wrapped for accumulation/witnessing.
 */
function handleStreaming(streamResult, witness, model, promptHash, startTime) {
  const wrap = (stream) => wrapAnthropicStream(stream, witness, model, promptHash, startTime);
  const isThenable = streamResult && typeof streamResult.then === "function";
  return isThenable ? streamResult.then(wrap) : wrap(streamResult);
}
|
|
164
|
+
/**
 * Wrap a MessageStream/Stream in a Proxy whose async iteration goes through
 * the accumulating generator, while all other members (on, finalMessage, …)
 * are forwarded to the underlying stream with `this` bound correctly.
 */
function wrapAnthropicStream(stream, witness, model, promptHash, startTime) {
  const accumulated = streamAccumulator(stream, witness, model, promptHash, startTime);

  return new Proxy(stream, {
    get(target, prop) {
      // Iteration is routed through the accumulator so witnessing happens.
      if (prop === Symbol.asyncIterator) {
        return () => accumulated;
      }

      const member = Reflect.get(target, prop);
      if (typeof member !== "function") return member;

      if (prop === "finalMessage") {
        // NOTE(review): witnessing currently happens only via iteration of
        // the accumulator; a caller that awaits finalMessage() WITHOUT
        // iterating is not witnessed on this path — confirm whether it
        // needs its own record. Behavior preserved: plain pass-through.
        return async function wrappedFinalMessage() {
          const assembled = await member.call(target);
          return assembled;
        };
      }

      return member.bind(target);
    },
  });
}
|
|
190
|
+
// ── Factor Extraction ──────────────────────────────────────────────
|
|
191
|
+
/**
 * Flatten an Anthropic request's system prompt plus messages into a single
 * string for hashing. Only text is collected — non-text content blocks
 * (tool_use, images, …) are skipped — and parts are joined with newlines.
 */
function extractPromptText(messages, system = "") {
  const collected = [];

  // Push the text of a content block if (and only if) it is a text block.
  const pushTextBlock = (block) => {
    if (typeof block === "object" && block !== null) {
      const candidate = block;
      if (candidate.type === "text" && typeof candidate.text === "string") {
        collected.push(candidate.text);
      }
    }
  };

  // System prompt: either a bare (non-empty) string or an array of blocks.
  if (typeof system === "string" && system) {
    collected.push(system);
  } else if (Array.isArray(system)) {
    system.forEach(pushTextBlock);
  }

  // Conversation messages: content may be a string or an array of blocks.
  if (Array.isArray(messages)) {
    for (const entry of messages) {
      if (typeof entry !== "object" || entry === null) continue;
      const content = entry.content;
      if (typeof content === "string") {
        collected.push(content);
      } else if (Array.isArray(content)) {
        content.forEach(pushTextBlock);
      }
    }
  }

  return collected.join("\n");
}
|
|
231
|
+
/**
 * Build a witness record from a completed (non-streaming) Anthropic
 * Message: hash the concatenated text blocks, carry token usage through,
 * and derive a refusal flag from the stop reason.
 */
function extractRecord(response, model, promptHash, elapsedMs) {
  const message = response;

  // Concatenate text content blocks; non-text blocks (tool_use, …) skipped.
  let responseText = "";
  if (Array.isArray(message.content)) {
    responseText = message.content
      .filter((block) => block.type === "text" && typeof block.text === "string")
      .map((block) => block.text)
      .join("\n");
  }

  // Any stop reason outside Anthropic's documented set counts as a refusal.
  const normalStops = ["end_turn", "max_tokens", "stop_sequence", "tool_use"];
  const stopReason = message.stop_reason ?? "";
  const usage = message.usage;
  const actualModel = message.model ?? model;

  return {
    modelId: actualModel,
    modelHash: sha256Truncated(actualModel),
    promptHash,
    responseHash: sha256Truncated(responseText),
    latencyMs: elapsedMs,
    inputTokens: usage?.input_tokens,
    outputTokens: usage?.output_tokens,
    guardrailsActive: 0,
    guardrailsRequired: 0,
    guardrailPassed: true,
    hasRefusal: !normalStops.includes(stopReason),
    provider: "anthropic",
    guardrailNames: [],
  };
}
|
|
267
|
+
//# sourceMappingURL=anthropic.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"anthropic.js","sourceRoot":"","sources":["../../src/adapters/anthropic.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AAEH,OAAO,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AAIpD;;GAEG;AACH,MAAM,UAAU,aAAa,CAAC,MAAe,EAAE,OAAgB;IAC7D,OAAO,IAAI,KAAK,CAAC,MAAgB,EAAE;QACjC,GAAG,CAAC,MAAc,EAAE,IAAqB;YACvC,IAAI,OAAO,IAAI,KAAK,QAAQ;gBAAE,OAAO,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;YAC/D,MAAM,IAAI,GAAG,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;YAEvC,IAAI,IAAI,KAAK,UAAU,EAAE,CAAC;gBACxB,OAAO,mBAAmB,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;YAC5C,CAAC;YACD,OAAO,IAAI,CAAC;QACd,CAAC;KACF,CAAC,CAAC;AACL,CAAC;AAED,SAAS,mBAAmB,CAAC,QAAiB,EAAE,OAAgB;IAC9D,OAAO,IAAI,KAAK,CAAC,QAAkB,EAAE;QACnC,GAAG,CAAC,MAAc,EAAE,IAAqB;YACvC,IAAI,OAAO,IAAI,KAAK,QAAQ;gBAAE,OAAO,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;YAC/D,MAAM,IAAI,GAAG,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;YAEvC,IAAI,IAAI,KAAK,QAAQ,EAAE,CAAC;gBACtB,OAAO,iBAAiB,CAAC,IAAuC,EAAE,OAAO,CAAC,CAAC;YAC7E,CAAC;YAED,4DAA4D;YAC5D,IAAI,IAAI,KAAK,QAAQ,EAAE,CAAC;gBACtB,OAAO,uBAAuB,CAAC,IAAuC,EAAE,OAAO,CAAC,CAAC;YACnF,CAAC;YAED,OAAO,IAAI,CAAC;QACd,CAAC;KACF,CAAC,CAAC;AACL,CAAC;AAED,uEAAuE;AAEvE,SAAS,iBAAiB,CACxB,UAA2C,EAC3C,OAAgB;IAEhB,OAAO,SAAS,iBAAiB,CAAgB,GAAG,IAAe;QACjE,MAAM,MAAM,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,EAAE,CAA4B,CAAC;QAC1D,MAAM,QAAQ,GAAG,MAAM,CAAC,QAAqB,CAAC;QAC9C,MAAM,MAAM,GAAG,MAAM,CAAC,MAAiB,CAAC;QACxC,MAAM,KAAK,GAAI,MAAM,CAAC,KAAgB,IAAI,SAAS,CAAC;QACpD,MAAM,WAAW,GAAG,MAAM,CAAC,MAAM,KAAK,IAAI,CAAC;QAE3C,MAAM,UAAU,GAAG,iBAAiB,CAAC,QAAQ,EAAE,MAAM,CAAC,CAAC;QACvD,MAAM,UAAU,GAAG,eAAe,CAAC,UAAU,CAAC,CAAC;QAC/C,MAAM,KAAK,GAAG,WAAW,CAAC,GAAG,EAAE,CAAC;QAEhC,MAAM,MAAM,GAAG,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,GAAG,IAAI,CAAC,CAAC;QAE9C,IAAI,WAAW,EAAE,CAAC;YAChB,4DAA4D;YAC5D,OAAO,eAAe,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,UAAU,EAAE,KAAK,CAAC,CAAC;QACpE,CAAC;QAED,6CAA6C;QAC7C,OAAQ,MAA2B,CAAC,IAAI,CAAC,CAAC,QAAiB,EAAE,EAAE;YAC7D,MAAM,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,GAAG,EAAE,GAAG,KAAK,CAAC,CAAC;YACxD,MAAM,MAAM
,GAAG,aAAa,CAAC,QAAQ,EAAE,KAAK,EAAE,UAAU,EAAE,SAAS,CAAC,CAAC;YACrE,OAAO,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YACvB,OAAO,QAAQ,CAAC;QAClB,CAAC,CAAC,CAAC;IACL,CAAC,CAAC;AACJ,CAAC;AAED,uEAAuE;AAEvE,SAAS,uBAAuB,CAC9B,UAA2C,EAC3C,OAAgB;IAEhB,OAAO,SAAS,iBAAiB,CAAgB,GAAG,IAAe;QACjE,MAAM,MAAM,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,EAAE,CAA4B,CAAC;QAC1D,MAAM,QAAQ,GAAG,MAAM,CAAC,QAAqB,CAAC;QAC9C,MAAM,MAAM,GAAG,MAAM,CAAC,MAAiB,CAAC;QACxC,MAAM,KAAK,GAAI,MAAM,CAAC,KAAgB,IAAI,SAAS,CAAC;QAEpD,MAAM,UAAU,GAAG,iBAAiB,CAAC,QAAQ,EAAE,MAAM,CAAC,CAAC;QACvD,MAAM,UAAU,GAAG,eAAe,CAAC,UAAU,CAAC,CAAC;QAC/C,MAAM,KAAK,GAAG,WAAW,CAAC,GAAG,EAAE,CAAC;QAEhC,MAAM,MAAM,GAAG,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,GAAG,IAAI,CAAC,CAAC;QAC9C,OAAO,eAAe,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,UAAU,EAAE,KAAK,CAAC,CAAC;IACpE,CAAC,CAAC;AACJ,CAAC;AAED,uEAAuE;AAEvE,KAAK,SAAS,CAAC,CAAC,iBAAiB,CAC/B,MAA8B,EAC9B,OAAgB,EAChB,KAAa,EACb,UAAkB,EAClB,SAAiB;IAEjB,MAAM,SAAS,GAAa,EAAE,CAAC;IAC/B,IAAI,WAAW,GAAG,KAAK,CAAC;IACxB,IAAI,UAAU,GAAG,EAAE,CAAC;IACpB,IAAI,WAA+B,CAAC;IACpC,IAAI,YAAgC,CAAC;IAErC,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,MAAM,EAAE,CAAC;QACjC,iCAAiC;QACjC,MAAM,KAAK,CAAC;QAEZ,MAAM,CAAC,GAAG,KAAgC,CAAC;QAC3C,MAAM,IAAI,GAAG,CAAC,CAAC,IAAc,CAAC;QAE9B,IAAI,IAAI,KAAK,eAAe,EAAE,CAAC;YAC7B,MAAM,GAAG,GAAG,CAAC,CAAC,OAA8C,CAAC;YAC7D,IAAI,GAAG,EAAE,KAAK;gBAAE,WAAW,GAAG,GAAG,CAAC,KAAe,CAAC;YAClD,MAAM,KAAK,GAAG,GAAG,EAAE,KAA4C,CAAC;YAChE,IAAI,KAAK,EAAE,YAAY;gBAAE,WAAW,GAAG,KAAK,CAAC,YAAsB,CAAC;QACtE,CAAC;QAED,IAAI,IAAI,KAAK,qBAAqB,EAAE,CAAC;YACnC,MAAM,KAAK,GAAG,CAAC,CAAC,KAA4C,CAAC;YAC7D,IAAI,KAAK,EAAE,IAAI,KAAK,YAAY,IAAI,KAAK,EAAE,IAAI,EAAE,CAAC;gBAChD,SAAS,CAAC,IAAI,CAAC,KAAK,CAAC,IAAc,CAAC,CAAC;YACvC,CAAC;QACH,CAAC;QAED,IAAI,IAAI,KAAK,eAAe,EAAE,CAAC;YAC7B,MAAM,KAAK,GAAG,CAAC,CAAC,KAA4C,CAAC;YAC7D,IAAI,KAAK,EAAE,WAAW;gBAAE,UAAU,GAAG,KAAK,CAAC,WAAqB,CAAC;YACjE,MAAM,KAAK,GAAG,CAAC,CAAC,KAA4C,CAAC;YAC7D,IAAI,KAAK,EAAE,aAAa;gBAAE,YAAY,GAAG,KAAK,CAAC,aAAuB,CAAC;QACzE,CAAC;IACH,CAAC;IAED,4BAA4B;IAC5B,MAAM,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,GAAG,EAAE,GA
AG,SAAS,CAAC,CAAC;IAC5D,MAAM,YAAY,GAAG,SAAS,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;IACxC,MAAM,UAAU,GAAG,CAAC,CAAC,UAAU,EAAE,YAAY,EAAE,eAAe,EAAE,UAAU,CAAC,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAEjG,MAAM,MAAM,GAAoB;QAC9B,OAAO,EAAE,WAAW;QACpB,SAAS,EAAE,eAAe,CAAC,WAAW,CAAC;QACvC,UAAU;QACV,YAAY,EAAE,eAAe,CAAC,YAAY,CAAC;QAC3C,SAAS,EAAE,SAAS;QACpB,WAAW;QACX,YAAY;QACZ,gBAAgB,EAAE,CAAC;QACnB,kBAAkB,EAAE,CAAC;QACrB,eAAe,EAAE,IAAI;QACrB,UAAU;QACV,QAAQ,EAAE,WAAW;QACrB,cAAc,EAAE,EAAE;KACnB,CAAC;IAEF,OAAO,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;AACzB,CAAC;AAED,SAAS,eAAe,CACtB,YAAqB,EACrB,OAAgB,EAChB,KAAa,EACb,UAAkB,EAClB,SAAiB;IAEjB,wEAAwE;IACxE,4DAA4D;IAC5D,IAAI,YAAY,IAAI,OAAQ,YAAiC,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;QAClF,OAAQ,YAAiC,CAAC,IAAI,CAAC,CAAC,MAAe,EAAE,EAAE,CACjE,mBAAmB,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,UAAU,EAAE,SAAS,CAAC,CACnE,CAAC;IACJ,CAAC;IACD,OAAO,mBAAmB,CAAC,YAAY,EAAE,OAAO,EAAE,KAAK,EAAE,UAAU,EAAE,SAAS,CAAC,CAAC;AAClF,CAAC;AAED,SAAS,mBAAmB,CAC1B,MAAe,EACf,OAAgB,EAChB,KAAa,EACb,UAAkB,EAClB,SAAiB;IAEjB,MAAM,CAAC,GAAG,MAA0C,CAAC;IAErD,MAAM,GAAG,GAAG,iBAAiB,CAC3B,CAAsC,EACtC,OAAO,EACP,KAAK,EACL,UAAU,EACV,SAAS,CACV,CAAC;IAEF,OAAO,IAAI,KAAK,CAAC,CAAC,EAAE;QAClB,GAAG,CAAC,MAAwC,EAAE,IAAqB;YACjE,IAAI,IAAI,KAAK,MAAM,CAAC,aAAa,EAAE,CAAC;gBAClC,OAAO,GAAG,EAAE,CAAC,GAAG,CAAC;YACnB,CAAC;YAED,MAAM,KAAK,GAAG,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;YACxC,IAAI,OAAO,KAAK,KAAK,UAAU,EAAE,CAAC;gBAChC,4DAA4D;gBAC5D,IAAI,IAAI,KAAK,cAAc,EAAE,CAAC;oBAC5B,OAAO,KAAK,UAAU,mBAAmB;wBACvC,MAAM,GAAG,GAAG,MAAO,KAAgC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;wBACjE,oEAAoE;wBACpE,iEAAiE;wBACjE,yCAAyC;wBACzC,OAAO,GAAG,CAAC;oBACb,CAAC,CAAC;gBACJ,CAAC;gBACD,OAAQ,KAAkB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAC1C,CAAC;YACD,OAAO,KAAK,CAAC;QACf,CAAC;KACF,CAAC,CAAC;AACL,CAAC;AAED,sEAAsE;AAEtE,SAAS,iBAAiB,CAAC,QAAiB,EAAE,SAAkB,EAAE;IAChE,MAAM,KAAK,GAAa,EAAE,CAAC;IAE3B,gBAAgB;IAChB,IAAI,OAAO,MAAM,KAAK,QAAQ,IAAI,MAAM,EAAE,CAAC;QACzC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;IACrB,CAAC;SAAM,IAAI,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC;QACjC,KAAK,MAAM,KAAK,IA
AI,MAAM,EAAE,CAAC;YAC3B,IAAI,OAAO,KAAK,KAAK,QAAQ,IAAI,KAAK,KAAK,IAAI,EAAE,CAAC;gBAChD,MAAM,CAAC,GAAG,KAAgC,CAAC;gBAC3C,IAAI,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,OAAO,CAAC,CAAC,IAAI,KAAK,QAAQ,EAAE,CAAC;oBACpD,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;gBACrB,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC;IAED,WAAW;IACX,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC5B,KAAK,MAAM,GAAG,IAAI,QAAQ,EAAE,CAAC;YAC3B,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK,IAAI,EAAE,CAAC;gBAC5C,MAAM,CAAC,GAAG,GAA8B,CAAC;gBACzC,MAAM,OAAO,GAAG,CAAC,CAAC,OAAO,CAAC;gBAC1B,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE,CAAC;oBAChC,KAAK,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;gBACtB,CAAC;qBAAM,IAAI,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC;oBAClC,KAAK,MAAM,KAAK,IAAI,OAAO,EAAE,CAAC;wBAC5B,IAAI,OAAO,KAAK,KAAK,QAAQ,IAAI,KAAK,KAAK,IAAI,EAAE,CAAC;4BAChD,MAAM,CAAC,GAAG,KAAgC,CAAC;4BAC3C,IAAI,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,OAAO,CAAC,CAAC,IAAI,KAAK,QAAQ,EAAE,CAAC;gCACpD,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;4BACrB,CAAC;wBACH,CAAC;oBACH,CAAC;gBACH,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC;IAED,OAAO,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;AAC1B,CAAC;AAED,SAAS,aAAa,CACpB,QAAiB,EACjB,KAAa,EACb,UAAkB,EAClB,SAAiB;IAEjB,MAAM,CAAC,GAAG,QAAmC,CAAC;IAE9C,mCAAmC;IACnC,IAAI,YAAY,GAAG,EAAE,CAAC;IACtB,MAAM,aAAa,GAAG,CAAC,CAAC,OAAqD,CAAC;IAC9E,IAAI,KAAK,CAAC,OAAO,CAAC,aAAa,CAAC,EAAE,CAAC;QACjC,MAAM,KAAK,GAAa,EAAE,CAAC;QAC3B,KAAK,MAAM,KAAK,IAAI,aAAa,EAAE,CAAC;YAClC,IAAI,KAAK,CAAC,IAAI,KAAK,MAAM,IAAI,OAAO,KAAK,CAAC,IAAI,KAAK,QAAQ,EAAE,CAAC;gBAC5D,KAAK,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;YACzB,CAAC;QACH,CAAC;QACD,YAAY,GAAG,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;IAClC,CAAC;IAED,MAAM,UAAU,GAAI,CAAC,CAAC,WAAsB,IAAI,EAAE,CAAC;IACnD,MAAM,UAAU,GAAG,CAAC,CAAC,UAAU,EAAE,YAAY,EAAE,eAAe,EAAE,UAAU,CAAC,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAEjG,MAAM,KAAK,GAAG,CAAC,CAAC,KAA4C,CAAC;IAC7D,MAAM,WAAW,GAAG,KAAK,EAAE,YAAkC,CAAC;IAC9D,MAAM,YAAY,GAAG,KAAK,EAAE,aAAmC,CAAC;IAEhE,MAAM,WAAW,GAAI,CAAC,CAAC,KAAgB,IAAI,KAAK,CAAC;IAEjD,OAAO;QACL,OAAO,EAAE,WAAW;QACpB,SAAS,EAAE,eAAe,CAAC,WAAW,CAAC;QACvC,UAAU;Q
ACV,YAAY,EAAE,eAAe,CAAC,YAAY,CAAC;QAC3C,SAAS,EAAE,SAAS;QACpB,WAAW;QACX,YAAY;QACZ,gBAAgB,EAAE,CAAC;QACnB,kBAAkB,EAAE,CAAC;QACrB,eAAe,EAAE,IAAI;QACrB,UAAU;QACV,QAAQ,EAAE,WAAW;QACrB,cAAc,EAAE,EAAE;KACnB,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SWT3 AI Witness SDK — OpenAI Adapter (ES6 Proxy).
|
|
3
|
+
*
|
|
4
|
+
* Uses JavaScript's native Proxy to intercept property access on the
|
|
5
|
+
* OpenAI client, following the chain: client.chat.completions.create()
|
|
6
|
+
*
|
|
7
|
+
* Handles both:
|
|
8
|
+
* - Non-streaming: ChatCompletion response object
|
|
9
|
+
* - Streaming: AsyncIterable<ChatCompletionChunk> — accumulates chunks
|
|
10
|
+
* for hashing, then witnesses after stream completes
|
|
11
|
+
*
|
|
12
|
+
* The developer's code sees zero difference from using the raw client.
|
|
13
|
+
*/
|
|
14
|
+
import type { Witness } from "../witness.js";
|
|
15
|
+
/**
|
|
16
|
+
* Wrap an OpenAI client with an ES6 Proxy for transparent witnessing.
|
|
17
|
+
*/
|
|
18
|
+
export declare function wrapOpenAI(client: unknown, witness: Witness): unknown;
|
|
19
|
+
//# sourceMappingURL=openai.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../src/adapters/openai.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAIH,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,eAAe,CAAC;AAU7C;;GAEG;AACH,wBAAgB,UAAU,CAAC,MAAM,EAAE,OAAO,EAAE,OAAO,EAAE,OAAO,GAAG,OAAO,CAErE"}
|