@diabolicallabs/llm-client 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +120 -0
- package/dist/index.d.ts +101 -0
- package/dist/index.js +916 -0
- package/dist/index.js.map +1 -0
- package/package.json +51 -0
package/README.md
ADDED
@@ -0,0 +1,120 @@
# @diabolicallabs/llm-client

Unified LLM API across Anthropic, OpenAI, Google Gemini, and DeepSeek. Single interface for completion, streaming, and structured output. All provider errors are normalized into a consistent `LlmError` shape. © Diabolical Labs

**Pre-1.0. APIs may change between minor versions.**

## Status

**Published — v0.1.0.** The four primary providers (Anthropic, OpenAI, Gemini, DeepSeek) are implemented. A fifth provider (Perplexity) is a stub and will be implemented in a future release.

## Install

```bash
# .npmrc must point the @diabolicallabs scope at GitHub Packages
pnpm add @diabolicallabs/llm-client
```

Consumer `.npmrc`:

```
@diabolicallabs:registry=https://npm.pkg.github.com
//npm.pkg.github.com/:_authToken=${GITHUB_TOKEN}
```

## Usage

```typescript
import { createClient, createClientFromEnv } from '@diabolicallabs/llm-client';

// From explicit config
const client = createClient({
  provider: 'anthropic',
  model: 'claude-sonnet-4-6',
  apiKey: process.env.ANTHROPIC_API_KEY!,
});

// From environment variables
const envClient = createClientFromEnv('anthropic', 'claude-sonnet-4-6');

// Non-streaming completion
const response = await client.complete([
  { role: 'user', content: 'Hello' },
]);
console.log(response.content, response.usage);

// Streaming
for await (const chunk of client.stream([{ role: 'user', content: 'Hello' }])) {
  process.stdout.write(chunk.token);
}

// Structured output (Zod schema)
import { z } from 'zod';
const schema = z.object({ name: z.string(), score: z.number() });
const result = await client.structured(messages, schema);
// result.data is typed as { name: string; score: number }
```

## Provider universe

| Provider | Status | Env var |
|---|---|---|
| `anthropic` | Implemented | `ANTHROPIC_API_KEY` |
| `openai` | Implemented | `OPENAI_API_KEY` |
| `gemini` | Implemented | `GOOGLE_AI_API_KEY` |
| `deepseek` | Implemented | `DEEPSEEK_API_KEY` |
| `perplexity` | Stub — throws `LlmError` | — |

## API

### `createClient(config: LlmClientConfig): LlmClient`

Creates an `LlmClient` for the given provider.

### `createClientFromEnv(provider, model, overrides?): LlmClient`

Reads the API key from the environment automatically:

- `anthropic` → `ANTHROPIC_API_KEY`
- `openai` → `OPENAI_API_KEY`
- `gemini` → `GOOGLE_AI_API_KEY`
- `deepseek` → `DEEPSEEK_API_KEY`
- `perplexity` → `PERPLEXITY_API_KEY` (stub)

### `LlmClient` interface

| Method | Description |
|---|---|
| `complete(messages, options?)` | Non-streaming completion. Returns `LlmResponse`. |
| `stream(messages, options?)` | Streaming — async generator of `LlmStreamChunk`. The final chunk includes `usage`. |
| `structured(messages, schema, options?)` | Structured output validated against a Zod schema. Returns `LlmStructuredResponse<T>`. |

## Error handling

All provider errors are normalized into `LlmError`:

```typescript
import { LlmError } from '@diabolicallabs/llm-client';

try {
  const response = await client.complete(messages);
} catch (err) {
  if (err instanceof LlmError) {
    console.error(err.provider, err.statusCode, err.retryable);
  }
}
```

Retryable errors (429, 5xx, network failures) are retried automatically with exponential backoff and full jitter before throwing.
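The schedule behind that sentence can be sketched directly from the `computeBackoffMs` helper that ships in `dist/index.js` (reproduced later in this diff). This is an editorial sketch, not part of the published README:

```typescript
// Full-jitter backoff, mirroring computeBackoffMs in dist/index.js below.
// Each delay is drawn uniformly from [0, baseDelayMs * 2^attempt).
function backoffMs(attempt: number, baseDelayMs: number): number {
  return Math.random() * (baseDelayMs * 2 ** attempt);
}

// With the package defaults (maxRetries: 3, baseDelayMs: 1000), the delay
// ceilings before retries 1, 2, and 3 are 1000 ms, 2000 ms, and 4000 ms.
for (let attempt = 0; attempt < 3; attempt++) {
  console.log(`retry ${attempt + 1}: sleep up to ${backoffMs(attempt, 1000).toFixed(0)} ms`);
}
```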
## Token normalization

All providers return `LlmUsage` in a consistent shape regardless of the underlying API's field names:

```typescript
interface LlmUsage {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
  cacheCreationTokens?: number; // Anthropic prompt cache only
  cacheReadTokens?: number;     // Anthropic prompt cache only
}
```
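To make the mapping concrete, here is an editorial sketch (not part of the published README) of how the bundle's `normalizeUsage` turns Anthropic's raw snake_case fields into this shape; the sample numbers are invented:

```typescript
// Raw usage as Anthropic's API reports it (illustrative values).
const raw = { input_tokens: 12, output_tokens: 40, cache_read_input_tokens: 8 };

const usage: LlmUsage = {
  inputTokens: raw.input_tokens ?? 0,
  outputTokens: raw.output_tokens ?? 0,
  totalTokens: (raw.input_tokens ?? 0) + (raw.output_tokens ?? 0),
  cacheReadTokens: raw.cache_read_input_tokens, // Anthropic prompt cache only
};
```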
package/dist/index.d.ts
ADDED
@@ -0,0 +1,101 @@
/**
 * Core type definitions for @diabolicallabs/llm-client.
 * These are the stable public API surface — implementation is in Week 2.
 * Types here match the spec in briefs/brief-platform.md §4.1 exactly.
 */
interface LlmMessage {
    role: 'system' | 'user' | 'assistant';
    content: string;
}
interface LlmClientConfig {
    provider: 'anthropic' | 'openai' | 'gemini' | 'deepseek' | 'perplexity';
    model: string;
    apiKey: string;
    maxRetries?: number;
    baseDelayMs?: number;
    maxTokens?: number;
    temperature?: number;
    timeoutMs?: number;
}
interface LlmUsage {
    inputTokens: number;
    outputTokens: number;
    totalTokens: number;
    cacheCreationTokens?: number;
    cacheReadTokens?: number;
}
interface LlmResponse {
    content: string;
    model: string;
    usage: LlmUsage;
    latencyMs: number;
}
interface LlmStreamChunk {
    token: string;
    usage?: LlmUsage;
}
declare class LlmError extends Error {
    readonly name = "LlmError";
    readonly provider: string;
    readonly statusCode: number | undefined;
    readonly retryable: boolean;
    readonly cause: unknown;
    constructor(opts: {
        message: string;
        provider: string;
        statusCode?: number;
        retryable: boolean;
        cause?: unknown;
    });
}
type LlmStructuredResponse<T> = {
    data: T;
    usage: LlmUsage;
    latencyMs: number;
};
interface LlmClient {
    readonly config: Readonly<LlmClientConfig>;
    complete(messages: LlmMessage[], options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>): Promise<LlmResponse>;
    stream(messages: LlmMessage[], options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>): AsyncGenerator<LlmStreamChunk>;
    structured<T>(messages: LlmMessage[], schema: {
        parse: (data: unknown) => T;
    }, options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>): Promise<LlmStructuredResponse<T>>;
}

/**
 * Factory functions for LlmClient.
 *
 * createClient — dispatches to the correct provider implementation.
 * createClientFromEnv — convenience wrapper that reads API keys from env vars.
 *
 * Provider dispatch:
 *   'anthropic'  → fully implemented (Week 2)
 *   'openai'     → fully implemented (Week 2)
 *   'gemini'     → fully implemented (Week 3)
 *   'deepseek'   → fully implemented (Week 3)
 *   'perplexity' → stub, throws "not yet implemented" (later week)
 */

/**
 * Create an LlmClient for the given provider and config.
 * Dispatches to the provider-specific implementation.
 *
 * Anthropic, OpenAI, Gemini, and DeepSeek are fully implemented.
 * Perplexity is a type-registered stub that throws "not yet implemented".
 */
declare function createClient(config: LlmClientConfig): LlmClient;
/**
 * Convenience: create an LlmClient from environment variables.
 *
 * Reads API keys from the environment based on provider:
 *   anthropic  → ANTHROPIC_API_KEY
 *   openai     → OPENAI_API_KEY
 *   gemini     → GOOGLE_AI_API_KEY
 *   deepseek   → DEEPSEEK_API_KEY
 *   perplexity → PERPLEXITY_API_KEY
 *
 * Throws LlmError if the required env var is not set.
 */
declare function createClientFromEnv(provider: LlmClientConfig['provider'], model: string, overrides?: Partial<Omit<LlmClientConfig, 'provider' | 'model' | 'apiKey'>>): LlmClient;

export { type LlmClient, type LlmClientConfig, LlmError, type LlmMessage, type LlmResponse, type LlmStreamChunk, type LlmStructuredResponse, type LlmUsage, createClient, createClientFromEnv };
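Note that the declared `schema` parameter is deliberately narrower than Zod's `ZodType`: anything exposing `parse(data: unknown) => T` satisfies it. An editorial sketch with a hand-rolled validator (names are illustrative, not part of the package):

```typescript
interface Person { name: string }

// Any object with a throwing parse() works in place of a Zod schema.
const personSchema = {
  parse(data: unknown): Person {
    if (typeof data === 'object' && data !== null && typeof (data as Person).name === 'string') {
      return data as Person;
    }
    throw new Error('expected { name: string }');
  },
};

// const result = await client.structured(messages, personSchema);
// result.data is typed as Person
```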
package/dist/index.js
ADDED
@@ -0,0 +1,916 @@
// src/providers/anthropic.ts
import Anthropic from "@anthropic-ai/sdk";

// src/types.ts
var LlmError = class extends Error {
  name = "LlmError";
  provider;
  statusCode;
  retryable;
  // `cause` is declared on Error in lib.es2022.error.d.ts as `cause?: unknown`
  // We override it here to make it always present (not optional) after construction.
  cause;
  constructor(opts) {
    super(opts.message, { cause: opts.cause });
    this.provider = opts.provider;
    this.statusCode = opts.statusCode;
    this.retryable = opts.retryable;
    this.cause = opts.cause;
  }
};

// src/retry.ts
var RETRYABLE_HTTP_STATUSES = /* @__PURE__ */ new Set([429, 502, 503, 504]);
var RETRYABLE_ERROR_CODES = /* @__PURE__ */ new Set(["ECONNRESET", "ETIMEDOUT", "ECONNABORTED"]);
var NON_RETRYABLE_HTTP_STATUSES = /* @__PURE__ */ new Set([400, 401, 403, 404]);
function isRetryableStatus(statusCode) {
  if (RETRYABLE_HTTP_STATUSES.has(statusCode)) return true;
  if (NON_RETRYABLE_HTTP_STATUSES.has(statusCode)) return false;
  return statusCode >= 500;
}
function isRetryableErrorCode(code) {
  return RETRYABLE_ERROR_CODES.has(code);
}
function computeBackoffMs(attempt, baseDelayMs) {
  const ceiling = baseDelayMs * 2 ** attempt;
  return Math.random() * ceiling;
}
async function withRetry(fn, opts) {
  let lastError;
  for (let attempt = 0; attempt <= opts.maxRetries; attempt++) {
    try {
      return await fn(attempt);
    } catch (err) {
      const llmErr = normalizeThrownError(err, opts.provider);
      if (!llmErr.retryable || attempt === opts.maxRetries) {
        throw llmErr;
      }
      lastError = llmErr;
      const delayMs = computeBackoffMs(attempt, opts.baseDelayMs);
      await sleep(delayMs);
    }
  }
  throw lastError ?? new LlmError({
    message: "Unexpected retry exhaustion",
    provider: opts.provider,
    retryable: false
  });
}
function normalizeThrownError(err, provider) {
  if (err instanceof LlmError) return err;
  if (err instanceof Error) {
    const errWithCode = err;
    const statusCode = errWithCode.status ?? errWithCode.statusCode;
    if (errWithCode.code !== void 0 && isRetryableErrorCode(errWithCode.code)) {
      if (statusCode !== void 0) {
        return new LlmError({
          message: err.message,
          provider,
          statusCode,
          retryable: true,
          cause: err
        });
      }
      return new LlmError({ message: err.message, provider, retryable: true, cause: err });
    }
    if (statusCode !== void 0) {
      return new LlmError({
        message: err.message,
        provider,
        statusCode,
        retryable: isRetryableStatus(statusCode),
        cause: err
      });
    }
    return new LlmError({
      message: err.message,
      provider,
      retryable: false,
      cause: err
    });
  }
  return new LlmError({
    message: String(err),
    provider,
    retryable: false,
    cause: err
  });
}
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
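A usage sketch for `withRetry` above, editorial and not part of `dist/index.js`; the URL is a placeholder and the `status` property is attached because `normalizeThrownError` reads it to decide retryability:

```typescript
// Wrap any async call; non-retryable failures and exhausted retries
// surface as a normalized LlmError.
async function fetchWithRetry(): Promise<string> {
  return withRetry(
    async () => {
      const res = await fetch('https://example.invalid/api');
      if (!res.ok) {
        throw Object.assign(new Error(`HTTP ${res.status}`), { status: res.status });
      }
      return res.text();
    },
    { maxRetries: 3, baseDelayMs: 1000, provider: 'example' }
  );
}
```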
// src/providers/anthropic.ts
var PROVIDER = "anthropic";
function normalizeUsage(usage) {
  const inputTokens = usage?.input_tokens ?? 0;
  const outputTokens = usage?.output_tokens ?? 0;
  return {
    inputTokens,
    outputTokens,
    totalTokens: inputTokens + outputTokens,
    // cache_creation_input_tokens and cache_read_input_tokens are present on
    // extended usage objects from prompt caching — cast to access them safely.
    cacheCreationTokens: usage?.cache_creation_input_tokens,
    cacheReadTokens: usage?.cache_read_input_tokens
  };
}
function buildAnthropicMessages(messages) {
  const systemMessages = messages.filter((m) => m.role === "system");
  const conversationMessages = messages.filter((m) => m.role !== "system");
  const system = systemMessages.length > 0 ? systemMessages.map((m) => m.content).join("\n") : void 0;
  const anthropicMessages = conversationMessages.map((m) => ({
    role: m.role,
    content: m.content
  }));
  return { system, messages: anthropicMessages };
}
function normalizeAnthropicError(err) {
  if (err instanceof LlmError) return err;
  if (typeof Anthropic.APIConnectionError === "function" && err instanceof Anthropic.APIConnectionError) {
    return new LlmError({
      message: err.message,
      provider: PROVIDER,
      retryable: true,
      cause: err
    });
  }
  if (typeof Anthropic.APIError === "function" && err instanceof Anthropic.APIError) {
    const status = err.status;
    if (status !== void 0) {
      const retryable = [429, 502, 503, 504].includes(status) || status >= 500;
      return new LlmError({
        message: err.message,
        provider: PROVIDER,
        statusCode: status,
        retryable,
        cause: err
      });
    }
    return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });
  }
  return normalizeThrownError(err, PROVIDER);
}
function createAnthropicProvider(config) {
  const client = new Anthropic({
    apiKey: config.apiKey,
    timeout: config.timeoutMs ?? 3e4,
    maxRetries: 0
    // We manage retries ourselves via withRetry
  });
  const retryOpts = {
    maxRetries: config.maxRetries ?? 3,
    baseDelayMs: config.baseDelayMs ?? 1e3,
    provider: PROVIDER
  };
  async function complete(messages, options) {
    const model = options?.model ?? config.model;
    const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);
    const start = Date.now();
    return withRetry(async () => {
      try {
        const params = {
          model,
          messages: anthropicMessages,
          max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024
        };
        if (system !== void 0) params.system = system;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) {
          params.temperature = temperature;
        }
        const response = await client.messages.create(params);
        const content = response.content.filter((block) => block.type === "text").map((block) => block.text).join("");
        return {
          content,
          model: response.model,
          usage: normalizeUsage(response.usage),
          latencyMs: Date.now() - start
        };
      } catch (err) {
        throw normalizeAnthropicError(err);
      }
    }, retryOpts);
  }
  async function* stream(messages, options) {
    const model = options?.model ?? config.model;
    const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);
    const params = {
      model,
      messages: anthropicMessages,
      max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024
    };
    if (system !== void 0) params.system = system;
    const streamTemperature = options?.temperature ?? config.temperature;
    if (streamTemperature !== void 0) {
      params.temperature = streamTemperature;
    }
    let sdkStream;
    try {
      sdkStream = client.messages.stream(params);
    } catch (err) {
      throw normalizeAnthropicError(err);
    }
    let finalUsage;
    try {
      for await (const event of sdkStream) {
        if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
          yield { token: event.delta.text };
        } else if (event.type === "message_delta" && "usage" in event) {
          const accum = await sdkStream.finalMessage();
          finalUsage = normalizeUsage(accum.usage);
        }
      }
    } catch (err) {
      throw normalizeAnthropicError(err);
    }
    if (finalUsage !== void 0) {
      yield { token: "", usage: finalUsage };
    }
  }
  async function structured(messages, schema, options) {
    const jsonSystemInstruction = {
      role: "system",
      content: "You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse()."
    };
    const augmentedMessages = [jsonSystemInstruction, ...messages];
    const start = Date.now();
    const response = await complete(augmentedMessages, options);
    let parsed;
    try {
      const cleaned = response.content.replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/, "").trim();
      parsed = JSON.parse(cleaned);
    } catch (err) {
      throw new LlmError({
        message: `Anthropic structured output: response is not valid JSON. Raw: ${response.content.slice(0, 200)}`,
        provider: PROVIDER,
        retryable: false,
        cause: err
      });
    }
    let data;
    try {
      data = schema.parse(parsed);
    } catch (err) {
      throw new LlmError({
        message: `Anthropic structured output: response failed schema validation. ${String(err)}`,
        provider: PROVIDER,
        retryable: false,
        cause: err
      });
    }
    return {
      data,
      usage: response.usage,
      latencyMs: Date.now() - start
    };
  }
  return {
    config,
    complete,
    stream,
    structured
  };
}
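A worked example (editorial, not part of the bundle) of what `buildAnthropicMessages` above produces for a mixed history: system messages are hoisted and joined with `"\n"`, and the rest pass through in order.

```typescript
const split = buildAnthropicMessages([
  { role: 'system', content: 'Be terse.' },
  { role: 'system', content: 'Answer in English.' },
  { role: 'user', content: 'Hello' },
]);
// split.system   === 'Be terse.\nAnswer in English.'
// split.messages === [{ role: 'user', content: 'Hello' }]
```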
// src/providers/deepseek.ts
import OpenAI from "openai";
var PROVIDER2 = "deepseek";
var DEEPSEEK_BASE_URL = "https://api.deepseek.com";
function normalizeUsage2(usage) {
  const inputTokens = usage?.prompt_tokens ?? 0;
  const outputTokens = usage?.completion_tokens ?? 0;
  return {
    inputTokens,
    outputTokens,
    totalTokens: usage?.total_tokens ?? inputTokens + outputTokens
  };
}
function buildMessages(messages) {
  return messages.map((m) => ({
    role: m.role,
    content: m.content
  }));
}
function normalizeDeepSeekError(err) {
  if (err instanceof LlmError) return err;
  if (typeof OpenAI.APIConnectionError === "function" && err instanceof OpenAI.APIConnectionError) {
    return new LlmError({
      message: err.message,
      provider: PROVIDER2,
      retryable: true,
      cause: err
    });
  }
  if (typeof OpenAI.APIError === "function" && err instanceof OpenAI.APIError) {
    const status = err.status;
    if (status !== void 0) {
      const retryable = [429, 502, 503, 504].includes(status) || status >= 500;
      return new LlmError({
        message: err.message,
        provider: PROVIDER2,
        statusCode: status,
        retryable,
        cause: err
      });
    }
    return new LlmError({ message: err.message, provider: PROVIDER2, retryable: false, cause: err });
  }
  return normalizeThrownError(err, PROVIDER2);
}
function createDeepSeekProvider(config) {
  const client = new OpenAI({
    apiKey: config.apiKey,
    baseURL: DEEPSEEK_BASE_URL,
    timeout: config.timeoutMs ?? 3e4,
    maxRetries: 0
    // Retries managed by withRetry
  });
  const retryOpts = {
    maxRetries: config.maxRetries ?? 3,
    baseDelayMs: config.baseDelayMs ?? 1e3,
    provider: PROVIDER2
  };
  async function complete(messages, options) {
    const model = options?.model ?? config.model;
    const chatMessages = buildMessages(messages);
    const start = Date.now();
    return withRetry(async () => {
      try {
        const params = {
          model,
          messages: chatMessages,
          stream: false
        };
        const maxTokens = options?.maxTokens ?? config.maxTokens;
        if (maxTokens !== void 0) params.max_tokens = maxTokens;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) params.temperature = temperature;
        const response = await client.chat.completions.create(params);
        const content = response.choices.map((c) => c.message.content ?? "").join("");
        return {
          content,
          model: response.model,
          usage: normalizeUsage2(response.usage),
          latencyMs: Date.now() - start
        };
      } catch (err) {
        throw normalizeDeepSeekError(err);
      }
    }, retryOpts);
  }
  async function* stream(messages, options) {
    const model = options?.model ?? config.model;
    const chatMessages = buildMessages(messages);
    const params = {
      model,
      messages: chatMessages,
      stream: true,
      stream_options: { include_usage: true }
    };
    const maxTokens = options?.maxTokens ?? config.maxTokens;
    if (maxTokens !== void 0) params.max_tokens = maxTokens;
    const temperature = options?.temperature ?? config.temperature;
    if (temperature !== void 0) params.temperature = temperature;
    let sdkStream;
    try {
      sdkStream = await client.chat.completions.create(params);
    } catch (err) {
      throw normalizeDeepSeekError(err);
    }
    let finalUsage;
    try {
      for await (const chunk of sdkStream) {
        const delta = chunk.choices[0]?.delta.content;
        if (delta !== void 0 && delta !== null && delta.length > 0) {
          yield { token: delta };
        }
        if (chunk.usage !== void 0 && chunk.usage !== null) {
          finalUsage = normalizeUsage2(chunk.usage);
        }
      }
    } catch (err) {
      throw normalizeDeepSeekError(err);
    }
    if (finalUsage !== void 0) {
      yield { token: "", usage: finalUsage };
    }
  }
  async function structured(messages, schema, options) {
    const jsonSystemInstruction = {
      role: "system",
      content: "You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse()."
    };
    const augmentedMessages = [jsonSystemInstruction, ...messages];
    const model = options?.model ?? config.model;
    const chatMessages = buildMessages(augmentedMessages);
    const start = Date.now();
    const rawResponse = await withRetry(async () => {
      try {
        const params = {
          model,
          messages: chatMessages,
          stream: false
        };
        const maxTokens = options?.maxTokens ?? config.maxTokens;
        if (maxTokens !== void 0) params.max_tokens = maxTokens;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) params.temperature = temperature;
        return await client.chat.completions.create(params);
      } catch (err) {
        throw normalizeDeepSeekError(err);
      }
    }, retryOpts);
    const rawContent = rawResponse.choices[0]?.message.content ?? "";
    let parsed;
    try {
      const cleaned = rawContent.replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/, "").trim();
      parsed = JSON.parse(cleaned);
    } catch (err) {
      throw new LlmError({
        message: `DeepSeek structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,
        provider: PROVIDER2,
        retryable: false,
        cause: err
      });
    }
    let data;
    try {
      data = schema.parse(parsed);
    } catch (err) {
      throw new LlmError({
        message: `DeepSeek structured output: response failed schema validation. ${String(err)}`,
        provider: PROVIDER2,
        retryable: false,
        cause: err
      });
    }
    return {
      data,
      usage: normalizeUsage2(rawResponse.usage),
      latencyMs: Date.now() - start
    };
  }
  return {
    config,
    complete,
    stream,
    structured
  };
}
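The provider above is simply the OpenAI SDK pointed at DeepSeek's OpenAI-compatible endpoint. An editorial sketch of the equivalent standalone setup, using the same values as the code above:

```typescript
import OpenAI from 'openai';

const deepseek = new OpenAI({
  apiKey: process.env.DEEPSEEK_API_KEY,
  baseURL: 'https://api.deepseek.com', // DEEPSEEK_BASE_URL above
  maxRetries: 0, // the bundle layers its own withRetry on top
});
```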
// src/providers/gemini.ts
import {
  ApiError,
  GoogleGenAI
} from "@google/genai";
var PROVIDER3 = "gemini";
function normalizeUsage3(meta) {
  const inputTokens = meta?.promptTokenCount ?? 0;
  const outputTokens = meta?.candidatesTokenCount ?? 0;
  return {
    inputTokens,
    outputTokens,
    totalTokens: meta?.totalTokenCount ?? inputTokens + outputTokens
  };
}
function buildGeminiContents(messages) {
  const systemMessages = messages.filter((m) => m.role === "system");
  const conversationMessages = messages.filter((m) => m.role !== "system");
  const system = systemMessages.length > 0 ? systemMessages.map((m) => m.content).join("\n") : void 0;
  const contents = conversationMessages.map((m) => ({
    role: m.role === "assistant" ? "model" : "user",
    parts: [{ text: m.content }]
  }));
  return { system, contents };
}
function normalizeGeminiError(err) {
  if (err instanceof LlmError) return err;
  if (err instanceof ApiError) {
    const retryable = err.status === 429 || err.status >= 500;
    return new LlmError({
      message: err.message,
      provider: PROVIDER3,
      statusCode: err.status,
      retryable,
      cause: err
    });
  }
  return normalizeThrownError(err, PROVIDER3);
}
function createGeminiProvider(config) {
  const ai = new GoogleGenAI({
    apiKey: config.apiKey,
    httpOptions: {
      timeout: config.timeoutMs ?? 3e4
    }
  });
  const retryOpts = {
    maxRetries: config.maxRetries ?? 3,
    baseDelayMs: config.baseDelayMs ?? 1e3,
    provider: PROVIDER3
  };
  async function complete(messages, options) {
    const model = options?.model ?? config.model;
    const { system, contents } = buildGeminiContents(messages);
    const start = Date.now();
    return withRetry(async () => {
      try {
        const geminiConfig = {};
        if (system !== void 0) geminiConfig.systemInstruction = system;
        const maxTokens = options?.maxTokens ?? config.maxTokens;
        if (maxTokens !== void 0) geminiConfig.maxOutputTokens = maxTokens;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) geminiConfig.temperature = temperature;
        const response = await ai.models.generateContent({
          model,
          contents,
          config: geminiConfig
        });
        return {
          content: response.text ?? "",
          model,
          usage: normalizeUsage3(response.usageMetadata),
          latencyMs: Date.now() - start
        };
      } catch (err) {
        throw normalizeGeminiError(err);
      }
    }, retryOpts);
  }
  async function* stream(messages, options) {
    const model = options?.model ?? config.model;
    const { system, contents } = buildGeminiContents(messages);
    const geminiConfig = {};
    if (system !== void 0) geminiConfig.systemInstruction = system;
    const maxTokens = options?.maxTokens ?? config.maxTokens;
    if (maxTokens !== void 0) geminiConfig.maxOutputTokens = maxTokens;
    const temperature = options?.temperature ?? config.temperature;
    if (temperature !== void 0) geminiConfig.temperature = temperature;
    let sdkStream;
    try {
      sdkStream = await ai.models.generateContentStream({
        model,
        contents,
        config: geminiConfig
      });
    } catch (err) {
      throw normalizeGeminiError(err);
    }
    let finalUsage;
    try {
      for await (const chunk of sdkStream) {
        const text = chunk.text;
        if (text !== void 0 && text.length > 0) {
          yield { token: text };
        }
        if (chunk.usageMetadata !== void 0) {
          finalUsage = normalizeUsage3(chunk.usageMetadata);
        }
      }
    } catch (err) {
      throw normalizeGeminiError(err);
    }
    if (finalUsage !== void 0) {
      yield { token: "", usage: finalUsage };
    }
  }
  async function structured(messages, schema, options) {
    const augmentedMessages = [
      {
        role: "system",
        content: "You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse()."
      },
      ...messages
    ];
    const model = options?.model ?? config.model;
    const { system, contents } = buildGeminiContents(augmentedMessages);
    const start = Date.now();
    const rawResponse = await withRetry(async () => {
      try {
        const geminiConfig = {
          // Instruct Gemini to return JSON directly
          responseMimeType: "application/json"
        };
        if (system !== void 0) geminiConfig.systemInstruction = system;
        const maxTokens = options?.maxTokens ?? config.maxTokens;
        if (maxTokens !== void 0) geminiConfig.maxOutputTokens = maxTokens;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) geminiConfig.temperature = temperature;
        return await ai.models.generateContent({
          model,
          contents,
          config: geminiConfig
        });
      } catch (err) {
        throw normalizeGeminiError(err);
      }
    }, retryOpts);
    const rawContent = rawResponse.text ?? "";
    let parsed;
    try {
      const cleaned = rawContent.replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/, "").trim();
      parsed = JSON.parse(cleaned);
    } catch (err) {
      throw new LlmError({
        message: `Gemini structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,
        provider: PROVIDER3,
        retryable: false,
        cause: err
      });
    }
    let data;
    try {
      data = schema.parse(parsed);
    } catch (err) {
      throw new LlmError({
        message: `Gemini structured output: response failed schema validation. ${String(err)}`,
        provider: PROVIDER3,
        retryable: false,
        cause: err
      });
    }
    return {
      data,
      usage: normalizeUsage3(rawResponse.usageMetadata),
      latencyMs: Date.now() - start
    };
  }
  return {
    config,
    complete,
    stream,
    structured
  };
}
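A worked example (editorial, not part of the bundle) of `buildGeminiContents` above: 'assistant' maps to Gemini's 'model' role, content is wrapped in `parts`, and system text is hoisted.

```typescript
const gemini = buildGeminiContents([
  { role: 'system', content: 'Be terse.' },
  { role: 'user', content: 'Hi' },
  { role: 'assistant', content: 'Hello!' },
]);
// gemini.system   === 'Be terse.'
// gemini.contents === [
//   { role: 'user',  parts: [{ text: 'Hi' }] },
//   { role: 'model', parts: [{ text: 'Hello!' }] },
// ]
```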
// src/providers/openai.ts
import OpenAI2 from "openai";
var PROVIDER4 = "openai";
function normalizeUsage4(usage) {
  const inputTokens = usage?.prompt_tokens ?? 0;
  const outputTokens = usage?.completion_tokens ?? 0;
  return {
    inputTokens,
    outputTokens,
    totalTokens: usage?.total_tokens ?? inputTokens + outputTokens
  };
}
function buildOpenAIMessages(messages) {
  return messages.map((m) => ({
    role: m.role,
    content: m.content
  }));
}
function normalizeOpenAIError(err) {
  if (err instanceof LlmError) return err;
  if (typeof OpenAI2.APIConnectionError === "function" && err instanceof OpenAI2.APIConnectionError) {
    return new LlmError({
      message: err.message,
      provider: PROVIDER4,
      retryable: true,
      cause: err
    });
  }
  if (typeof OpenAI2.APIError === "function" && err instanceof OpenAI2.APIError) {
    const status = err.status;
    if (status !== void 0) {
      const retryable = [429, 502, 503, 504].includes(status) || status >= 500;
      return new LlmError({
        message: err.message,
        provider: PROVIDER4,
        statusCode: status,
        retryable,
        cause: err
      });
    }
    return new LlmError({ message: err.message, provider: PROVIDER4, retryable: false, cause: err });
  }
  return normalizeThrownError(err, PROVIDER4);
}
function createOpenAIProvider(config) {
  const client = new OpenAI2({
    apiKey: config.apiKey,
    timeout: config.timeoutMs ?? 3e4,
    maxRetries: 0
    // We manage retries ourselves via withRetry
  });
  const retryOpts = {
    maxRetries: config.maxRetries ?? 3,
    baseDelayMs: config.baseDelayMs ?? 1e3,
    provider: PROVIDER4
  };
  async function complete(messages, options) {
    const model = options?.model ?? config.model;
    const openAIMessages = buildOpenAIMessages(messages);
    const start = Date.now();
    return withRetry(async () => {
      try {
        const params = {
          model,
          messages: openAIMessages,
          stream: false
        };
        const maxTokens = options?.maxTokens ?? config.maxTokens;
        if (maxTokens !== void 0) params.max_tokens = maxTokens;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) params.temperature = temperature;
        const response = await client.chat.completions.create(params);
        const content = response.choices.map((c) => c.message.content ?? "").join("");
        return {
          content,
          model: response.model,
          usage: normalizeUsage4(response.usage),
          latencyMs: Date.now() - start
        };
      } catch (err) {
        throw normalizeOpenAIError(err);
      }
    }, retryOpts);
  }
  async function* stream(messages, options) {
    const model = options?.model ?? config.model;
    const openAIMessages = buildOpenAIMessages(messages);
    const params = {
      model,
      messages: openAIMessages,
      stream: true,
      stream_options: { include_usage: true }
    };
    const maxTokens = options?.maxTokens ?? config.maxTokens;
    if (maxTokens !== void 0) params.max_tokens = maxTokens;
    const temperature = options?.temperature ?? config.temperature;
    if (temperature !== void 0) params.temperature = temperature;
    let sdkStream;
    try {
      sdkStream = await client.chat.completions.create(params);
    } catch (err) {
      throw normalizeOpenAIError(err);
    }
    let finalUsage;
    try {
      for await (const chunk of sdkStream) {
        const delta = chunk.choices[0]?.delta.content;
        if (delta !== void 0 && delta !== null && delta.length > 0) {
          yield { token: delta };
        }
        if (chunk.usage !== void 0 && chunk.usage !== null) {
          finalUsage = normalizeUsage4(chunk.usage);
        }
      }
    } catch (err) {
      throw normalizeOpenAIError(err);
    }
    if (finalUsage !== void 0) {
      yield { token: "", usage: finalUsage };
    }
  }
  async function structured(messages, schema, options) {
    const jsonSystemInstruction = {
      role: "system",
      content: "You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse()."
    };
    const augmentedMessages = [jsonSystemInstruction, ...messages];
    const model = options?.model ?? config.model;
    const openAIMessages = buildOpenAIMessages(augmentedMessages);
    const start = Date.now();
    const rawResponse = await withRetry(async () => {
      try {
        const params = {
          model,
          messages: openAIMessages,
          stream: false,
          response_format: { type: "json_object" }
        };
        const maxTokens = options?.maxTokens ?? config.maxTokens;
        if (maxTokens !== void 0) params.max_tokens = maxTokens;
        const temperature = options?.temperature ?? config.temperature;
        if (temperature !== void 0) params.temperature = temperature;
        return await client.chat.completions.create(params);
      } catch (err) {
        throw normalizeOpenAIError(err);
      }
    }, retryOpts);
    const rawContent = rawResponse.choices[0]?.message.content ?? "";
    let parsed;
    try {
      parsed = JSON.parse(rawContent);
    } catch (err) {
      throw new LlmError({
        message: `OpenAI structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,
        provider: PROVIDER4,
        retryable: false,
        cause: err
      });
    }
    let data;
    try {
      data = schema.parse(parsed);
    } catch (err) {
      throw new LlmError({
        message: `OpenAI structured output: response failed schema validation. ${String(err)}`,
        provider: PROVIDER4,
        retryable: false,
        cause: err
      });
    }
    return {
      data,
      usage: normalizeUsage4(rawResponse.usage),
      latencyMs: Date.now() - start
    };
  }
  return {
    config,
    complete,
    stream,
    structured
  };
}
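An editorial summary of the `structured()` strategies in the providers above, condensed into code for comparison; the constants mirror parameter values already shown:

```typescript
// OpenAI: native JSON mode via response_format.
const openaiJsonMode = { response_format: { type: 'json_object' as const } };

// Gemini: native JSON mode via responseMimeType.
const geminiJsonMode = { responseMimeType: 'application/json' };

// Anthropic and DeepSeek: system-prompt injection plus fence stripping
// before JSON.parse, exactly as in the implementations above.
function stripFences(raw: string): string {
  return raw.replace(/^```(?:json)?\s*/i, '').replace(/\s*```$/, '').trim();
}
```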
// src/providers/stubs.ts
function rejectingStream(err) {
  const rejected = Promise.reject(err);
  rejected.catch(() => void 0);
  return {
    next: () => rejected,
    return: () => Promise.resolve({ value: void 0, done: true }),
    throw: () => Promise.reject(err),
    [Symbol.asyncIterator]() {
      return this;
    },
    [Symbol.asyncDispose]: async () => void 0
  };
}
function notImplemented(provider) {
  const err = new LlmError({
    message: `[dlabs-toolkit] Provider '${provider}' is not yet implemented. Anthropic, OpenAI, Gemini, and DeepSeek are available; Perplexity ships in a later week.`,
    provider,
    retryable: false
  });
  return {
    get config() {
      throw err;
    },
    complete: () => Promise.reject(err),
    stream: () => rejectingStream(err),
    structured: () => Promise.reject(err)
  };
}
function createPerplexityProvider(config) {
  void config;
  return notImplemented("perplexity");
}
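An editorial sketch of the stub's behavior: the perplexity provider above is type-registered but fails at call time with a non-retryable `LlmError`. The model name 'sonar' is a placeholder, not taken from the package.

```typescript
const stub = createPerplexityProvider({
  provider: 'perplexity',
  model: 'sonar',
  apiKey: 'unused',
});

stub.complete([{ role: 'user', content: 'Hi' }]).catch((err) => {
  if (err instanceof LlmError) {
    console.error(err.retryable); // false: "not yet implemented"
  }
});
```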
// src/client.ts
function createClient(config) {
  switch (config.provider) {
    case "anthropic":
      return createAnthropicProvider(config);
    case "openai":
      return createOpenAIProvider(config);
    case "gemini":
      return createGeminiProvider(config);
    case "deepseek":
      return createDeepSeekProvider(config);
    case "perplexity":
      return createPerplexityProvider(config);
    default: {
      const _exhaustive = config.provider;
      throw new LlmError({
        message: `[dlabs-toolkit] Unknown provider: ${String(_exhaustive)}`,
        provider: String(_exhaustive),
        retryable: false
      });
    }
  }
}
function createClientFromEnv(provider, model, overrides) {
  const apiKey = resolveApiKey(provider);
  return createClient({ provider, model, apiKey, ...overrides });
}
function resolveApiKey(provider) {
  const envVarMap = {
    anthropic: "ANTHROPIC_API_KEY",
    openai: "OPENAI_API_KEY",
    gemini: "GOOGLE_AI_API_KEY",
    deepseek: "DEEPSEEK_API_KEY",
    perplexity: "PERPLEXITY_API_KEY"
  };
  const envVar = envVarMap[provider];
  const apiKey = process.env[envVar];
  if (apiKey === void 0 || apiKey.trim() === "") {
    throw new LlmError({
      message: `[dlabs-toolkit] ${envVar} is not set. Set this environment variable to use the ${provider} provider.`,
      provider,
      retryable: false
    });
  }
  return apiKey;
}
export {
  LlmError,
  createClient,
  createClientFromEnv
};
//# sourceMappingURL=index.js.map
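An editorial usage sketch for the factory above: overrides passed to `createClientFromEnv` flow straight into the resolved config, so retry and sampling knobs can be tuned per client. 'gpt-4o' is an example model name.

```typescript
const tuned = createClientFromEnv('openai', 'gpt-4o', {
  maxRetries: 5,    // default 3
  baseDelayMs: 500, // default 1000
  temperature: 0.2,
});
```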
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/providers/anthropic.ts","../src/types.ts","../src/retry.ts","../src/providers/deepseek.ts","../src/providers/gemini.ts","../src/providers/openai.ts","../src/providers/stubs.ts","../src/client.ts"],"sourcesContent":["/**\n * Anthropic Claude provider for @diabolicallabs/llm-client.\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * Anthropic: input_tokens / output_tokens / cache_creation_input_tokens / cache_read_input_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens / cacheCreationTokens / cacheReadTokens\n *\n * Error mapping:\n * APIStatusError.status → LlmError.statusCode + retryable flag\n * APIConnectionError → retryable: true\n */\n\nimport Anthropic from '@anthropic-ai/sdk';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'anthropic';\n\n/** Normalize Anthropic's usage object to LlmUsage. */\nfunction normalizeUsage(usage: Anthropic.Usage | undefined): LlmUsage {\n const inputTokens = usage?.input_tokens ?? 0;\n const outputTokens = usage?.output_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: inputTokens + outputTokens,\n // cache_creation_input_tokens and cache_read_input_tokens are present on\n // extended usage objects from prompt caching — cast to access them safely.\n cacheCreationTokens: (usage as Anthropic.Usage & { cache_creation_input_tokens?: number })\n ?.cache_creation_input_tokens,\n cacheReadTokens: (usage as Anthropic.Usage & { cache_read_input_tokens?: number })\n ?.cache_read_input_tokens,\n };\n}\n\n/** Convert LlmMessages to Anthropic's message format. Extracts system prompt. */\nfunction buildAnthropicMessages(messages: LlmMessage[]): {\n system: string | undefined;\n messages: Anthropic.MessageParam[];\n} {\n const systemMessages = messages.filter((m) => m.role === 'system');\n const conversationMessages = messages.filter((m) => m.role !== 'system');\n\n const system =\n systemMessages.length > 0 ? systemMessages.map((m) => m.content).join('\\n') : undefined;\n\n const anthropicMessages: Anthropic.MessageParam[] = conversationMessages.map((m) => ({\n role: m.role as 'user' | 'assistant',\n content: m.content,\n }));\n\n return { system, messages: anthropicMessages };\n}\n\n/**\n * Normalize any Anthropic SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n */\nexport function normalizeAnthropicError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // Anthropic SDK v0.94+: uses Anthropic.APIError as the base class with a `.status` field.\n // APIConnectionError is a subclass of APIError with status: undefined — check it first\n // so network failures are always retryable regardless of the missing status code.\n if (\n typeof Anthropic.APIConnectionError === 'function' &&\n err instanceof Anthropic.APIConnectionError\n ) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401),\n // InternalServerError (500), etc. 
Retryability is determined by HTTP status code.\n if (typeof Anthropic.APIError === 'function' && err instanceof Anthropic.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Anthropic provider implementation. */\nexport function createAnthropicProvider(config: LlmClientConfig): LlmClient {\n const client = new Anthropic({\n apiKey: config.apiKey,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // We manage retries ourselves via withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);\n\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n const params: Anthropic.MessageCreateParamsNonStreaming = {\n model,\n messages: anthropicMessages,\n max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024,\n };\n\n if (system !== undefined) params.system = system;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) {\n params.temperature = temperature;\n }\n\n const response = await client.messages.create(params);\n\n const content = response.content\n .filter((block): block is Anthropic.TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeAnthropicError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);\n\n const params: Anthropic.MessageStreamParams = {\n model,\n messages: anthropicMessages,\n max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024,\n };\n\n if (system !== undefined) params.system = system;\n const streamTemperature = options?.temperature ?? 
config.temperature;\n if (streamTemperature !== undefined) {\n params.temperature = streamTemperature;\n }\n\n let sdkStream: Awaited<ReturnType<typeof client.messages.stream>>;\n\n try {\n sdkStream = client.messages.stream(params);\n } catch (err) {\n throw normalizeAnthropicError(err);\n }\n\n // Accumulate usage — Anthropic sends it in the message_delta event at stream end\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const event of sdkStream) {\n if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {\n yield { token: event.delta.text };\n } else if (event.type === 'message_delta' && 'usage' in event) {\n // Merge input tokens from message_start with output tokens from message_delta\n const accum = await sdkStream.finalMessage();\n finalUsage = normalizeUsage(accum.usage);\n }\n }\n } catch (err) {\n // Propagate as a normalized LlmError regardless of whether streaming had started.\n // Partial stream errors cannot be recovered from — the consumer must handle them.\n throw normalizeAnthropicError(err);\n }\n\n // Yield usage on the final empty chunk\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmStructuredResponse<T>> {\n // Anthropic JSON mode: append a system instruction to return only JSON.\n // We inject this into the messages so the provider returns parseable output.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const start = Date.now();\n\n const response = await complete(augmentedMessages, options);\n\n let parsed: unknown;\n try {\n // Strip markdown code fences if the model included them despite the instruction\n const cleaned = response.content\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Anthropic structured output: response is not valid JSON. Raw: ${response.content.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Anthropic structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: response.usage,\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Core type definitions for @diabolicallabs/llm-client.\n * These are the stable public API surface — implementation is in Week 2.\n * Types here match the spec in briefs/brief-platform.md §4.1 exactly.\n */\n\n// The canonical message format shared across all providers\nexport interface LlmMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\n// Config passed to createClient\nexport interface LlmClientConfig {\n // Full 5-provider union — gemini, deepseek, perplexity are type-only stubs in Week 2\n provider: 'anthropic' | 'openai' | 'gemini' | 'deepseek' | 'perplexity';\n model: string; // e.g. 
'claude-sonnet-4-6', 'gpt-4o', 'gemini-2.5-flash'\n apiKey: string;\n maxRetries?: number; // default: 3\n baseDelayMs?: number; // default: 1000 — exponential backoff base\n maxTokens?: number; // provider default if omitted\n temperature?: number; // provider default if omitted\n timeoutMs?: number; // default: 30000\n}\n\n// Normalized token usage — same shape regardless of provider\nexport interface LlmUsage {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n cacheCreationTokens?: number; // Anthropic prompt cache write tokens\n cacheReadTokens?: number; // Anthropic prompt cache read tokens\n}\n\n// Non-streaming response\nexport interface LlmResponse {\n content: string;\n model: string; // model ID actually used (may differ from requested)\n usage: LlmUsage;\n latencyMs: number;\n}\n\n// Streaming chunk\nexport interface LlmStreamChunk {\n token: string;\n usage?: LlmUsage; // present only on the final chunk\n}\n\n// Normalized error — wraps provider-specific errors\nexport class LlmError extends Error {\n override readonly name = 'LlmError';\n readonly provider: string;\n readonly statusCode: number | undefined;\n readonly retryable: boolean;\n // `cause` is declared on Error in lib.es2022.error.d.ts as `cause?: unknown`\n // We override it here to make it always present (not optional) after construction.\n override readonly cause: unknown;\n\n constructor(opts: {\n message: string;\n provider: string;\n statusCode?: number;\n retryable: boolean;\n cause?: unknown;\n }) {\n super(opts.message, { cause: opts.cause });\n this.provider = opts.provider;\n this.statusCode = opts.statusCode;\n this.retryable = opts.retryable;\n this.cause = opts.cause;\n }\n}\n\n// Structured output — Zod schema inference\nexport type LlmStructuredResponse<T> = {\n data: T;\n usage: LlmUsage;\n latencyMs: number;\n};\n\n// The LlmClient interface — what consumers program against\nexport interface LlmClient {\n readonly config: Readonly<LlmClientConfig>;\n\n // Non-streaming completion\n complete(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmResponse>;\n\n // Streaming completion — async generator of chunks\n stream(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): AsyncGenerator<LlmStreamChunk>;\n\n // Structured output — parses and validates the response against a Zod schema\n // Forces JSON mode on providers that support it; falls back to parse-and-validate\n structured<T>(\n messages: LlmMessage[],\n // Using a narrower interface than the full ZodType to avoid a hard zod dependency at types level\n schema: { parse: (data: unknown) => T },\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmStructuredResponse<T>>;\n}\n","/**\n * Exponential backoff with full jitter — shared across all providers.\n *\n * Formula: delay = random(0, baseDelayMs * 2^attempt)\n *\n * Retryable HTTP statuses: 429 (rate limit), 502/503/504 (server errors).\n * Retryable network codes: ECONNRESET, ETIMEDOUT.\n * Non-retryable: 400 (bad request), 401/403 (auth), 404.\n */\n\nimport { LlmError } from './types.js';\n\n// HTTP status codes that should trigger a retry\nconst RETRYABLE_HTTP_STATUSES = new Set([429, 502, 503, 504]);\n\n// Network error codes that should trigger a retry\nconst RETRYABLE_ERROR_CODES = new Set(['ECONNRESET', 'ETIMEDOUT', 'ECONNABORTED']);\n\n// HTTP status codes that should never retry (fail 
immediately)\nconst NON_RETRYABLE_HTTP_STATUSES = new Set([400, 401, 403, 404]);\n\n/** Determine if an HTTP status code is retryable. */\nexport function isRetryableStatus(statusCode: number): boolean {\n if (RETRYABLE_HTTP_STATUSES.has(statusCode)) return true;\n if (NON_RETRYABLE_HTTP_STATUSES.has(statusCode)) return false;\n // Treat any 5xx not explicitly handled as retryable\n return statusCode >= 500;\n}\n\n/** Determine if a network error code is retryable. */\nexport function isRetryableErrorCode(code: string): boolean {\n return RETRYABLE_ERROR_CODES.has(code);\n}\n\n/** Compute the delay in ms for attempt N (0-indexed). Full jitter. */\nexport function computeBackoffMs(attempt: number, baseDelayMs: number): number {\n const ceiling = baseDelayMs * 2 ** attempt;\n return Math.random() * ceiling;\n}\n\nexport interface RetryOptions {\n maxRetries: number;\n baseDelayMs: number;\n provider: string;\n}\n\n/**\n * Execute `fn` with retry logic. Wraps the result in structured error normalization.\n * `fn` receives the current attempt number (0-indexed).\n *\n * Throws LlmError after all retries are exhausted.\n */\nexport async function withRetry<T>(\n fn: (attempt: number) => Promise<T>,\n opts: RetryOptions\n): Promise<T> {\n let lastError: LlmError | undefined;\n\n for (let attempt = 0; attempt <= opts.maxRetries; attempt++) {\n try {\n return await fn(attempt);\n } catch (err) {\n const llmErr = normalizeThrownError(err, opts.provider);\n\n if (!llmErr.retryable || attempt === opts.maxRetries) {\n throw llmErr;\n }\n\n lastError = llmErr;\n const delayMs = computeBackoffMs(attempt, opts.baseDelayMs);\n await sleep(delayMs);\n }\n }\n\n // This path is unreachable — the loop always throws or returns.\n // TypeScript needs this for exhaustiveness.\n throw (\n lastError ??\n new LlmError({\n message: 'Unexpected retry exhaustion',\n provider: opts.provider,\n retryable: false,\n })\n );\n}\n\n/** Normalize any thrown value into an LlmError. */\nexport function normalizeThrownError(err: unknown, provider: string): LlmError {\n if (err instanceof LlmError) return err;\n\n if (err instanceof Error) {\n const errWithCode = err as Error & { status?: number; statusCode?: number; code?: string };\n\n const statusCode = errWithCode.status ?? 
errWithCode.statusCode;\n\n // Check for retryable network error codes\n if (errWithCode.code !== undefined && isRetryableErrorCode(errWithCode.code)) {\n if (statusCode !== undefined) {\n return new LlmError({\n message: err.message,\n provider,\n statusCode,\n retryable: true,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider, retryable: true, cause: err });\n }\n\n // Check for retryable HTTP status codes\n if (statusCode !== undefined) {\n return new LlmError({\n message: err.message,\n provider,\n statusCode,\n retryable: isRetryableStatus(statusCode),\n cause: err,\n });\n }\n\n return new LlmError({\n message: err.message,\n provider,\n retryable: false,\n cause: err,\n });\n }\n\n return new LlmError({\n message: String(err),\n provider,\n retryable: false,\n cause: err,\n });\n}\n\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n","/**\n * DeepSeek provider for @diabolicallabs/llm-client.\n *\n * DeepSeek's chat completions API is fully OpenAI-compatible, so this provider\n * uses the OpenAI SDK pointed at DeepSeek's base URL.\n *\n * API base URL: https://api.deepseek.com\n * Docs: https://platform.deepseek.com/api-docs/\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * DeepSeek returns standard OpenAI-format usage: prompt_tokens / completion_tokens / total_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIConnectionError → retryable: true\n * APIError with status 429 / 5xx → retryable: true\n * Other APIErrors → non-retryable\n *\n * Note: DeepSeek does not support the json_object response_format on all models.\n * structured() injects a system prompt and parses the raw response. If the model\n * includes markdown fences, they are stripped before parsing.\n */\n\nimport OpenAI from 'openai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'deepseek';\nconst DEEPSEEK_BASE_URL = 'https://api.deepseek.com';\n\n/** Normalize OpenAI-format usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI-format chat message params (compatible with DeepSeek). 
*/\nfunction buildMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Normalize any DeepSeek / OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * Uses the same OpenAI SDK error hierarchy (APIConnectionError before APIError)\n * since the client is an OpenAI instance pointed at DeepSeek's API.\n */\nexport function normalizeDeepSeekError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // APIConnectionError is a subclass of APIError with status: undefined —\n // check it first so network failures are always retryable.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401), etc.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the DeepSeek provider implementation. */\nexport function createDeepSeekProvider(config: LlmClientConfig): LlmClient {\n // OpenAI SDK pointed at DeepSeek's OpenAI-compatible endpoint\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: DEEPSEEK_BASE_URL,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // Retries managed by withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: chatMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const response = await client.chat.completions.create(params);\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? 
config.model;\n const chatMessages = buildMessages(messages);\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {\n model,\n messages: chatMessages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk when stream_options.include_usage is true\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmStructuredResponse<T>> {\n // Inject JSON-only system instruction. DeepSeek does not guarantee json_object\n // response_format support across all models, so we rely on prompt-level enforcement.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(augmentedMessages);\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: chatMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.choices[0]?.message.content ?? '';\n\n let parsed: unknown;\n try {\n // Strip markdown fences if the model included them despite the instruction\n const cleaned = rawContent\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `DeepSeek structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `DeepSeek structured output: response failed schema validation. 
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Google Gemini provider for @diabolicallabs/llm-client.\n *\n * Uses the @google/genai SDK (v1.x — not the deprecated @google/generative-ai).\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * Gemini: usageMetadata.promptTokenCount / candidatesTokenCount / totalTokenCount\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * ApiError (public SDK class, status: number always defined):\n * retryable for 429 / 5xx\n * non-retryable for 4xx (except 429)\n * Other errors → normalizeThrownError (handles ECONNRESET / ETIMEDOUT as retryable)\n *\n * API notes:\n * - System instructions are passed via config.systemInstruction (not mixed into contents)\n * - Role mapping: 'user' → 'user', 'assistant' → 'model'\n * - Streaming via ai.models.generateContentStream() returns AsyncGenerator<GenerateContentResponse>\n * - Text is accessed via response.text getter on GenerateContentResponse\n * - Structured output: responseMimeType: 'application/json' in GenerateContentConfig\n *\n * SDK error class note:\n * The @google/genai public API exports only ApiError (lowercase 'a'), which has status: number.\n * Internal APIError / APIConnectionError classes (uppercase) are NOT exported from the package\n * root and must not be imported from internal dist paths.\n * Network errors (ECONNRESET, ETIMEDOUT) arrive as plain Error objects caught by normalizeThrownError.\n */\n\nimport {\n ApiError,\n type Content,\n type GenerateContentConfig,\n type GenerateContentResponse,\n type GenerateContentResponseUsageMetadata,\n GoogleGenAI,\n} from '@google/genai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'gemini';\n\n/** Normalize Gemini's usageMetadata to LlmUsage. */\nfunction normalizeUsage(meta: GenerateContentResponseUsageMetadata | undefined): LlmUsage {\n const inputTokens = meta?.promptTokenCount ?? 0;\n const outputTokens = meta?.candidatesTokenCount ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: meta?.totalTokenCount ?? inputTokens + outputTokens,\n };\n}\n\n/**\n * Convert LlmMessages to Gemini's Content array format.\n * Extracts system message — Gemini treats system instructions separately from contents.\n * Role mapping: 'user' → 'user', 'assistant' → 'model' (Gemini API requires 'model').\n */\nfunction buildGeminiContents(messages: LlmMessage[]): {\n system: string | undefined;\n contents: Content[];\n} {\n const systemMessages = messages.filter((m) => m.role === 'system');\n const conversationMessages = messages.filter((m) => m.role !== 'system');\n\n const system =\n systemMessages.length > 0 ? systemMessages.map((m) => m.content).join('\\n') : undefined;\n\n const contents: Content[] = conversationMessages.map((m) => ({\n role: m.role === 'assistant' ? 
'model' : 'user',\n parts: [{ text: m.content }],\n }));\n\n return { system, contents };\n}\n\n/**\n * Normalize any Gemini SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * ApiError (public SDK class) always has status: number, so there is no undefined-status branch.\n * Network errors (no HTTP status) arrive as plain Error objects; normalizeThrownError\n * handles retryable error codes (ECONNRESET, ETIMEDOUT, etc.).\n */\nexport function normalizeGeminiError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // ApiError is the only publicly-exported SDK error class.\n // status is always number (not undefined) per the ApiError type definition.\n if (err instanceof ApiError) {\n const retryable = err.status === 429 || err.status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: err.status,\n retryable,\n cause: err,\n });\n }\n\n // Network errors (ECONNRESET, ETIMEDOUT, etc.) arrive as plain Error objects.\n // normalizeThrownError classifies retryable codes and handles the unknown-error case.\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Gemini provider implementation. */\nexport function createGeminiProvider(config: LlmClientConfig): LlmClient {\n const ai = new GoogleGenAI({\n apiKey: config.apiKey,\n httpOptions: {\n timeout: config.timeoutMs ?? 30_000,\n },\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(messages);\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n // Build config object — always passed (empty object is valid GenerateContentConfig)\n const geminiConfig: GenerateContentConfig = {};\n\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n const response = await ai.models.generateContent({\n model,\n contents,\n config: geminiConfig,\n });\n\n return {\n content: response.text ?? '',\n model,\n usage: normalizeUsage(response.usageMetadata),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(messages);\n\n // Build config — always passed (empty object is valid GenerateContentConfig)\n const geminiConfig: GenerateContentConfig = {};\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? 
config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n let sdkStream: AsyncGenerator<GenerateContentResponse>;\n\n try {\n sdkStream = await ai.models.generateContentStream({\n model,\n contents,\n config: geminiConfig,\n });\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n const text = chunk.text;\n if (text !== undefined && text.length > 0) {\n yield { token: text };\n }\n // Capture usage from each chunk — the final chunk has the complete totals\n if (chunk.usageMetadata !== undefined) {\n finalUsage = normalizeUsage(chunk.usageMetadata);\n }\n }\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmStructuredResponse<T>> {\n const augmentedMessages: LlmMessage[] = [\n {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n },\n ...messages,\n ];\n\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(augmentedMessages);\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n try {\n const geminiConfig: GenerateContentConfig = {\n // Instruct Gemini to return JSON directly\n responseMimeType: 'application/json',\n };\n\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n return await ai.models.generateContent({\n model,\n contents,\n config: geminiConfig,\n });\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.text ?? '';\n\n let parsed: unknown;\n try {\n // Strip markdown code fences if the model included them despite the instruction\n const cleaned = rawContent\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Gemini structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Gemini structured output: response failed schema validation. 
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usageMetadata),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * OpenAI provider for @diabolicallabs/llm-client.\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * OpenAI: prompt_tokens / completion_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIStatusError.status → LlmError.statusCode + retryable flag\n * APIConnectionError → retryable: true\n *\n * Structured output uses OpenAI's response_format: { type: 'json_object' }.\n * For strict schema enforcement, the schema is described in the system prompt.\n */\n\nimport OpenAI from 'openai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'openai';\n\n/** Normalize OpenAI's usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI's chat message format. */\nfunction buildOpenAIMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Normalize any OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n */\nexport function normalizeOpenAIError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // OpenAI SDK v6+: uses OpenAI.APIError as the base class with a `.status` field.\n // APIConnectionError is a subclass of APIError with status: undefined — check it first\n // so network failures are always retryable regardless of the missing status code.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401),\n // InternalServerError (500), etc. Retryability is determined by HTTP status code.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the OpenAI provider implementation. */\nexport function createOpenAIProvider(config: LlmClientConfig): LlmClient {\n const client = new OpenAI({\n apiKey: config.apiKey,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // We manage retries ourselves via withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 
1_000,\n provider: PROVIDER,\n };\n\n async function complete(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(messages);\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: openAIMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const response = await client.chat.completions.create(params);\n\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(messages);\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {\n model,\n messages: openAIMessages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n // Token chunks arrive in choices[0].delta.content\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk (stream_options.include_usage must be true)\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n\n // Yield usage on the final sentinel chunk\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>>\n ): Promise<LlmStructuredResponse<T>> {\n // OpenAI JSON mode: response_format: { type: 'json_object' }\n // The system prompt must instruct the model to output JSON — OpenAI requires this.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? 
config.model;\n const openAIMessages = buildOpenAIMessages(augmentedMessages);\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: openAIMessages,\n stream: false,\n response_format: { type: 'json_object' },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.choices[0]?.message.content ?? '';\n\n let parsed: unknown;\n try {\n parsed = JSON.parse(rawContent);\n } catch (err) {\n throw new LlmError({\n message: `OpenAI structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `OpenAI structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Type-only stubs for providers not yet implemented.\n *\n * Perplexity — later week (internal tooling, separate decision)\n *\n * All stubs throw a clear \"not yet implemented\" LlmError.\n * They are registered in the factory so the switch statement is exhaustive\n * and unknown provider values are caught at runtime with a useful message.\n */\n\nimport type { LlmClient, LlmClientConfig, LlmStreamChunk } from '../types.js';\nimport { LlmError } from '../types.js';\n\n/**\n * Returns an AsyncGenerator that immediately rejects when iterated.\n * Implemented without generator syntax to avoid Biome's useYield lint rule —\n * a throw-only generator has no meaningful yield, which Biome correctly flags.\n * The returned object satisfies the AsyncGenerator<LlmStreamChunk> interface contract.\n */\nfunction rejectingStream(err: LlmError): AsyncGenerator<LlmStreamChunk> {\n const rejected = Promise.reject<IteratorResult<LlmStreamChunk>>(err);\n // Attach a no-op catch so Node does not emit an unhandledRejection warning\n // before the caller consumes the generator via for-await-of.\n rejected.catch(() => undefined);\n return {\n next: () => rejected,\n return: () => Promise.resolve({ value: undefined, done: true as const }),\n throw: () => Promise.reject(err),\n [Symbol.asyncIterator]() {\n return this;\n },\n [Symbol.asyncDispose]: async () => undefined,\n };\n}\n\nfunction notImplemented(provider: string): LlmClient {\n const err = new LlmError({\n message: `[dlabs-toolkit] Provider '${provider}' is not yet implemented. 
Anthropic, OpenAI, Gemini, and DeepSeek are available; Perplexity ships in a later week.`,\n provider,\n retryable: false,\n });\n\n // Return an object that throws on any method call.\n // The error is pre-constructed so stack traces point to the factory call site,\n // not the method call site — easier to debug misconfigured providers.\n return {\n get config(): LlmClientConfig {\n throw err;\n },\n complete: () => Promise.reject(err),\n stream: () => rejectingStream(err),\n structured: () => Promise.reject(err),\n };\n}\n\n/** Perplexity provider stub — later week. */\nexport function createPerplexityProvider(config: LlmClientConfig): LlmClient {\n void config;\n return notImplemented('perplexity');\n}\n","/**\n * Factory functions for LlmClient.\n *\n * createClient — dispatches to the correct provider implementation.\n * createClientFromEnv — convenience wrapper that reads API keys from env vars.\n *\n * Provider dispatch:\n * 'anthropic' → fully implemented (Week 2)\n * 'openai' → fully implemented (Week 2)\n * 'gemini' → fully implemented (Week 3)\n * 'deepseek' → fully implemented (Week 3)\n * 'perplexity' → stub, throws \"not yet implemented\" (later week)\n */\n\nimport { createAnthropicProvider } from './providers/anthropic.js';\nimport { createDeepSeekProvider } from './providers/deepseek.js';\nimport { createGeminiProvider } from './providers/gemini.js';\nimport { createOpenAIProvider } from './providers/openai.js';\nimport { createPerplexityProvider } from './providers/stubs.js';\nimport type { LlmClient, LlmClientConfig } from './types.js';\nimport { LlmError } from './types.js';\n\n/**\n * Create an LlmClient for the given provider and config.\n * Dispatches to the provider-specific implementation.\n *\n * Anthropic, OpenAI, Gemini, and DeepSeek are fully implemented.\n * Perplexity is a type-registered stub that throws \"not yet implemented\".\n */\nexport function createClient(config: LlmClientConfig): LlmClient {\n switch (config.provider) {\n case 'anthropic':\n return createAnthropicProvider(config);\n\n case 'openai':\n return createOpenAIProvider(config);\n\n case 'gemini':\n return createGeminiProvider(config);\n\n case 'deepseek':\n return createDeepSeekProvider(config);\n\n case 'perplexity':\n return createPerplexityProvider(config);\n\n default: {\n // TypeScript exhaustiveness check — if a new provider is added to the union\n // without a case here, this will be a compile-time error.\n const _exhaustive: never = config.provider;\n throw new LlmError({\n message: `[dlabs-toolkit] Unknown provider: ${String(_exhaustive)}`,\n provider: String(_exhaustive),\n retryable: false,\n });\n }\n }\n}\n\n/**\n * Convenience: create an LlmClient from environment variables.\n *\n * Reads API keys from the environment based on provider:\n * anthropic → ANTHROPIC_API_KEY\n * openai → OPENAI_API_KEY\n * gemini → GOOGLE_AI_API_KEY\n * deepseek → DEEPSEEK_API_KEY\n * perplexity → PERPLEXITY_API_KEY\n *\n * Throws LlmError if the required env var is not set.\n */\nexport function createClientFromEnv(\n provider: LlmClientConfig['provider'],\n model: string,\n overrides?: Partial<Omit<LlmClientConfig, 'provider' | 'model' | 'apiKey'>>\n): LlmClient {\n const apiKey = resolveApiKey(provider);\n return createClient({ provider, model, apiKey, ...overrides });\n}\n\n/** Read the API key for a given provider from environment variables. 
*/\nfunction resolveApiKey(provider: LlmClientConfig['provider']): string {\n const envVarMap: Record<LlmClientConfig['provider'], string> = {\n anthropic: 'ANTHROPIC_API_KEY',\n openai: 'OPENAI_API_KEY',\n gemini: 'GOOGLE_AI_API_KEY',\n deepseek: 'DEEPSEEK_API_KEY',\n perplexity: 'PERPLEXITY_API_KEY',\n };\n\n const envVar = envVarMap[provider];\n const apiKey = process.env[envVar];\n\n if (apiKey === undefined || apiKey.trim() === '') {\n throw new LlmError({\n message: `[dlabs-toolkit] ${envVar} is not set. Set this environment variable to use the ${provider} provider.`,\n provider,\n retryable: false,\n });\n }\n\n return apiKey;\n}\n"],"mappings":";AAcA,OAAO,eAAe;;;ACmCf,IAAM,WAAN,cAAuB,MAAM;AAAA,EAChB,OAAO;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA,EAGS;AAAA,EAElB,YAAY,MAMT;AACD,UAAM,KAAK,SAAS,EAAE,OAAO,KAAK,MAAM,CAAC;AACzC,SAAK,WAAW,KAAK;AACrB,SAAK,aAAa,KAAK;AACvB,SAAK,YAAY,KAAK;AACtB,SAAK,QAAQ,KAAK;AAAA,EACpB;AACF;;;AC1DA,IAAM,0BAA0B,oBAAI,IAAI,CAAC,KAAK,KAAK,KAAK,GAAG,CAAC;AAG5D,IAAM,wBAAwB,oBAAI,IAAI,CAAC,cAAc,aAAa,cAAc,CAAC;AAGjF,IAAM,8BAA8B,oBAAI,IAAI,CAAC,KAAK,KAAK,KAAK,GAAG,CAAC;AAGzD,SAAS,kBAAkB,YAA6B;AAC7D,MAAI,wBAAwB,IAAI,UAAU,EAAG,QAAO;AACpD,MAAI,4BAA4B,IAAI,UAAU,EAAG,QAAO;AAExD,SAAO,cAAc;AACvB;AAGO,SAAS,qBAAqB,MAAuB;AAC1D,SAAO,sBAAsB,IAAI,IAAI;AACvC;AAGO,SAAS,iBAAiB,SAAiB,aAA6B;AAC7E,QAAM,UAAU,cAAc,KAAK;AACnC,SAAO,KAAK,OAAO,IAAI;AACzB;AAcA,eAAsB,UACpB,IACA,MACY;AACZ,MAAI;AAEJ,WAAS,UAAU,GAAG,WAAW,KAAK,YAAY,WAAW;AAC3D,QAAI;AACF,aAAO,MAAM,GAAG,OAAO;AAAA,IACzB,SAAS,KAAK;AACZ,YAAM,SAAS,qBAAqB,KAAK,KAAK,QAAQ;AAEtD,UAAI,CAAC,OAAO,aAAa,YAAY,KAAK,YAAY;AACpD,cAAM;AAAA,MACR;AAEA,kBAAY;AACZ,YAAM,UAAU,iBAAiB,SAAS,KAAK,WAAW;AAC1D,YAAM,MAAM,OAAO;AAAA,IACrB;AAAA,EACF;AAIA,QACE,aACA,IAAI,SAAS;AAAA,IACX,SAAS;AAAA,IACT,UAAU,KAAK;AAAA,IACf,WAAW;AAAA,EACb,CAAC;AAEL;AAGO,SAAS,qBAAqB,KAAc,UAA4B;AAC7E,MAAI,eAAe,SAAU,QAAO;AAEpC,MAAI,eAAe,OAAO;AACxB,UAAM,cAAc;AAEpB,UAAM,aAAa,YAAY,UAAU,YAAY;AAGrD,QAAI,YAAY,SAAS,UAAa,qBAAqB,YAAY,IAAI,GAAG;AAC5E,UAAI,eAAe,QAAW;AAC5B,eAAO,IAAI,SAAS;AAAA,UAClB,SAAS,IAAI;AAAA,UACb;AAAA,UACA;AAAA,UACA,WAAW;AAAA,UACX,OAAO;AAAA,QACT,CAAC;AAAA,MACH;AACA,aAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAU,WAAW,MAAM,OAAO,IAAI,CAAC;AAAA,IACrF;AAGA,QAAI,eAAe,QAAW;AAC5B,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb;AAAA,QACA;AAAA,QACA,WAAW,kBAAkB,UAAU;AAAA,QACvC,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb;AAAA,MACA,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAEA,SAAO,IAAI,SAAS;AAAA,IAClB,SAAS,OAAO,GAAG;AAAA,IACnB;AAAA,IACA,WAAW;AAAA,IACX,OAAO;AAAA,EACT,CAAC;AACH;AAEA,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;;;AF/GA,IAAM,WAAW;AAGjB,SAAS,eAAe,OAA8C;AACpE,QAAM,cAAc,OAAO,gBAAgB;AAC3C,QAAM,eAAe,OAAO,iBAAiB;AAC7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,cAAc;AAAA;AAAA;AAAA,IAG3B,qBAAsB,OAClB;AAAA,IACJ,iBAAkB,OACd;AAAA,EACN;AACF;AAGA,SAAS,uBAAuB,UAG9B;AACA,QAAM,iBAAiB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AACjE,QAAM,uBAAuB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AAEvE,QAAM,SACJ,eAAe,SAAS,IAAI,eAAe,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,IAAI;AAEhF,QAAM,oBAA8C,qBAAqB,IAAI,CAAC,OAAO;AAAA,IACnF,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AAEF,SAAO,EAAE,QAAQ,UAAU,kBAAkB;AAC/C;AAMO,SAAS,wBAAwB,KAAwB;AAC9D,MAAI,eAAe,SAAU,QAAO;AAKpC,MACE,OAAO,UAAU,uBAAuB,cACxC,eAAe,UAAU,oBACzB;AACA,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAU;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,MAAI,OAAO,UAAU,aAAa,cAAc,eAAe,UAAU,UAAU;AACjF,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AA
CrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAU;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAU,UAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAK,QAAQ;AAC3C;AAGO,SAAS,wBAAwB,QAAoC;AAC1E,QAAM,SAAS,IAAI,UAAU;AAAA,IAC3B,QAAQ,OAAO;AAAA,IACf,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAU;AAAA,EACZ;AAEA,iBAAe,SACb,UACA,SACsB;AACtB,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,UAAU,kBAAkB,IAAI,uBAAuB,QAAQ;AAE/E,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SAAoD;AAAA,UACxD;AAAA,UACA,UAAU;AAAA,UACV,YAAY,SAAS,aAAa,OAAO,aAAa;AAAA,QACxD;AAEA,YAAI,WAAW,OAAW,QAAO,SAAS;AAC1C,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,QAAW;AAC7B,iBAAO,cAAc;AAAA,QACvB;AAEA,cAAM,WAAW,MAAM,OAAO,SAAS,OAAO,MAAM;AAEpD,cAAM,UAAU,SAAS,QACtB,OAAO,CAAC,UAAwC,MAAM,SAAS,MAAM,EACrE,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,EAAE;AAEV,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAO,eAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,wBAAwB,GAAG;AAAA,MACnC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,UAAU,kBAAkB,IAAI,uBAAuB,QAAQ;AAE/E,UAAM,SAAwC;AAAA,MAC5C;AAAA,MACA,UAAU;AAAA,MACV,YAAY,SAAS,aAAa,OAAO,aAAa;AAAA,IACxD;AAEA,QAAI,WAAW,OAAW,QAAO,SAAS;AAC1C,UAAM,oBAAoB,SAAS,eAAe,OAAO;AACzD,QAAI,sBAAsB,QAAW;AACnC,aAAO,cAAc;AAAA,IACvB;AAEA,QAAI;AAEJ,QAAI;AACF,kBAAY,OAAO,SAAS,OAAO,MAAM;AAAA,IAC3C,SAAS,KAAK;AACZ,YAAM,wBAAwB,GAAG;AAAA,IACnC;AAGA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,YAAI,MAAM,SAAS,yBAAyB,MAAM,MAAM,SAAS,cAAc;AAC7E,gBAAM,EAAE,OAAO,MAAM,MAAM,KAAK;AAAA,QAClC,WAAW,MAAM,SAAS,mBAAmB,WAAW,OAAO;AAE7D,gBAAM,QAAQ,MAAM,UAAU,aAAa;AAC3C,uBAAa,eAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AAGZ,YAAM,wBAAwB,GAAG;AAAA,IACnC;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,WAAW,MAAM,SAAS,mBAAmB,OAAO;AAE1D,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,SAAS,QACtB,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,iEAAiE,SAAS,QAAQ,MAAM,GAAG,GAAG,CAAC;AAAA,QACxG,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,mEAAmE,OAAO,GAAG,CAAC;AAAA,QACvF,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAO,SAAS;AAAA,MAChB,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AG1PA,OAAO,YAAY;AAanB,IAAMA,YAAW;AACjB,IAAM,oBAAoB;AAG1B,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAAS,cAAc,UAAkE;AACvF,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AASO,SAAS,uBAAuB,KAAwB;AAC7D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,OAAO,OAAO,uBAAuB,cAAc,eAAe,OAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUD;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAGA,MAAI,OAAO,OAAO,aAAa,cAAc,eAAe,OAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUA;AAAA,QAC
V,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,uBAAuB,QAAoC;AAEzE,QAAM,SAAS,IAAI,OAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS;AAAA,IACT,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUA;AAAA,EACZ;AAEA,iBAAe,SACb,UACA,SACsB;AACtB,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,QAAQ;AAC3C,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAC5D,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOC,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,uBAAuB,GAAG;AAAA,MAClC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,QAAQ;AAE3C,UAAM,SAA0D;AAAA,MAC9D;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,IACzD,SAAS,KAAK;AACZ,YAAM,uBAAuB,GAAG;AAAA,IAClC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaA,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,uBAAuB,GAAG;AAAA,IAClC;AAEA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,iBAAiB;AACpD,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,MACpD,SAAS,KAAK;AACZ,cAAM,uBAAuB,GAAG;AAAA,MAClC;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,WACb,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QACjG,UAAUD;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,kEAAkE,OAAO,GAAG,CAAC;AAAA,QACtF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC/PA;AAAA,EACE;AAAA,EAKA;AAAA,OACK;AAaP,IAAMC,YAAW;AAGjB,SAASC,gBAAe,MAAkE;AACxF,QAAM,cAAc,MAAM,oBAAoB;AAC9C,QAAM,eAAe,MAAM,wBAAwB;AACnD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,MAAM,mBAAmB,cAAc;AAAA,EACtD;AACF;AAOA,SAAS,oBAAoB,UAG3B;AACA,QAAM,iBAAiB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AACjE,QAAM,uBAAuB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AAEvE,QAAM,SACJ,eAAe,SAAS,IAAI,eAAe,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,I
AAI;AAEhF,QAAM,WAAsB,qBAAqB,IAAI,CAAC,OAAO;AAAA,IAC3D,MAAM,EAAE,SAAS,cAAc,UAAU;AAAA,IACzC,OAAO,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC;AAAA,EAC7B,EAAE;AAEF,SAAO,EAAE,QAAQ,SAAS;AAC5B;AAUO,SAAS,qBAAqB,KAAwB;AAC3D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,eAAe,UAAU;AAC3B,UAAM,YAAY,IAAI,WAAW,OAAO,IAAI,UAAU;AACtD,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUD;AAAA,MACV,YAAY,IAAI;AAAA,MAChB;AAAA,MACA,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,qBAAqB,QAAoC;AACvE,QAAM,KAAK,IAAI,YAAY;AAAA,IACzB,QAAQ,OAAO;AAAA,IACf,aAAa;AAAA,MACX,SAAS,OAAO,aAAa;AAAA,IAC/B;AAAA,EACF,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUA;AAAA,EACZ;AAEA,iBAAe,SACb,UACA,SACsB;AACtB,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,QAAQ;AACzD,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AAEF,cAAM,eAAsC,CAAC;AAE7C,YAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,cAAM,WAAW,MAAM,GAAG,OAAO,gBAAgB;AAAA,UAC/C;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,QACV,CAAC;AAED,eAAO;AAAA,UACL,SAAS,SAAS,QAAQ;AAAA,UAC1B;AAAA,UACA,OAAOC,gBAAe,SAAS,aAAa;AAAA,UAC5C,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,QAAQ;AAGzD,UAAM,eAAsC,CAAC;AAC7C,QAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,GAAG,OAAO,sBAAsB;AAAA,QAChD;AAAA,QACA;AAAA,QACA,QAAQ;AAAA,MACV,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,cAAM,OAAO,MAAM;AACnB,YAAI,SAAS,UAAa,KAAK,SAAS,GAAG;AACzC,gBAAM,EAAE,OAAO,KAAK;AAAA,QACtB;AAEA,YAAI,MAAM,kBAAkB,QAAW;AACrC,uBAAaA,gBAAe,MAAM,aAAa;AAAA,QACjD;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAEA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AACnC,UAAM,oBAAkC;AAAA,MACtC;AAAA,QACE,MAAM;AAAA,QACN,SACE;AAAA,MACJ;AAAA,MACA,GAAG;AAAA,IACL;AAEA,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,iBAAiB;AAClE,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,eAAsC;AAAA;AAAA,UAE1C,kBAAkB;AAAA,QACpB;AAEA,YAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,eAAO,MAAM,GAAG,OAAO,gBAAgB;AAAA,UACrC;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,QACV,CAAC;AAAA,MACH,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ;AAEvC,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,WACb,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,8DAA8D,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QAC/F,UAAUD;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,OAAO,GAAG,CAAC;AAAA,QACpF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,aAAa;AAAA,MAC/C,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC7RA,OAAOC,aAAY;AAanB,IAAMC,YAAW;AAGjB,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,
aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAAS,oBAAoB,UAAkE;AAC7F,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AAMO,SAAS,qBAAqB,KAAwB;AAC3D,MAAI,eAAe,SAAU,QAAO;AAKpC,MAAI,OAAOC,QAAO,uBAAuB,cAAc,eAAeA,QAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUF;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,MAAI,OAAOE,QAAO,aAAa,cAAc,eAAeA,QAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUF;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,qBAAqB,QAAoC;AACvE,QAAM,SAAS,IAAIE,QAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUF;AAAA,EACZ;AAEA,iBAAe,SACb,UACA,SACsB;AACtB,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,QAAQ;AACnD,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAE5D,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOC,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,QAAQ;AAEnD,UAAM,SAA0D;AAAA,MAC9D;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,IACzD,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AAEnC,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaA,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,iBAAiB;AAC5D,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,iBAAiB,EAAE,MAAM,cAAc;AAAA,QACzC;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,MACpD,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AACF,eAAS,KAAK,MAAM,UAAU;AAAA,IAChC,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,8DAA8D,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QAC/F,UAAUD;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,OAAO,GAAG,CAAC;AAAA,QACpF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,
gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC9PA,SAAS,gBAAgB,KAA+C;AACtE,QAAM,WAAW,QAAQ,OAAuC,GAAG;AAGnE,WAAS,MAAM,MAAM,MAAS;AAC9B,SAAO;AAAA,IACL,MAAM,MAAM;AAAA,IACZ,QAAQ,MAAM,QAAQ,QAAQ,EAAE,OAAO,QAAW,MAAM,KAAc,CAAC;AAAA,IACvE,OAAO,MAAM,QAAQ,OAAO,GAAG;AAAA,IAC/B,CAAC,OAAO,aAAa,IAAI;AACvB,aAAO;AAAA,IACT;AAAA,IACA,CAAC,OAAO,YAAY,GAAG,YAAY;AAAA,EACrC;AACF;AAEA,SAAS,eAAe,UAA6B;AACnD,QAAM,MAAM,IAAI,SAAS;AAAA,IACvB,SAAS,6BAA6B,QAAQ;AAAA,IAC9C;AAAA,IACA,WAAW;AAAA,EACb,CAAC;AAKD,SAAO;AAAA,IACL,IAAI,SAA0B;AAC5B,YAAM;AAAA,IACR;AAAA,IACA,UAAU,MAAM,QAAQ,OAAO,GAAG;AAAA,IAClC,QAAQ,MAAM,gBAAgB,GAAG;AAAA,IACjC,YAAY,MAAM,QAAQ,OAAO,GAAG;AAAA,EACtC;AACF;AAGO,SAAS,yBAAyB,QAAoC;AAC3E,OAAK;AACL,SAAO,eAAe,YAAY;AACpC;;;AC9BO,SAAS,aAAa,QAAoC;AAC/D,UAAQ,OAAO,UAAU;AAAA,IACvB,KAAK;AACH,aAAO,wBAAwB,MAAM;AAAA,IAEvC,KAAK;AACH,aAAO,qBAAqB,MAAM;AAAA,IAEpC,KAAK;AACH,aAAO,qBAAqB,MAAM;AAAA,IAEpC,KAAK;AACH,aAAO,uBAAuB,MAAM;AAAA,IAEtC,KAAK;AACH,aAAO,yBAAyB,MAAM;AAAA,IAExC,SAAS;AAGP,YAAM,cAAqB,OAAO;AAClC,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,qCAAqC,OAAO,WAAW,CAAC;AAAA,QACjE,UAAU,OAAO,WAAW;AAAA,QAC5B,WAAW;AAAA,MACb,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAcO,SAAS,oBACd,UACA,OACA,WACW;AACX,QAAM,SAAS,cAAc,QAAQ;AACrC,SAAO,aAAa,EAAE,UAAU,OAAO,QAAQ,GAAG,UAAU,CAAC;AAC/D;AAGA,SAAS,cAAc,UAA+C;AACpE,QAAM,YAAyD;AAAA,IAC7D,WAAW;AAAA,IACX,QAAQ;AAAA,IACR,QAAQ;AAAA,IACR,UAAU;AAAA,IACV,YAAY;AAAA,EACd;AAEA,QAAM,SAAS,UAAU,QAAQ;AACjC,QAAM,SAAS,QAAQ,IAAI,MAAM;AAEjC,MAAI,WAAW,UAAa,OAAO,KAAK,MAAM,IAAI;AAChD,UAAM,IAAI,SAAS;AAAA,MACjB,SAAS,mBAAmB,MAAM,yDAAyD,QAAQ;AAAA,MACnG;AAAA,MACA,WAAW;AAAA,IACb,CAAC;AAAA,EACH;AAEA,SAAO;AACT;","names":["PROVIDER","normalizeUsage","PROVIDER","normalizeUsage","OpenAI","PROVIDER","normalizeUsage","OpenAI"]}
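
The retry helper bundled in the source map above documents its policy as full jitter — `delay = random(0, baseDelayMs * 2^attempt)` — with 429/502/503/504 and ECONNRESET/ETIMEDOUT treated as retryable. Below is a minimal standalone restatement of that backoff math, lifted from the bundled `retry.ts` source purely for readability; it introduces no new API, and the loop bound of 3 assumes the documented `maxRetries` default:

```typescript
// Full-jitter backoff, as documented in the bundled retry.ts:
// delay = random(0, baseDelayMs * 2^attempt)
function computeBackoffMs(attempt: number, baseDelayMs: number): number {
  const ceiling = baseDelayMs * 2 ** attempt;
  return Math.random() * ceiling;
}

// With the defaults (baseDelayMs = 1000, maxRetries = 3), the per-attempt
// ceilings are 1s, 2s, 4s — a worst-case total sleep of 7s before the final
// attempt, after which the normalized LlmError is thrown to the caller.
for (let attempt = 0; attempt < 3; attempt++) {
  const delay = computeBackoffMs(attempt, 1000);
  console.log(`attempt ${attempt}: sleep ${delay.toFixed(0)}ms (ceiling ${1000 * 2 ** attempt}ms)`);
}
```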
package/package.json
ADDED
@@ -0,0 +1,51 @@
{
  "name": "@diabolicallabs/llm-client",
  "version": "0.1.0",
  "description": "Unified LLM API for Anthropic, OpenAI, Google, and DeepSeek. Streaming, retry/backoff, structured output, token normalization. © Diabolical Labs",
  "author": "Diana Ismail <diana@deeismail.com> (https://deeismail.com)",
  "publisher": "Diabolical Labs",
  "license": "UNLICENSED",
  "type": "module",
  "exports": {
    ".": {
      "import": "./dist/index.js",
      "types": "./dist/index.d.ts"
    }
  },
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "files": [
    "dist"
  ],
  "repository": {
    "type": "git",
    "url": "git+https://github.com/mannism/dlabs-toolkit.git",
    "directory": "packages/llm-client"
  },
  "homepage": "https://github.com/mannism/dlabs-toolkit#readme",
  "bugs": {
    "url": "https://github.com/mannism/dlabs-toolkit/issues"
  },
  "engines": {
    "node": ">=20"
  },
  "devDependencies": {
    "@types/node": "^25.6.0",
    "@vitest/coverage-v8": "^4.1.5",
    "tsup": "^8.3.5",
    "vitest": "^4.1.5"
  },
  "dependencies": {
    "@anthropic-ai/sdk": "^0.94.0",
    "@google/genai": "^1.52.0",
    "openai": "^6.36.0",
    "zod": "^4.4.3"
  },
  "scripts": {
    "build": "tsup",
    "typecheck": "tsc --noEmit",
    "lint": "biome check ./src && eslint ./src",
    "test": "vitest run",
    "test:watch": "vitest"
  }
}
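
Because every provider funnels failures through the same normalization path, consumers can branch on the normalized error rather than on provider-specific SDK classes. A short sketch, assuming `LlmError` is re-exported from the package root alongside `createClientFromEnv` (the bundled sources define both, but the exact export surface of `dist/index.d.ts` is an assumption here):

```typescript
import { createClientFromEnv, LlmError } from '@diabolicallabs/llm-client';

const client = createClientFromEnv('openai', 'gpt-4o');

try {
  const response = await client.complete([{ role: 'user', content: 'Hello' }]);
  console.log(response.content, response.usage.totalTokens);
} catch (err) {
  if (!(err instanceof LlmError)) throw err;
  // Same normalized shape for every provider.
  console.error(`[${err.provider}] ${err.message} (status: ${err.statusCode ?? 'n/a'})`);
  // retryable === true here means the failure persisted through the built-in
  // backoff (maxRetries, default 3) — e.g. sustained 429s or 5xx responses.
  console.error(`retryable: ${err.retryable}`);
}
```

Note that the package is ESM-only: `package.json` sets `"type": "module"` and exposes only an `import` export condition, so CommonJS consumers on Node 20 would need a dynamic `await import('@diabolicallabs/llm-client')` instead of `require()`.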