@diabolicallabs/llm-client 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -6,7 +6,7 @@ Unified LLM API across Anthropic, OpenAI, Google Gemini, DeepSeek, and Perplexit
6
6
 
7
7
  ## Status
8
8
 
9
- **Published — v0.2.0.** All five providers are implemented. Perplexity adds web-grounded responses with citation extraction and search filters.
9
+ **Published — v0.3.0.** All five providers are implemented. v0.3.0 adds per-call timeouts, caller AbortSignal, and stream stall detection.
10
10
 
11
11
  ## Install
12
12
 
@@ -159,10 +159,95 @@ interface LlmCallOptions {
159
159
  model?: string;
160
160
  maxTokens?: number;
161
161
  temperature?: number;
162
+ timeoutMs?: number; // Per-call timeout (ms). Overrides config.timeoutMs.
163
+ signal?: AbortSignal; // Caller-supplied cancel signal. Never retried.
164
+ streamStallTimeoutMs?: number; // Per-chunk silence timeout for stream(). Default 30000.
162
165
  providerOptions?: Record<string, unknown>; // Perplexity search filters, etc.
163
166
  }
164
167
  ```
165
168
 
169
+ ## Cancellation, timeouts, stall detection
170
+
171
+ ### Per-call timeout override
172
+
173
+ The default timeout is set at client construction via `config.timeoutMs` (default 30 000 ms). Override it per-call:
174
+
175
+ ```typescript
176
+ const client = createClient({
177
+ provider: 'anthropic',
178
+ model: 'claude-sonnet-4-6',
179
+ apiKey: process.env.ANTHROPIC_API_KEY!,
180
+ timeoutMs: 30_000, // client default
181
+ });
182
+
183
+ // This call gets 90 seconds — useful for sonar-deep-research or long reasoning
184
+ const response = await client.complete(messages, { timeoutMs: 90_000 });
185
+ ```
186
+
187
+ On timeout, `LlmError.kind === 'timeout'` and `retryable === true`. Each retry attempt gets a fresh deadline — the timeout resets per attempt, not across the full retry sequence.
188
+
189
+ ### Caller AbortSignal
190
+
191
+ Pass any `AbortSignal` to cancel an in-flight call immediately:
192
+
193
+ ```typescript
194
+ const ac = new AbortController();
195
+
196
+ // Cancel on user navigation, request supersede, shutdown, etc.
197
+ const responsePromise = client.complete(messages, { signal: ac.signal });
198
+
199
+ // Cancel before the call returns
200
+ ac.abort('user navigated away');
201
+
202
+ try {
203
+ await responsePromise;
204
+ } catch (err) {
205
+ if (err instanceof LlmError && err.kind === 'cancelled') {
206
+ // Gracefully handle the cancellation
207
+ }
208
+ }
209
+ ```
210
+
211
+ - A signal already aborted at call time throws immediately — no SDK call is made, no retry.
212
+ - A mid-call abort propagates to the SDK (Anthropic, OpenAI, DeepSeek, Perplexity) or wins a `Promise.race` (Gemini). `kind === 'cancelled'`, `retryable === false`. Never retried.
213
+
214
+ ### Stream stall detection
215
+
216
+ A stream that emits a first chunk and then silently hangs will stall the consumer indefinitely without this feature. `streamStallTimeoutMs` fires a timer per chunk — if no chunk arrives within the window, the stream is aborted and a `kind: 'stream_stall'` error surfaces:
217
+
218
+ ```typescript
219
+ try {
220
+ for await (const chunk of client.stream(messages, { streamStallTimeoutMs: 10_000 })) {
221
+ process.stdout.write(chunk.token);
222
+ }
223
+ } catch (err) {
224
+ if (err instanceof LlmError && err.kind === 'stream_stall') {
225
+ console.error('stream stalled — retry or fallback');
226
+ }
227
+ }
228
+ ```
229
+
230
+ - Default `streamStallTimeoutMs`: 30 000 ms (set independently of `timeoutMs` — tolerant of reasoning-model think-pauses).
231
+ - The stall timer resets after each chunk arrives, so slow-but-not-stalled streams complete normally.
232
+ - Stall errors are **not retried** — partial output is unsafe to re-issue. The error surfaces to the caller.
233
+
234
+ ### `LlmError.kind` discriminator
235
+
236
+ ```typescript
237
+ type LlmErrorKind = 'cancelled' | 'timeout' | 'stream_stall' | 'http' | 'network' | 'unknown';
238
+
239
+ class LlmError extends Error {
240
+ readonly provider: string;
241
+ readonly statusCode?: number;
242
+ readonly retryable: boolean;
243
+ readonly kind: LlmErrorKind | undefined; // undefined on errors from older paths
244
+ }
245
+ ```
246
+
247
+ ### Gemini cancellation caveat
248
+
249
+ `@google/genai` does not accept a per-call `AbortSignal`. Cancellation uses `Promise.race` — when the internal controller aborts, we stop awaiting, but the SDK's HTTP request continues in the background until the SDK-level timeout fires. The SDK client is constructed with `httpOptions.timeout = configTimeoutMs * 2` as a backstop. This bounds the leaked request to at most 2× the configured timeout. Native signal support will be added when the SDK provides it.
250
+
166
251
  ## Error handling
167
252
 
168
253
  All provider errors are normalized into `LlmError`:
@@ -174,12 +259,12 @@ try {
174
259
  const response = await client.complete(messages);
175
260
  } catch (err) {
176
261
  if (err instanceof LlmError) {
177
- console.error(err.provider, err.statusCode, err.retryable);
262
+ console.error(err.provider, err.statusCode, err.retryable, err.kind);
178
263
  }
179
264
  }
180
265
  ```
181
266
 
182
- Retryable errors (429, 5xx, network failures) are retried automatically with exponential backoff and full jitter before throwing.
267
+ Retryable errors (429, 5xx, network failures, timeout) are retried automatically with exponential backoff and full jitter before throwing. Cancelled and stream-stall errors are never retried.
183
268
 
184
269
  ## Token normalization
185
270
 
package/dist/index.d.ts CHANGED
@@ -6,6 +6,13 @@
6
6
  * Week 5 additions:
7
7
  * LlmResponse.citations — populated by the Perplexity provider; undefined for all others.
8
8
  * LlmCallOptions — per-call options type extracted for reuse; adds providerOptions escape hatch.
9
+ *
10
+ * Week 6 additions (v0.3.0 — abort/timeout/stall):
11
+ * LlmCallOptions.timeoutMs — per-call timeout override (ms); overrides config.timeoutMs.
12
+ * LlmCallOptions.signal — caller-supplied AbortSignal; aborts in-flight call.
13
+ * LlmCallOptions.streamStallTimeoutMs — per-stream stall detection (ms); default 30000.
14
+ * LlmClientConfig.streamStallTimeoutMs — config-level stall default.
15
+ * LlmError.kind — discriminator for error classification.
9
16
  */
10
17
  interface LlmMessage {
11
18
  role: 'system' | 'user' | 'assistant';
@@ -20,6 +27,12 @@ interface LlmClientConfig {
20
27
  maxTokens?: number;
21
28
  temperature?: number;
22
29
  timeoutMs?: number;
30
+ /**
31
+ * Default stall timeout for stream() calls (ms). Fires when no chunk is received
32
+ * for this duration. Independent of timeoutMs — tolerant of reasoning-model think-pauses.
33
+ * Default: 30000.
34
+ */
35
+ streamStallTimeoutMs?: number;
23
36
  }
24
37
  interface LlmUsage {
25
38
  inputTokens: number;
@@ -47,29 +60,62 @@ interface LlmResponse {
47
60
  /**
48
61
  * Per-call options shared across complete(), stream(), and structured().
49
62
  * Extends the standard model/maxTokens/temperature overrides with:
50
- * providerOptions generic escape hatch for provider-specific parameters.
51
- * The Perplexity provider reads search_domain_filter and
52
- * search_recency_filter from this field; other providers ignore it.
53
- * Unknown fields are passed through unchanged.
63
+ * timeoutMs per-call timeout override; overrides config.timeoutMs for this call only.
64
+ * signal — caller-supplied AbortSignal; aborts the in-flight call immediately.
65
+ * A pre-aborted signal throws without making an SDK call (no retry).
66
+ * A mid-call abort throws kind:'cancelled', retryable:false (no retry).
67
+ * streamStallTimeoutMs — per-call stall detection for stream(); overrides config default.
68
+ * providerOptions — generic escape hatch for provider-specific parameters.
69
+ * The Perplexity provider reads search_domain_filter and
70
+ * search_recency_filter from this field; other providers ignore it.
71
+ * Unknown fields are passed through unchanged.
54
72
  */
55
- interface LlmCallOptions extends Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>> {
73
+ interface LlmCallOptions extends Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature' | 'timeoutMs'>> {
74
+ /** Caller-supplied AbortSignal. Cancels the in-flight call. Never retried. */
75
+ signal?: AbortSignal;
76
+ /**
77
+ * Per-call stall timeout for stream() in ms. Overrides config.streamStallTimeoutMs.
78
+ * Fires when no chunk arrives within this window. Default: config.streamStallTimeoutMs ?? 30000.
79
+ */
80
+ streamStallTimeoutMs?: number;
56
81
  providerOptions?: Record<string, unknown>;
57
82
  }
58
83
  interface LlmStreamChunk {
59
84
  token: string;
60
85
  usage?: LlmUsage;
61
86
  }
87
+ /**
88
+ * Discriminator for LlmError — lets callers branch on error class without
89
+ * parsing message strings.
90
+ *
91
+ * cancelled — AbortSignal fired (caller-initiated). Never retried.
92
+ * timeout — Per-call timeoutMs deadline exceeded. Retried by withRetry.
93
+ * stream_stall — No chunk received within streamStallTimeoutMs. Not retried
94
+ * (partial stream output is unsafe to re-issue).
95
+ * http — Non-retryable HTTP error (4xx excluding 429).
96
+ * network — Retryable network-layer error (ECONNRESET, ETIMEDOUT, etc.).
97
+ * unknown — Unclassified error.
98
+ */
99
+ type LlmErrorKind = 'cancelled' | 'timeout' | 'stream_stall' | 'http' | 'network' | 'unknown';
62
100
  declare class LlmError extends Error {
63
101
  readonly name = "LlmError";
64
102
  readonly provider: string;
65
103
  readonly statusCode: number | undefined;
66
104
  readonly retryable: boolean;
105
+ /**
106
+ * Optional error kind discriminator. Present on errors produced by the abort/timeout/stall
107
+ * machinery (v0.3.0+). May be undefined on errors from providers that pre-date the kind field
108
+ * or on errors that fall through to the generic normalization path.
109
+ * Typed as LlmErrorKind | undefined to satisfy exactOptionalPropertyTypes.
110
+ */
111
+ readonly kind: LlmErrorKind | undefined;
67
112
  readonly cause: unknown;
68
113
  constructor(opts: {
69
114
  message: string;
70
115
  provider: string;
71
116
  statusCode?: number;
72
117
  retryable: boolean;
118
+ kind?: LlmErrorKind;
73
119
  cause?: unknown;
74
120
  });
75
121
  }