llm-fns 1.0.21 → 1.0.23

@@ -79,11 +79,15 @@ function createLlmRetryClient(params) {
  const currentPrompt = useFallback ? fallbackPrompt : prompt;
  const mode = useFallback ? 'fallback' : 'main';
  const currentMessages = constructLlmMessages(initialMessages, attempt, lastError);
+ // Capture raw response for error context
+ let rawResponseForError = null;
  try {
  const completion = await currentPrompt({
  messages: currentMessages,
  ...restOptions,
  });
+ // Extract raw content immediately
+ rawResponseForError = completion.choices[0]?.message?.content || null;
  const assistantMessage = completion.choices[0]?.message;
  let dataToProcess = completion;
  if (responseType === 'text') {
@@ -132,19 +136,26 @@ function createLlmRetryClient(params) {
  return dataToProcess;
  }
  catch (error) {
- if (error instanceof createLlmClient_js_1.LlmFatalError) {
- const fatalAttemptError = new LlmRetryAttemptError(`Fatal error on attempt ${attempt + 1}: ${error.message}`, mode, currentMessages, attempt, error, error.rawResponse, { cause: lastError });
- throw new LlmRetryExhaustedError(`Operation failed with fatal error on attempt ${attempt + 1}.`, { cause: fatalAttemptError });
- }
  if (error instanceof LlmRetryError) {
  const conversationForError = [...currentMessages];
  if (error.rawResponse) {
  conversationForError.push({ role: 'assistant', content: error.rawResponse });
  }
- lastError = new LlmRetryAttemptError(`Attempt ${attempt + 1} failed: ${error.message}`, mode, conversationForError, attempt, error, error.rawResponse, { cause: lastError });
+ else if (rawResponseForError) {
+ conversationForError.push({ role: 'assistant', content: rawResponseForError });
+ }
+ lastError = new LlmRetryAttemptError(`Attempt ${attempt + 1} failed: ${error.message}`, mode, conversationForError, attempt, error, error.rawResponse || rawResponseForError, { cause: lastError });
  }
  else {
- throw error;
+ // For any other error (ZodError, SchemaValidationError that wasn't fixed, network error, etc.)
+ // We wrap it in LlmFatalError to ensure context is preserved.
+ const fatalMessage = error.message || 'An unexpected error occurred during LLM execution';
+ // If it's already a fatal error, use its cause, otherwise use the error itself
+ const cause = error instanceof createLlmClient_js_1.LlmFatalError ? error.cause : error;
+ // Use the raw response we captured, or if the error has one (e.g. LlmFatalError from lower client)
+ const responseContent = rawResponseForError || error.rawResponse || null;
+ throw new createLlmClient_js_1.LlmFatalError(fatalMessage, cause, currentMessages, // This contains the full history of retries
+ responseContent);
  }
  }
  }
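In effect, errors that previously escaped the retry loop unwrapped (a `ZodError` from schema parsing, a `TypeError` thrown inside a transform, a network failure) now surface as `LlmFatalError` carrying the conversation history and the raw model output captured in `rawResponseForError`. A minimal sketch of what a caller can rely on after this change, assuming an `llm` client and `MySchema` set up as in the readme examples below (the import path mirrors those examples):

```typescript
import { LlmFatalError } from './src'; // import path as used in the readme examples

try {
  // `llm` and `MySchema` are assumed to be configured as in the readme below
  const result = await llm.promptZod(MySchema);
  console.log(result);
} catch (error) {
  if (error instanceof LlmFatalError) {
    console.error('Original cause:', error.cause);       // e.g. the ZodError or TypeError
    console.error('Raw LLM output:', error.rawResponse); // captured before the failure
    console.error('Conversation:', error.messages);      // full history, including retries
  }
}
```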
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "llm-fns",
- "version": "1.0.21",
+ "version": "1.0.23",
  "description": "",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
package/readme.md CHANGED
@@ -328,6 +328,68 @@ const result = await llm.promptZod(MySchema, {
  });
  ```
  
+ ### Level 4: Retryable Errors in Zod Transforms
+
+ You can throw `SchemaValidationError` inside Zod `.transform()` or `.refine()` to trigger the retry loop. This is useful for complex validation logic that can't be expressed in the schema itself.
+
+ ```typescript
+ import { z } from 'zod';
+ import { SchemaValidationError } from './src';
+
+ const ProductSchema = z.object({
+ name: z.string(),
+ price: z.number(),
+ currency: z.string()
+ }).transform((data) => {
+ // Custom validation that triggers retry
+ if (data.price < 0) {
+ throw new SchemaValidationError(
+ `Price cannot be negative. Got: ${data.price}. Please provide a valid positive price.`
+ );
+ }
+
+ // Normalize currency
+ const validCurrencies = ['USD', 'EUR', 'GBP'];
+ if (!validCurrencies.includes(data.currency.toUpperCase())) {
+ throw new SchemaValidationError(
+ `Invalid currency "${data.currency}". Must be one of: ${validCurrencies.join(', ')}`
+ );
+ }
+
+ return {
+ ...data,
+ currency: data.currency.toUpperCase()
+ };
+ });
+
+ // If the LLM returns { price: -10, ... }, the error message is sent back
+ // and the LLM gets another chance to fix it
+ const product = await llm.promptZod("Extract product info from: ...", ProductSchema);
+ ```
+
+ **Important:** Only `SchemaValidationError` triggers the retry loop. Other errors (like `TypeError`, database errors, etc.) will bubble up immediately without retry. This prevents infinite loops when there's a bug in your transform logic.
+
+ ```typescript
+ const SafeSchema = z.object({
+ userId: z.string()
+ }).transform(async (data) => {
+ // This error WILL trigger retry (user can fix the input)
+ if (!data.userId.match(/^[a-z0-9]+$/)) {
+ throw new SchemaValidationError(
+ `Invalid userId format "${data.userId}". Must be lowercase alphanumeric.`
+ );
+ }
+
+ // This error will NOT trigger retry (it's a system error)
+ const user = await db.findUser(data.userId);
+ if (!user) {
+ throw new Error(`User not found: ${data.userId}`); // Bubbles up immediately
+ }
+
+ return { ...data, user };
+ });
+ ```
+
  ---
  
  # Use Case 4: Agentic Retry Loops (`llm.promptTextRetry`)
@@ -358,6 +420,143 @@ const poem = await llm.promptTextRetry({
  
  ---
  
+ # Error Handling
+
+ The library provides a structured error hierarchy that preserves the full context of failures, whether they happen during a retry loop or cause an immediate crash.
+
+ ## Error Types
+
+ ### `LlmRetryError`
+ Thrown by your validation logic to signal that the current attempt failed but can be retried. The error message is sent back to the LLM as feedback.
+
+ ```typescript
+ import { LlmRetryError } from './src';
+
+ throw new LlmRetryError(
+ "The response must include a title field.", // Message sent to LLM
+ 'CUSTOM_ERROR', // Type: 'JSON_PARSE_ERROR' | 'CUSTOM_ERROR'
+ { field: 'title' }, // Optional details
+ '{"name": "test"}' // Optional raw response
+ );
+ ```
+
+ ### `SchemaValidationError`
+ A specialized error for schema validation failures. Use this in Zod transforms to trigger retries.
+
+ ```typescript
+ import { SchemaValidationError } from './src';
+
+ throw new SchemaValidationError("Age must be a positive number");
+ ```
+
+ ### `LlmFatalError`
+ Thrown for **unrecoverable errors**. This includes:
+ 1. API Errors (401 Unauthorized, 403 Forbidden, Context Length Exceeded).
+ 2. Runtime errors in your validation logic (e.g., `TypeError`, database connection failed).
+ 3. Validation errors when `maxRetries` is 0 or `disableJsonFixer` is true.
+
+ Crucially, `LlmFatalError` wraps the original error and attaches the **full conversation context** and **raw response** (if available), so you can debug what the LLM generated that caused the crash.
+
+ ```typescript
+ interface LlmFatalError {
+ message: string;
+ cause?: any; // The original error (e.g. ZodError, TypeError)
+ messages?: ChatCompletionMessageParam[]; // The full conversation history including retries
+ rawResponse?: string | null; // The raw text generated by the LLM before the crash
+ }
+ ```
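As a brief sketch (reusing the `llm` client and `MySchema` from the surrounding examples, and the same `./src` import path), the wrapped `cause` lets you distinguish an API failure from a bug in your own validation logic:

```typescript
import { z } from 'zod';
import { LlmFatalError } from './src';

try {
  await llm.promptZod(MySchema);
} catch (error) {
  if (error instanceof LlmFatalError) {
    if (error.cause instanceof z.ZodError) {
      // Schema validation failed and no retry was available
    } else {
      // API error, or a runtime error thrown from a .transform()
    }
    console.log('Raw response before the failure:', error.rawResponse);
  }
}
```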
+
+ ### `LlmRetryExhaustedError`
+ Thrown when the maximum number of retries is reached. It contains the full chain of attempt errors, allowing you to trace the evolution of the conversation.
+
+ ```typescript
+ interface LlmRetryExhaustedError {
+ message: string;
+ cause: LlmRetryAttemptError; // The last attempt error (with chain to previous)
+ }
+ ```
+
+ ### `LlmRetryAttemptError`
+ Wraps a single failed attempt within the retry chain.
+
+ ```typescript
+ interface LlmRetryAttemptError {
+ message: string;
+ mode: 'main' | 'fallback';
+ conversation: ChatCompletionMessageParam[];
+ attemptNumber: number;
+ error: Error;
+ rawResponse?: string | null;
+ cause?: LlmRetryAttemptError; // Previous attempt's error
+ }
+ ```
+
+ ## Error Chain Structure
+
+ When retries are exhausted, the error chain looks like this:
+
+ ```
+ LlmRetryExhaustedError
+ └── cause: LlmRetryAttemptError (Attempt 3)
+ ├── error: LlmRetryError (the validation error)
+ ├── conversation: [...] (full message history)
+ ├── rawResponse: '{"age": "wrong3"}'
+ └── cause: LlmRetryAttemptError (Attempt 2)
+ ├── ...
+ ```
+
+ ## Handling Errors
+
+ ```typescript
+ import {
+ LlmRetryExhaustedError,
+ LlmFatalError
+ } from './src';
+
+ try {
+ const result = await llm.promptZod(MySchema);
+ } catch (error) {
+ if (error instanceof LlmRetryExhaustedError) {
+ console.log('All retries failed.');
+ // Access the last response
+ console.log('Last LLM response:', error.cause.rawResponse);
+ }
+
+ if (error instanceof LlmFatalError) {
+ console.log('Crash or Fatal API Error:', error.message);
+ console.log('Original Cause:', error.cause);
+
+ // You always have access to what the LLM said, even if your code crashed!
+ console.log('LLM Response that caused crash:', error.rawResponse);
+ console.log('Conversation History:', error.messages);
+ }
+ }
+ ```
+
+ ## Extracting the Last Response
+
+ A common pattern is to extract the last LLM response from a failed operation:
+
+ ```typescript
+ function getLastResponse(error: LlmRetryExhaustedError): string | null {
+ return error.cause?.rawResponse ?? null;
+ }
+
+ function getAllResponses(error: LlmRetryExhaustedError): string[] {
+ const responses: string[] = [];
+ let attempt = error.cause;
+ while (attempt) {
+ if (attempt.rawResponse) {
+ responses.unshift(attempt.rawResponse); // Add to front (chronological order)
+ }
+ attempt = attempt.cause as LlmRetryAttemptError | undefined;
+ }
+ return responses;
+ }
+ ```
+
+ ---
+
  # Use Case 5: Architecture & Composition
  
  How to build the client manually to enable **Fallback Chains** and **Smart Routing**.