llm-fns 1.0.22 → 1.0.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/createLlmRetryClient.js +17 -6
- package/package.json +1 -1
- package/readme.md +33 -42
package/dist/createLlmRetryClient.js CHANGED

@@ -79,11 +79,15 @@ function createLlmRetryClient(params) {
         const currentPrompt = useFallback ? fallbackPrompt : prompt;
         const mode = useFallback ? 'fallback' : 'main';
         const currentMessages = constructLlmMessages(initialMessages, attempt, lastError);
+        // Capture raw response for error context
+        let rawResponseForError = null;
         try {
             const completion = await currentPrompt({
                 messages: currentMessages,
                 ...restOptions,
             });
+            // Extract raw content immediately
+            rawResponseForError = completion.choices[0]?.message?.content || null;
             const assistantMessage = completion.choices[0]?.message;
             let dataToProcess = completion;
             if (responseType === 'text') {
@@ -132,19 +136,26 @@ function createLlmRetryClient(params) {
             return dataToProcess;
         }
         catch (error) {
-            if (error instanceof createLlmClient_js_1.LlmFatalError) {
-                const fatalAttemptError = new LlmRetryAttemptError(`Fatal error on attempt ${attempt + 1}: ${error.message}`, mode, currentMessages, attempt, error, error.rawResponse, { cause: lastError });
-                throw new LlmRetryExhaustedError(`Operation failed with fatal error on attempt ${attempt + 1}.`, { cause: fatalAttemptError });
-            }
             if (error instanceof LlmRetryError) {
                 const conversationForError = [...currentMessages];
                 if (error.rawResponse) {
                     conversationForError.push({ role: 'assistant', content: error.rawResponse });
                 }
-
+                else if (rawResponseForError) {
+                    conversationForError.push({ role: 'assistant', content: rawResponseForError });
+                }
+                lastError = new LlmRetryAttemptError(`Attempt ${attempt + 1} failed: ${error.message}`, mode, conversationForError, attempt, error, error.rawResponse || rawResponseForError, { cause: lastError });
             }
             else {
-
+                // For any other error (ZodError, SchemaValidationError that wasn't fixed, network error, etc.)
+                // We wrap it in LlmFatalError to ensure context is preserved.
+                const fatalMessage = error.message || 'An unexpected error occurred during LLM execution';
+                // If it's already a fatal error, use its cause, otherwise use the error itself
+                const cause = error instanceof createLlmClient_js_1.LlmFatalError ? error.cause : error;
+                // Use the raw response we captured, or if the error has one (e.g. LlmFatalError from lower client)
+                const responseContent = rawResponseForError || error.rawResponse || null;
+                throw new createLlmClient_js_1.LlmFatalError(fatalMessage, cause, currentMessages, // This contains the full history of retries
+                responseContent);
             }
         }
     }
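The JavaScript change above follows a capture-then-wrap pattern: the raw completion text is saved into `rawResponseForError` before any parsing or validation runs, so even an unexpected crash (a `ZodError`, a `TypeError` in validation code) can be rethrown as an `LlmFatalError` that still carries the conversation and the model's raw output. A minimal sketch of that pattern — `Message`, `FatalError`, `callModel`, and `validate` are hypothetical stand-ins, not the library's API:

```typescript
// Sketch of the capture-then-wrap pattern; all names here are stand-ins.
type Message = { role: 'system' | 'user' | 'assistant'; content: string };

class FatalError extends Error {
  constructor(
    message: string,
    public cause: unknown,             // the original error (ZodError, TypeError, ...)
    public messages: Message[],        // full conversation at the time of the crash
    public rawResponse: string | null, // raw LLM text, if we got that far
  ) {
    super(message);
  }
}

async function attemptOnce(
  messages: Message[],
  callModel: (msgs: Message[]) => Promise<string>,
  validate: (raw: string) => unknown,
): Promise<unknown> {
  // Captured before validation, so a later crash still has the raw text.
  let rawResponseForError: string | null = null;
  try {
    rawResponseForError = await callModel(messages);
    return validate(rawResponseForError);
  } catch (error) {
    // Wrap whatever was thrown so the conversation and raw output survive.
    const msg = error instanceof Error ? error.message : 'Unexpected error during LLM execution';
    throw new FatalError(msg, error, messages, rawResponseForError);
  }
}
```

The real client additionally threads `lastError` into `constructLlmMessages`, so each failed attempt's feedback shapes the next prompt.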
package/package.json CHANGED

package/readme.md CHANGED
@@ -422,12 +422,12 @@ const poem = await llm.promptTextRetry({
 
 # Error Handling
 
-The library provides a structured error hierarchy that preserves the full context of failures
+The library provides a structured error hierarchy that preserves the full context of failures, whether they happen during a retry loop or cause an immediate crash.
 
 ## Error Types
 
 ### `LlmRetryError`
-Thrown to signal that the current attempt failed but can be retried. The error message is sent back to the LLM.
+Thrown by your validation logic to signal that the current attempt failed but can be retried. The error message is sent back to the LLM as feedback.
 
 ```typescript
 import { LlmRetryError } from './src';
@@ -449,23 +449,25 @@ import { SchemaValidationError } from './src';
 throw new SchemaValidationError("Age must be a positive number");
 ```
 
-### `
-
+### `LlmFatalError`
+Thrown for **unrecoverable errors**. This includes:
+1. API Errors (401 Unauthorized, 403 Forbidden, Context Length Exceeded).
+2. Runtime errors in your validation logic (e.g., `TypeError`, database connection failed).
+3. Validation errors when `maxRetries` is 0 or `disableJsonFixer` is true.
+
+Crucially, `LlmFatalError` wraps the original error and attaches the **full conversation context** and **raw response** (if available), so you can debug what the LLM generated that caused the crash.
 
 ```typescript
-interface
+interface LlmFatalError {
   message: string;
-
-
-
-  error: Error; // The original error (LlmRetryError, etc.)
-  rawResponse?: string | null; // The raw LLM response
-  cause?: LlmRetryAttemptError; // Previous attempt's error (chain)
+  cause?: any; // The original error (e.g. ZodError, TypeError)
+  messages?: ChatCompletionMessageParam[]; // The full conversation history including retries
+  rawResponse?: string | null; // The raw text generated by the LLM before the crash
 }
 ```
 
 ### `LlmRetryExhaustedError`
-Thrown when
+Thrown when the maximum number of retries is reached. It contains the full chain of attempt errors, allowing you to trace the evolution of the conversation.
 
 ```typescript
 interface LlmRetryExhaustedError {
@@ -474,15 +476,18 @@ interface LlmRetryExhaustedError {
 }
 ```
 
-### `
-
+### `LlmRetryAttemptError`
+Wraps a single failed attempt within the retry chain.
 
 ```typescript
-interface
+interface LlmRetryAttemptError {
   message: string;
-
-
-
+  mode: 'main' | 'fallback';
+  conversation: ChatCompletionMessageParam[];
+  attemptNumber: number;
+  error: Error;
+  rawResponse?: string | null;
+  cause?: LlmRetryAttemptError; // Previous attempt's error
 }
 ```
 
@@ -497,14 +502,7 @@ LlmRetryExhaustedError
 ├── conversation: [...] (full message history)
 ├── rawResponse: '{"age": "wrong3"}'
 └── cause: LlmRetryAttemptError (Attempt 2)
-    ├──
-    ├── conversation: [...]
-    ├── rawResponse: '{"age": "wrong2"}'
-    └── cause: LlmRetryAttemptError (Attempt 1)
-        ├── error: LlmRetryError
-        ├── conversation: [...]
-        ├── rawResponse: '{"age": "wrong1"}'
-        └── cause: undefined
+    ├── ...
 ```
 
 ## Handling Errors
@@ -512,7 +510,6 @@ LlmRetryExhaustedError
 ```typescript
 import {
   LlmRetryExhaustedError,
-  LlmRetryAttemptError,
   LlmFatalError
 } from './src';
 
@@ -520,24 +517,18 @@ try {
   const result = await llm.promptZod(MySchema);
 } catch (error) {
   if (error instanceof LlmRetryExhaustedError) {
-    console.log('All retries failed');
-
-
-    let attempt = error.cause;
-    while (attempt) {
-      console.log(`Attempt ${attempt.attemptNumber + 1}:`);
-      console.log(`  Mode: ${attempt.mode}`);
-      console.log(`  Error: ${attempt.error.message}`);
-      console.log(`  Raw Response: ${attempt.rawResponse}`);
-      console.log(`  Conversation length: ${attempt.conversation.length}`);
-
-      attempt = attempt.cause as LlmRetryAttemptError | undefined;
-    }
+    console.log('All retries failed.');
+    // Access the last response
+    console.log('Last LLM response:', error.cause.rawResponse);
   }
 
   if (error instanceof LlmFatalError) {
-    console.log('Fatal
-    console.log('Original
+    console.log('Crash or Fatal API Error:', error.message);
+    console.log('Original Cause:', error.cause);
+
+    // You always have access to what the LLM said, even if your code crashed!
+    console.log('LLM Response that caused crash:', error.rawResponse);
+    console.log('Conversation History:', error.messages);
  }
 }
 ```
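The trimmed readme example reads only the last attempt via `error.cause.rawResponse`, but the `cause` links documented on `LlmRetryAttemptError` still let you walk the whole chain. A sketch along the lines of the example the readme previously carried — it assumes `LlmRetryAttemptError` is still exported alongside `LlmRetryExhaustedError`, and reuses the `llm` client and `MySchema` from the earlier readme sections:

```typescript
import { LlmRetryExhaustedError, LlmRetryAttemptError } from './src';

try {
  const result = await llm.promptZod(MySchema);
  console.log('Parsed:', result);
} catch (error) {
  if (error instanceof LlmRetryExhaustedError) {
    // Follow the cause chain from the last attempt back to the first.
    let attempt = error.cause as LlmRetryAttemptError | undefined;
    while (attempt) {
      console.log(`Attempt ${attempt.attemptNumber + 1} (${attempt.mode}): ${attempt.error.message}`);
      console.log(`  Raw response: ${attempt.rawResponse}`);
      attempt = attempt.cause; // typed LlmRetryAttemptError | undefined per the interface above
    }
  }
}
```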