genai-lite 0.3.2 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -1
- package/dist/llm/LLMService.js +8 -0
- package/dist/llm/LLMService.test.js +29 -0
- package/dist/llm/types.d.ts +2 -0
- package/package.json +1 -1
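In summary, 0.3.3 adds an optional `partialResponse` field to `LLMFailureResponse`: when thinking-tag extraction fails with `onMissing: 'error'` (error code `MISSING_EXPECTED_TAG`), the model's original output is preserved on the error object instead of being discarded.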
package/README.md
CHANGED
@@ -268,7 +268,7 @@ The `onMissing` property controls what happens when the expected thinking tag is
 
 - `'ignore'`: Silently continue without the tag
 - `'warn'`: Log a warning but continue processing
-- `'error'`: Return an error response
+- `'error'`: Return an error response with the original response preserved in `partialResponse`
 - `'auto'` (default): Intelligently decide based on the model's native reasoning capabilities
 
 **How `'auto'` Mode Works:**

@@ -290,6 +290,7 @@ const response = await llmService.sendMessage({
   }
 });
 // Result: ERROR if <thinking> tag is missing (strict enforcement)
+// The response is still accessible via errorResponse.partialResponse
 
 // With native reasoning models (e.g., Claude with reasoning enabled)
 const response = await llmService.sendMessage({

@@ -654,6 +655,10 @@ if (response.object === 'error') {
       break;
     case 'validation_error':
       console.error('Invalid request:', response.error.message);
+      // For validation errors, the response may still be available
+      if (response.partialResponse) {
+        console.log('Partial response:', response.partialResponse.choices[0].message.content);
+      }
       break;
     default:
       console.error('Error:', response.error.message);
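Read together, the README hunks describe one recovery pattern: with `thinkingExtraction` enabled and `onMissing` resolving to `'error'`, the failure response now carries the model's raw output. A minimal consumer sketch in TypeScript; the import path, constructor arguments, and prompt are assumptions (the provider and model ids are borrowed from the tests later in this diff):

import { LLMService } from 'genai-lite'; // assumed entry point; not shown in this diff

async function main() {
    const llmService = new LLMService(/* provider/API-key configuration -- assumed */);

    const response = await llmService.sendMessage({
        providerId: 'mistral',
        modelId: 'codestral-2501',
        messages: [{ role: 'user', content: 'Summarize the design.' }],
        settings: {
            thinkingExtraction: { enabled: true, onMissing: 'error' },
        },
    });

    if (response.object === 'error') {
        // New in 0.3.3: the raw output survives a MISSING_EXPECTED_TAG failure.
        if (response.error.code === 'MISSING_EXPECTED_TAG' && response.partialResponse) {
            console.log('Recovered output:', response.partialResponse.choices[0].message.content);
        }
    } else {
        console.log(response.choices[0].message.content);
    }
}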
package/dist/llm/LLMService.js
CHANGED
@@ -193,6 +193,14 @@ class LLMService {
                     type: "validation_error",
                 },
                 object: "error",
+                partialResponse: {
+                    id: result.id,
+                    provider: result.provider,
+                    model: result.model,
+                    created: result.created,
+                    choices: result.choices,
+                    usage: result.usage
+                }
             };
         }
         else if (effectiveOnMissing === 'warn') {
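In TypeScript terms, the construction above amounts to the following sketch. The `toFailure` helper is illustrative and not part of the package, the type imports assume these types are exported from the package root, and the remaining envelope fields (e.g. `provider`) are assumed optional, as `model?` is in the typings below:

import type { LLMResponse, LLMFailureResponse, LLMError } from 'genai-lite'; // assumed export path

// Illustrative helper: wrap a response that parsed but failed thinking-tag
// validation into the failure envelope introduced in 0.3.3.
function toFailure(result: LLMResponse, error: LLMError): LLMFailureResponse {
    // Dropping the success discriminant leaves exactly Omit<LLMResponse, 'object'>,
    // which is the declared type of partialResponse (see types.d.ts below).
    const { object: _object, ...partialResponse } = result;
    return {
        model: result.model,
        error,
        object: 'error',
        partialResponse,
    };
}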
package/dist/llm/LLMService.test.js
CHANGED

@@ -488,6 +488,9 @@ describe('LLMService', () => {
         expect(errorResponse.error.type).toBe('validation_error');
         expect(errorResponse.error.message).toContain('response was expected to start with a <thinking> tag');
         expect(errorResponse.error.message).toContain('does not have native reasoning active');
+        // Check that partial response is included
+        expect(errorResponse.partialResponse).toBeDefined();
+        expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without thinking tag.');
     });
     it('should handle missing tag for non-reasoning model with warn', async () => {
         const consoleSpy = jest.spyOn(console, 'warn').mockImplementation();

@@ -509,6 +512,27 @@ describe('LLMService', () => {
         expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('Expected <thinking> tag was not found'));
         consoleSpy.mockRestore();
     });
+    it('should handle missing tag with explicit error mode', async () => {
+        const request = {
+            providerId: 'mistral',
+            modelId: 'codestral-2501',
+            messages: [{ role: 'user', content: 'test_thinking:Response without thinking tag.' }],
+            settings: {
+                thinkingExtraction: {
+                    enabled: true,
+                    onMissing: 'error' // Explicitly set to error
+                }
+            }
+        };
+        const response = await service.sendMessage(request);
+        expect(response.object).toBe('error');
+        const errorResponse = response;
+        expect(errorResponse.error.code).toBe('MISSING_EXPECTED_TAG');
+        expect(errorResponse.error.message).toContain('response was expected to start with a <thinking> tag');
+        // Check that partial response is included
+        expect(errorResponse.partialResponse).toBeDefined();
+        expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without thinking tag.');
+    });
     it('should handle missing tag for non-reasoning model with ignore', async () => {
         const request = {
             providerId: 'mistral',

@@ -543,6 +567,8 @@ describe('LLMService', () => {
         expect(response.object).toBe('error');
         const errorResponse = response;
         expect(errorResponse.error.message).toContain('expected to start with a <reasoning> tag');
+        expect(errorResponse.partialResponse).toBeDefined();
+        expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without custom tag.');
     });
     describe('auto mode with native reasoning detection', () => {
         it('should enforce thinking tags for non-reasoning models by default', async () => {

@@ -564,6 +590,8 @@ describe('LLMService', () => {
             const errorResponse = response;
             expect(errorResponse.error.code).toBe('MISSING_EXPECTED_TAG');
             expect(errorResponse.error.message).toContain('does not have native reasoning active');
+            expect(errorResponse.partialResponse).toBeDefined();
+            expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without thinking tag.');
         });
         it('should respect explicit reasoning.enabled: false even for models with enabledByDefault', async () => {
             // This is the key test for the fix

@@ -584,6 +612,7 @@ describe('LLMService', () => {
             expect(response.object).toBe('error');
             const errorResponse = response;
             expect(errorResponse.error.code).toBe('MISSING_EXPECTED_TAG');
+            expect(errorResponse.partialResponse).toBeDefined();
         });
     });
 });
package/dist/llm/types.d.ts
CHANGED
@@ -167,6 +167,8 @@ export interface LLMFailureResponse {
     model?: string;
     error: LLMError;
     object: 'error';
+    /** The partial response that was generated before the error occurred (if available) */
+    partialResponse?: Omit<LLMResponse, 'object'>;
 }
 /**
  * Information about a supported LLM provider
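A closing note on the `Omit<LLMResponse, 'object'>` choice: because the embedded payload carries no `object` field, `object: 'error'` remains the sole discriminant of the union and type narrowing keeps working. A sketch, assuming both types are exported and that the caller defines the hypothetical union `SendMessageResult`:

import type { LLMResponse, LLMFailureResponse } from 'genai-lite'; // assumed type exports

type SendMessageResult = LLMResponse | LLMFailureResponse; // hypothetical union name

function contentOf(result: SendMessageResult): string | undefined {
    if (result.object === 'error') {
        // Failure branch: fall back to the preserved partial response, if any.
        return result.partialResponse?.choices[0]?.message?.content;
    }
    // Success branch: result is narrowed to LLMResponse here.
    return result.choices[0]?.message?.content;
}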