genai-lite 0.3.1 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -1
- package/dist/llm/LLMService.js +10 -1
- package/dist/llm/LLMService.test.js +71 -0
- package/dist/llm/types.d.ts +2 -0
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -268,7 +268,7 @@ The `onMissing` property controls what happens when the expected thinking tag is
|
|
|
268
268
|
|
|
269
269
|
- `'ignore'`: Silently continue without the tag
|
|
270
270
|
- `'warn'`: Log a warning but continue processing
|
|
271
|
-
- `'error'`: Return an error response
|
|
271
|
+
- `'error'`: Return an error response with the original response preserved in `partialResponse`
|
|
272
272
|
- `'auto'` (default): Intelligently decide based on the model's native reasoning capabilities
|
|
273
273
|
|
|
274
274
|
**How `'auto'` Mode Works:**
|
|
@@ -290,6 +290,7 @@ const response = await llmService.sendMessage({
|
|
|
290
290
|
}
|
|
291
291
|
});
|
|
292
292
|
// Result: ERROR if <thinking> tag is missing (strict enforcement)
|
|
293
|
+
// The response is still accessible via errorResponse.partialResponse
|
|
293
294
|
|
|
294
295
|
// With native reasoning models (e.g., Claude with reasoning enabled)
|
|
295
296
|
const response = await llmService.sendMessage({
|
|
@@ -654,6 +655,10 @@ if (response.object === 'error') {
|
|
|
654
655
|
break;
|
|
655
656
|
case 'validation_error':
|
|
656
657
|
console.error('Invalid request:', response.error.message);
|
|
658
|
+
// For validation errors, the response may still be available
|
|
659
|
+
if (response.partialResponse) {
|
|
660
|
+
console.log('Partial response:', response.partialResponse.choices[0].message.content);
|
|
661
|
+
}
|
|
657
662
|
break;
|
|
658
663
|
default:
|
|
659
664
|
console.error('Error:', response.error.message);
|
package/dist/llm/LLMService.js
CHANGED
|
@@ -155,7 +155,8 @@ class LLMService {
|
|
|
155
155
|
// Check if native reasoning is active
|
|
156
156
|
const isNativeReasoningActive = modelInfo.reasoning?.supported === true &&
|
|
157
157
|
(internalRequest.settings.reasoning?.enabled === true ||
|
|
158
|
-
modelInfo.reasoning?.enabledByDefault === true
|
|
158
|
+
(modelInfo.reasoning?.enabledByDefault === true &&
|
|
159
|
+
internalRequest.settings.reasoning?.enabled !== false) || // Only if not explicitly disabled
|
|
159
160
|
modelInfo.reasoning?.canDisable === false); // Always-on models
|
|
160
161
|
effectiveOnMissing = isNativeReasoningActive ? 'ignore' : 'error';
|
|
161
162
|
}
|
|
@@ -192,6 +193,14 @@ class LLMService {
|
|
|
192
193
|
type: "validation_error",
|
|
193
194
|
},
|
|
194
195
|
object: "error",
|
|
196
|
+
partialResponse: {
|
|
197
|
+
id: result.id,
|
|
198
|
+
provider: result.provider,
|
|
199
|
+
model: result.model,
|
|
200
|
+
created: result.created,
|
|
201
|
+
choices: result.choices,
|
|
202
|
+
usage: result.usage
|
|
203
|
+
}
|
|
195
204
|
};
|
|
196
205
|
}
|
|
197
206
|
else if (effectiveOnMissing === 'warn') {
|
|
package/dist/llm/LLMService.test.js
CHANGED
|
@@ -488,6 +488,9 @@ describe('LLMService', () => {
|
|
|
488
488
|
expect(errorResponse.error.type).toBe('validation_error');
|
|
489
489
|
expect(errorResponse.error.message).toContain('response was expected to start with a <thinking> tag');
|
|
490
490
|
expect(errorResponse.error.message).toContain('does not have native reasoning active');
|
|
491
|
+
// Check that partial response is included
|
|
492
|
+
expect(errorResponse.partialResponse).toBeDefined();
|
|
493
|
+
expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without thinking tag.');
|
|
491
494
|
});
|
|
492
495
|
it('should handle missing tag for non-reasoning model with warn', async () => {
|
|
493
496
|
const consoleSpy = jest.spyOn(console, 'warn').mockImplementation();
|
|
@@ -509,6 +512,27 @@ describe('LLMService', () => {
|
|
|
509
512
|
expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('Expected <thinking> tag was not found'));
|
|
510
513
|
consoleSpy.mockRestore();
|
|
511
514
|
});
|
|
515
|
+
it('should handle missing tag with explicit error mode', async () => {
|
|
516
|
+
const request = {
|
|
517
|
+
providerId: 'mistral',
|
|
518
|
+
modelId: 'codestral-2501',
|
|
519
|
+
messages: [{ role: 'user', content: 'test_thinking:Response without thinking tag.' }],
|
|
520
|
+
settings: {
|
|
521
|
+
thinkingExtraction: {
|
|
522
|
+
enabled: true,
|
|
523
|
+
onMissing: 'error' // Explicitly set to error
|
|
524
|
+
}
|
|
525
|
+
}
|
|
526
|
+
};
|
|
527
|
+
const response = await service.sendMessage(request);
|
|
528
|
+
expect(response.object).toBe('error');
|
|
529
|
+
const errorResponse = response;
|
|
530
|
+
expect(errorResponse.error.code).toBe('MISSING_EXPECTED_TAG');
|
|
531
|
+
expect(errorResponse.error.message).toContain('response was expected to start with a <thinking> tag');
|
|
532
|
+
// Check that partial response is included
|
|
533
|
+
expect(errorResponse.partialResponse).toBeDefined();
|
|
534
|
+
expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without thinking tag.');
|
|
535
|
+
});
|
|
512
536
|
it('should handle missing tag for non-reasoning model with ignore', async () => {
|
|
513
537
|
const request = {
|
|
514
538
|
providerId: 'mistral',
|
|
@@ -543,6 +567,53 @@ describe('LLMService', () => {
|
|
|
543
567
|
expect(response.object).toBe('error');
|
|
544
568
|
const errorResponse = response;
|
|
545
569
|
expect(errorResponse.error.message).toContain('expected to start with a <reasoning> tag');
|
|
570
|
+
expect(errorResponse.partialResponse).toBeDefined();
|
|
571
|
+
expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without custom tag.');
|
|
572
|
+
});
|
|
573
|
+
describe('auto mode with native reasoning detection', () => {
|
|
574
|
+
it('should enforce thinking tags for non-reasoning models by default', async () => {
|
|
575
|
+
// Mistral model doesn't have reasoning support
|
|
576
|
+
const request = {
|
|
577
|
+
providerId: 'mistral',
|
|
578
|
+
modelId: 'codestral-2501',
|
|
579
|
+
messages: [{ role: 'user', content: 'test_thinking:Response without thinking tag.' }],
|
|
580
|
+
settings: {
|
|
581
|
+
thinkingExtraction: {
|
|
582
|
+
enabled: true,
|
|
583
|
+
onMissing: 'auto'
|
|
584
|
+
}
|
|
585
|
+
}
|
|
586
|
+
};
|
|
587
|
+
const response = await service.sendMessage(request);
|
|
588
|
+
// Should error because model doesn't have native reasoning
|
|
589
|
+
expect(response.object).toBe('error');
|
|
590
|
+
const errorResponse = response;
|
|
591
|
+
expect(errorResponse.error.code).toBe('MISSING_EXPECTED_TAG');
|
|
592
|
+
expect(errorResponse.error.message).toContain('does not have native reasoning active');
|
|
593
|
+
expect(errorResponse.partialResponse).toBeDefined();
|
|
594
|
+
expect(errorResponse.partialResponse.choices[0].message.content).toBe('Response without thinking tag.');
|
|
595
|
+
});
|
|
596
|
+
it('should respect explicit reasoning.enabled: false even for models with enabledByDefault', async () => {
|
|
597
|
+
// This is the key test for the fix
|
|
598
|
+
const request = {
|
|
599
|
+
providerId: 'mistral',
|
|
600
|
+
modelId: 'codestral-2501',
|
|
601
|
+
messages: [{ role: 'user', content: 'test_thinking:Response without thinking tag.' }],
|
|
602
|
+
settings: {
|
|
603
|
+
reasoning: { enabled: false }, // Explicitly disabled
|
|
604
|
+
thinkingExtraction: {
|
|
605
|
+
enabled: true,
|
|
606
|
+
onMissing: 'auto'
|
|
607
|
+
}
|
|
608
|
+
}
|
|
609
|
+
};
|
|
610
|
+
const response = await service.sendMessage(request);
|
|
611
|
+
// Should error because reasoning is explicitly disabled
|
|
612
|
+
expect(response.object).toBe('error');
|
|
613
|
+
const errorResponse = response;
|
|
614
|
+
expect(errorResponse.error.code).toBe('MISSING_EXPECTED_TAG');
|
|
615
|
+
expect(errorResponse.partialResponse).toBeDefined();
|
|
616
|
+
});
|
|
546
617
|
});
|
|
547
618
|
});
|
|
548
619
|
});
|
package/dist/llm/types.d.ts
CHANGED
|
@@ -167,6 +167,8 @@ export interface LLMFailureResponse {
|
|
|
167
167
|
model?: string;
|
|
168
168
|
error: LLMError;
|
|
169
169
|
object: 'error';
|
|
170
|
+
/** The partial response that was generated before the error occurred (if available) */
|
|
171
|
+
partialResponse?: Omit<LLMResponse, 'object'>;
|
|
170
172
|
}
|
|
171
173
|
/**
|
|
172
174
|
* Information about a supported LLM provider
|