genai-lite 0.3.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/llm/LLMService.js +9 -5
- package/dist/llm/LLMService.test.js +6 -11
- package/dist/llm/clients/MockClientAdapter.js +20 -10
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -240,7 +240,7 @@ const response = await llmService.sendMessage({
|
|
|
240
240
|
//
|
|
241
241
|
// The response will have:
|
|
242
242
|
// - response.choices[0].message.content = "The answer is 36."
|
|
243
|
-
// - response.choices[0].reasoning = "
|
|
243
|
+
// - response.choices[0].reasoning = "15% means 15/100 = 0.15. So 15% of 240 = 0.15 × 240 = 36."
|
|
244
244
|
|
|
245
245
|
// If the model doesn't include the <thinking> tag, you'll get an error (with default 'auto' mode)
|
|
246
246
|
```
|
package/dist/llm/LLMService.js
CHANGED
|
@@ -167,11 +167,15 @@ class LLMService {
|
|
|
167
167
|
console.log(`Extracted <${tagName}> block from response.`);
|
|
168
168
|
// Handle the edge case: append to existing reasoning if present.
|
|
169
169
|
const existingReasoning = choice.reasoning || '';
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
170
|
+
// Only add a separator when appending to existing reasoning
|
|
171
|
+
if (existingReasoning) {
|
|
172
|
+
// Use a neutral markdown header that works for any consumer (human or AI)
|
|
173
|
+
choice.reasoning = `${existingReasoning}\n\n#### Additional Reasoning\n\n${extracted}`;
|
|
174
|
+
}
|
|
175
|
+
else {
|
|
176
|
+
// No existing reasoning, just use the extracted content directly
|
|
177
|
+
choice.reasoning = extracted;
|
|
178
|
+
}
|
|
175
179
|
choice.message.content = remaining;
|
|
176
180
|
}
|
|
177
181
|
else {
|
|
package/dist/llm/LLMService.test.js
CHANGED
|
@@ -376,8 +376,7 @@ describe('LLMService', () => {
|
|
|
376
376
|
const response = await service.sendMessage(request);
|
|
377
377
|
expect(response.object).toBe('chat.completion');
|
|
378
378
|
const successResponse = response;
|
|
379
|
-
expect(successResponse.choices[0].reasoning).
|
|
380
|
-
expect(successResponse.choices[0].reasoning).toContain('<!-- Extracted by genai-lite from <thinking> tag -->');
|
|
379
|
+
expect(successResponse.choices[0].reasoning).toBe('I am thinking about this problem.');
|
|
381
380
|
expect(successResponse.choices[0].message.content).toBe('Here is the answer.');
|
|
382
381
|
});
|
|
383
382
|
it('should not extract thinking tag when disabled', async () => {
|
|
@@ -413,19 +412,15 @@ describe('LLMService', () => {
|
|
|
413
412
|
const response = await service.sendMessage(request);
|
|
414
413
|
expect(response.object).toBe('chat.completion');
|
|
415
414
|
const successResponse = response;
|
|
416
|
-
expect(successResponse.choices[0].reasoning).
|
|
417
|
-
expect(successResponse.choices[0].reasoning).toContain('<!-- Extracted by genai-lite from <scratchpad> tag -->');
|
|
415
|
+
expect(successResponse.choices[0].reasoning).toBe('Working through the logic...');
|
|
418
416
|
expect(successResponse.choices[0].message.content).toBe('Final answer is 42.');
|
|
419
417
|
});
|
|
420
418
|
it('should append to existing reasoning', async () => {
|
|
421
|
-
//
|
|
422
|
-
// Then test that thinking extraction appends to it
|
|
423
|
-
// Since MockClientAdapter doesn't generate reasoning, we'll skip this complex test
|
|
424
|
-
// and just test the simple case
|
|
419
|
+
// Use test_reasoning to get a response with existing reasoning, then test extraction appends to it
|
|
425
420
|
const request = {
|
|
426
421
|
providerId: 'mistral',
|
|
427
422
|
modelId: 'codestral-2501',
|
|
428
|
-
messages: [{ role: 'user', content: '
|
|
423
|
+
messages: [{ role: 'user', content: 'test_reasoning:<thinking>Additional thoughts here.</thinking>The analysis is complete.' }],
|
|
429
424
|
settings: {
|
|
430
425
|
thinkingExtraction: {
|
|
431
426
|
enabled: true,
|
|
@@ -436,8 +431,8 @@ describe('LLMService', () => {
|
|
|
436
431
|
const response = await service.sendMessage(request);
|
|
437
432
|
expect(response.object).toBe('chat.completion');
|
|
438
433
|
const successResponse = response;
|
|
439
|
-
|
|
440
|
-
expect(successResponse.choices[0].reasoning).
|
|
434
|
+
// Should contain both the initial reasoning and the extracted thinking with separator
|
|
435
|
+
expect(successResponse.choices[0].reasoning).toBe('Initial model reasoning from native capabilities.\n\n#### Additional Reasoning\n\nAdditional thoughts here.');
|
|
441
436
|
expect(successResponse.choices[0].message.content).toBe('The analysis is complete.');
|
|
442
437
|
});
|
|
443
438
|
it('should handle missing tag with explicit ignore', async () => {
|
|
package/dist/llm/clients/MockClientAdapter.js
CHANGED
|
@@ -100,6 +100,11 @@ class MockClientAdapter {
|
|
|
100
100
|
const startIndex = originalContent.indexOf("test_thinking:") + "test_thinking:".length;
|
|
101
101
|
responseContent = originalContent.substring(startIndex).trim();
|
|
102
102
|
}
|
|
103
|
+
else if (userContent.includes("test_reasoning:")) {
|
|
104
|
+
// Extract content after "test_reasoning:" and return it as both content and reasoning
|
|
105
|
+
const startIndex = originalContent.indexOf("test_reasoning:") + "test_reasoning:".length;
|
|
106
|
+
responseContent = originalContent.substring(startIndex).trim();
|
|
107
|
+
}
|
|
103
108
|
else if (userContent.includes("hello") || userContent.includes("hi")) {
|
|
104
109
|
responseContent =
|
|
105
110
|
"Hello! I'm a mock LLM assistant. How can I help you today?";
|
|
@@ -153,21 +158,26 @@ class MockClientAdapter {
|
|
|
153
158
|
else if (request.settings.stopSequences.some((seq) => responseContent.includes(seq))) {
|
|
154
159
|
finishReason = "stop";
|
|
155
160
|
}
|
|
161
|
+
// Check if we need to add reasoning to the response
|
|
162
|
+
const isReasoningTest = userContent.includes("test_reasoning:");
|
|
163
|
+
const choice = {
|
|
164
|
+
message: {
|
|
165
|
+
role: "assistant",
|
|
166
|
+
content: responseContent,
|
|
167
|
+
},
|
|
168
|
+
finish_reason: finishReason,
|
|
169
|
+
index: 0,
|
|
170
|
+
};
|
|
171
|
+
// Add reasoning field for test_reasoning pattern
|
|
172
|
+
if (isReasoningTest) {
|
|
173
|
+
choice.reasoning = "Initial model reasoning from native capabilities.";
|
|
174
|
+
}
|
|
156
175
|
return {
|
|
157
176
|
id: `mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
|
158
177
|
provider: request.providerId,
|
|
159
178
|
model: request.modelId,
|
|
160
179
|
created: Math.floor(Date.now() / 1000),
|
|
161
|
-
choices: [
|
|
162
|
-
{
|
|
163
|
-
message: {
|
|
164
|
-
role: "assistant",
|
|
165
|
-
content: responseContent,
|
|
166
|
-
},
|
|
167
|
-
finish_reason: finishReason,
|
|
168
|
-
index: 0,
|
|
169
|
-
},
|
|
170
|
-
],
|
|
180
|
+
choices: [choice],
|
|
171
181
|
usage: {
|
|
172
182
|
prompt_tokens: promptTokenCount,
|
|
173
183
|
completion_tokens: mockTokenCount,
|