@contentgrowth/llm-service 0.6.92 → 0.6.95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@contentgrowth/llm-service",
-  "version": "0.6.92",
+  "version": "0.6.95",
   "description": "Unified LLM Service for Content Growth",
   "main": "src/index.js",
   "type": "module",
@@ -8,6 +8,7 @@
     "test": "echo \"Error: no test specified\" && exit 1",
     "test:live": "node test-live.js",
    "test:json": "node test-json-mode.js",
+    "test:jsonutil": "node test-extraction.js",
     "examples:json": "node examples-json-mode.js",
     "prepublishOnly": "echo '\n📦 Package contents:\n' && npm pack --dry-run && echo '\n⚠️ Review the files above before publishing!\n'"
   },
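
The new test:jsonutil script runs a test-extraction.js file; assuming that file ships alongside the other test scripts, it can be invoked with:

    npm run test:jsonutil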
@@ -127,50 +127,67 @@ export class GeminiProvider extends BaseLLMProvider {
     }
 
     // Use the new @google/genai API
-
+    // Use the new @google/genai API
+    const requestOptions = {
       model: modelName,
       contents: contents,
-
-
-      tools: tools ? [{ functionDeclarations: tools.map(t => t.function) }] : undefined,
-    });
+      config: generationConfig,
+    };
 
-
-
-    let toolCalls = null;
-    if (result.candidates?.[0]?.content?.parts) {
-      const functionCallParts = result.candidates[0].content.parts.filter(p => p.functionCall);
-      if (functionCallParts.length > 0) {
-        toolCalls = functionCallParts.map(p => p.functionCall);
-      }
+    if (systemPrompt) {
+      requestOptions.systemInstruction = systemPrompt;
     }
 
-
-
+    if (tools && tools.length > 0) {
+      requestOptions.tools = [{ functionDeclarations: tools.map(t => t.function) }];
+    }
+
+    console.log('[GeminiProvider] generateContent request:', JSON.stringify(requestOptions, null, 2));
+
+    let response;
     try {
-
-
-
-
-
-
-
-
+      response = await this.client.models.generateContent(requestOptions);
+    } catch (error) {
+      console.error('[GeminiProvider] generateContent failed:', error);
+      throw error;
+    }
+
+    // In @google/genai, the response is returned directly (no .response property)
+    // And helper methods like .text() or .functionCalls() might not exist on the raw object
+    // So we extract manually from candidates
+
+    const candidate = response.candidates?.[0];
+    if (!candidate) {
+      throw new LLMServiceException('No candidates returned from model', 500);
+    }
+
+    const parts = candidate.content?.parts || [];
+
+    // Extract text and function calls
+    let textContent = '';
+    let toolCalls = null;
+
+    for (const part of parts) {
+      if (part.text) {
+        textContent += part.text;
+      }
+      if (part.functionCall) {
+        if (!toolCalls) toolCalls = [];
+        toolCalls.push(part.functionCall);
       }
-    } catch (e) {
-      // This is expected behavior for tool-only responses
     }
 
     // Validate that we have EITHER content OR tool calls
     if (!textContent && (!toolCalls || toolCalls.length === 0)) {
       console.error('[GeminiProvider] Model returned empty response (no text, no tool calls)');
-      console.error('[GeminiProvider]
+      console.error('[GeminiProvider] Contents:', JSON.stringify(contents, null, 2));
       throw new LLMServiceException(
         'Model returned empty response. This usually means the prompt or schema is confusing the model.',
         500
       );
     }
 
+    console.log('Gemini returns:', textContent);
     // Return with parsed JSON if applicable
     return {
       content: textContent,
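
For reference, a minimal sketch of the @google/genai call shape this hunk migrates to; the client setup, model name, and prompt are placeholder assumptions, not taken from the package:

import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

const response = await ai.models.generateContent({
  model: 'gemini-2.0-flash', // placeholder model name
  contents: [{ role: 'user', parts: [{ text: 'Say hello' }] }],
  config: { temperature: 0 },
});

// The response is returned directly (no .response wrapper); text and
// function calls live in candidates[0].content.parts, which is what
// the provider above now walks manually.
for (const part of response.candidates?.[0]?.content?.parts ?? []) {
  if (part.text) process.stdout.write(part.text);
  if (part.functionCall) console.log('tool call:', part.functionCall.name);
}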
@@ -197,15 +214,17 @@ export class GeminiProvider extends BaseLLMProvider {
 
     const schema = typeof options.responseFormat === 'object'
       ? options.responseFormat.schema
-      : null;
+      : options.responseSchema || null;
 
     if (formatType === 'json' || formatType === 'json_schema') {
       config.responseMimeType = 'application/json';
 
       // CRITICAL: Must provide schema for "Strict Mode" to avoid markdown wrappers
       if (schema) {
+        // Use responseSchema for strict structured output
+        // Must convert to Gemini Schema format (Uppercase types)
         config.responseSchema = this._convertToGeminiSchema(schema);
-        console.log('[GeminiProvider] Using Strict JSON mode with schema');
+        console.log('[GeminiProvider] Using Strict JSON mode with schema (responseSchema)');
       } else {
         console.warn('[GeminiProvider] Using legacy JSON mode without schema - may produce markdown wrappers');
       }
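
The net effect of the new options.responseSchema fallback is that a caller can pass a plain-string responseFormat together with a separate schema. A hypothetical call, with the method name and schema invented for illustration:

const result = await llmService.generate('Extract the article title as JSON', {
  responseFormat: 'json_schema', // plain string, not an object
  responseSchema: {              // picked up by the new fallback
    type: 'object',
    properties: { title: { type: 'string' } },
    required: ['title'],
  },
});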
@@ -216,25 +235,15 @@ export class GeminiProvider extends BaseLLMProvider {
   }
 
   _convertToGeminiSchema(jsonSchema) {
-    // SchemaType constants for Gemini schema conversion
-    const SchemaType = {
-      STRING: 'STRING',
-      NUMBER: 'NUMBER',
-      INTEGER: 'INTEGER',
-      BOOLEAN: 'BOOLEAN',
-      ARRAY: 'ARRAY',
-      OBJECT: 'OBJECT'
-    };
-
     const convertType = (type) => {
       switch (type) {
-        case 'string': return SchemaType.STRING;
-        case 'number': return SchemaType.NUMBER;
-        case 'integer': return SchemaType.INTEGER;
-        case 'boolean': return SchemaType.BOOLEAN;
-        case 'array': return SchemaType.ARRAY;
-        case 'object': return SchemaType.OBJECT;
-        default: return SchemaType.STRING;
+        case 'string': return 'STRING';
+        case 'number': return 'NUMBER';
+        case 'integer': return 'INTEGER';
+        case 'boolean': return 'BOOLEAN';
+        case 'array': return 'ARRAY';
+        case 'object': return 'OBJECT';
+        default: return 'STRING';
       }
     };
 
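
To illustrate the inlined mapping, here is a JSON Schema fragment and the Gemini-style schema it should convert to; the recursive walk over properties and items sits outside this hunk, so the full output shape is an assumption:

// Input: standard JSON Schema with lowercase type names
const input = {
  type: 'object',
  properties: {
    count: { type: 'integer' },
    tags: { type: 'array', items: { type: 'string' } },
  },
};

// Expected output: Gemini Schema with uppercase type names
// { type: 'OBJECT', properties: { count: { type: 'INTEGER' },
//   tags: { type: 'ARRAY', items: { type: 'STRING' } } } }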
@@ -348,23 +357,33 @@ export class GeminiProvider extends BaseLLMProvider {
     }
 
     // Use the new @google/genai API
-    const
+    const requestOptions = {
       model: modelName,
       contents: [{
        role: "user",
        parts: parts
      }],
-
-
-
+      config: generationConfig
+    };
+
+    if (systemPrompt) {
+      requestOptions.systemInstruction = systemPrompt;
+    }
 
-
-
+    console.log('[GeminiProvider] imageGeneration request:', JSON.stringify(requestOptions, null, 2));
+
+    const response = await this.client.models.generateContent(requestOptions);
+
+    const imagePart = response.candidates?.[0]?.content?.parts?.find(
       part => part.inlineData && part.inlineData.mimeType?.startsWith('image/')
     );
 
     if (!imagePart || !imagePart.inlineData) {
-
+      // Fallback: Check if it returned a URI or other format, or just text
+      const textPart = response.candidates?.[0]?.content?.parts?.find(p => p.text);
+      if (textPart) {
+        console.warn('[GeminiProvider] Model returned text instead of image:', textPart.text);
+      }
       throw new Error('No image data in response');
     }
 
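
Downstream, the imagePart extracted above carries base64-encoded bytes in inlineData; a caller might persist it roughly like this (a sketch; the file name is arbitrary):

import { writeFileSync } from 'node:fs';

const { mimeType, data } = imagePart.inlineData; // data is a base64 string
writeFileSync('generated-image.png', Buffer.from(data, 'base64'));
console.log('Saved image of type', mimeType);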
@@ -93,7 +93,7 @@ export class OpenAIProvider extends BaseLLMProvider {
 
     const schema = typeof options.responseFormat === 'object'
       ? options.responseFormat.schema
-      : null;
+      : options.responseSchema || null;
 
     switch (formatType) {
       case 'json':
@@ -115,7 +115,7 @@ export class OpenAIProvider extends BaseLLMProvider {
 
       case 'json_schema':
         if (!schema) {
-          throw new Error('
+          throw new Error('responseSchema required when using json_schema format');
         }
         console.log('[OpenAIProvider] Using Strict JSON mode with schema');
         return {
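
For comparison, the strict response_format object that OpenAI's chat completions API expects for json_schema looks like the following; the diff cuts off before the provider's actual return value, so the name and schema here are illustrative:

{
  type: 'json_schema',
  json_schema: {
    name: 'result',
    strict: true,
    schema: {
      type: 'object',
      properties: { title: { type: 'string' } },
      required: ['title'],
      additionalProperties: false,
    },
  },
}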