@contentgrowth/llm-service 0.6.4 → 0.6.6

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@contentgrowth/llm-service",
-  "version": "0.6.4",
+  "version": "0.6.6",
   "description": "Unified LLM Service for Content Growth",
   "main": "src/index.js",
   "type": "module",
@@ -314,10 +314,25 @@ export class GeminiProvider extends BaseLLMProvider {
       };
     }
 
+    const parts = [{ text: prompt }];
+
+    if (options.images && options.images.length > 0) {
+      options.images.forEach(img => {
+        parts.push({
+          inlineData: {
+            data: img.data,
+            mimeType: img.mimeType
+          }
+        });
+      });
+    }
+
+    console.log('[GeminiProvider] Generating image with parts:', parts.map(p => p.text ? `Text: ${p.text.substring(0, 50)}...` : `Image: ${p.inlineData?.mimeType} (${p.inlineData?.data?.length} chars)`));
+
     const result = await model.generateContent({
       contents: [{
         role: "user",
-        parts: [{ text: prompt }]
+        parts: parts
       }],
       generationConfig
     });
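The new parts array is how image attachments reach Gemini's generateContent call. A minimal caller sketch follows; the method name generateImage and the base64 encoding are assumptions (the diff does not show the enclosing method's signature), while the { data, mimeType } fields mirror the inlineData mapping above:

// Hypothetical caller sketch: `generateImage` and base64 encoding are assumptions;
// only the options.images shape is taken from the diff.
import { readFileSync } from 'node:fs';

const reference = readFileSync('./reference.png').toString('base64');

// `geminiProvider` stands for an instantiated GeminiProvider (construction not shown here).
const result = await geminiProvider.generateImage('Turn this photo into a watercolor', {
  images: [{ data: reference, mimeType: 'image/png' }]
});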
@@ -136,8 +136,7 @@ export class LLMService {
    */
   async chatCompletionJson(messages, tenantId, systemPrompt, schema = null, tools = null) {
     const options = {
-      responseFormat: schema ? 'json_schema' : 'json',
-      responseSchema: schema,
+      responseFormat: schema ? { type: 'json_schema', schema } : 'json',
       autoParse: true
     };
 
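The two old options (a responseFormat string plus a separate responseSchema) are folded into a single value: the string 'json' when no schema is given, or the object { type: 'json_schema', schema } when one is. A hedged usage sketch; the schema contents and the llmService variable are illustrative, only the method signature comes from the diff:

// Illustrative schema; any JSON Schema object would do.
const articleSchema = {
  type: 'object',
  properties: {
    title: { type: 'string' },
    tags: { type: 'array', items: { type: 'string' } }
  },
  required: ['title']
};

// With a schema: options.responseFormat becomes { type: 'json_schema', schema }
const structured = await llmService.chatCompletionJson(messages, tenantId, systemPrompt, articleSchema);

// Without one: options.responseFormat stays the plain string 'json'
const loose = await llmService.chatCompletionJson(messages, tenantId, systemPrompt);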
@@ -160,18 +159,12 @@ export class LLMService {
     }
   }
 
-  /**
-   * Wrap of chatCompletion to handle toolcalls from LLM.
-   * @param {Array} messages - Conversation messages
-   * @param {string} tenantId - Tenant identifier
-   * @param {string} systemPrompt - System instructions
-   * @param {Array} tools - Tools array
-   * @returns {Object} Response with content, tool_calls, and optionally parsedContent
-   */
   async chatWithTools(messages, tenantId, systemPrompt, tools = []) {
     const provider = await this._getProvider(tenantId);
 
     let currentMessages = [...messages];
+    const MAX_ITERATIONS = 10; // Prevent infinite loops
+    let iteration = 0;
 
     // Initial call - no options since tools + JSON mode are incompatible
     const initialResponse = await provider.chatCompletion(
@@ -182,9 +175,10 @@ export class LLMService {
 
     let { content, tool_calls, parsedContent } = initialResponse;
 
-    // Tool execution loop
-    while (tool_calls) {
-      console.log('[Tool Call] Assistant wants to use tools:', tool_calls);
+    // Tool execution loop with safety limit
+    while (tool_calls && iteration < MAX_ITERATIONS) {
+      iteration++;
+      console.log(`[Tool Call] Iteration ${iteration}/${MAX_ITERATIONS}: Assistant wants to use tools:`, tool_calls);
       currentMessages.push({ role: 'assistant', content: content || '', tool_calls });
 
       // Execute tools using the provider's helper (which formats results for that provider)
@@ -202,6 +196,10 @@ export class LLMService {
       parsedContent = nextResponse.parsedContent; // Preserve parsedContent from final response
     }
 
+    if (iteration >= MAX_ITERATIONS) {
+      console.warn(`[Tool Call] Reached maximum iterations (${MAX_ITERATIONS}). Forcing completion.`);
+    }
+
     // Return both content and parsedContent (if available)
     return { content, parsedContent, toolCalls: tool_calls };
   }
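chatWithTools now caps the tool loop at ten rounds: if the model is still requesting tools after MAX_ITERATIONS, the loop exits, a warning is logged, and the last response is returned as-is. A usage sketch under assumptions: the tool definition below uses an OpenAI-style function shape, which this diff does not confirm, and llmService stands for a constructed LLMService instance:

// Assumed OpenAI-style tool shape; the diff only shows that `tools` is an array.
const tools = [{
  type: 'function',
  function: {
    name: 'fetch_keyword_volume',
    description: 'Look up monthly search volume for a keyword',
    parameters: {
      type: 'object',
      properties: { keyword: { type: 'string' } },
      required: ['keyword']
    }
  }
}];

const { content, parsedContent, toolCalls } = await llmService.chatWithTools(
  messages, tenantId, systemPrompt, tools
);
// A model that keeps requesting tools stops after MAX_ITERATIONS (10) rounds;
// in that case `toolCalls` is still non-null, signalling the forced exit.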