@contentgrowth/llm-service 0.6.5 → 0.6.7

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@contentgrowth/llm-service",
-  "version": "0.6.5",
+  "version": "0.6.7",
   "description": "Unified LLM Service for Content Growth",
   "main": "src/index.js",
   "type": "module",
@@ -314,10 +314,23 @@ export class GeminiProvider extends BaseLLMProvider {
       };
     }
 
+    const parts = [{ text: prompt }];
+
+    if (options.images && options.images.length > 0) {
+      options.images.forEach(img => {
+        parts.push({
+          inlineData: {
+            data: img.data,
+            mimeType: img.mimeType
+          }
+        });
+      });
+    }
+
     const result = await model.generateContent({
       contents: [{
         role: "user",
-        parts: [{ text: prompt }]
+        parts: parts
       }],
       generationConfig
     });
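This Gemini hunk is the substantive feature of the release: the prompt is now wrapped in a parts array, and each entry in options.images is appended as an inlineData part, which is how the Gemini API accepts base64-encoded media alongside text. A minimal caller sketch follows, assuming the enclosing method takes (prompt, options) as the hunk implies; the toInlineImage helper and the complete method name are illustrative stand-ins, not this package's documented API:

import { readFile } from 'node:fs/promises';

// Illustrative helper: load a local file into the { data, mimeType }
// shape the provider now reads from options.images (base64 payload).
async function toInlineImage(path, mimeType) {
  const data = (await readFile(path)).toString('base64');
  return { data, mimeType };
}

// Hypothetical call site; `geminiProvider` stands in for a configured
// GeminiProvider instance, and `complete` for the method this hunk edits.
const chart = await toInlineImage('./chart.png', 'image/png');
const result = await geminiProvider.complete('Describe this chart.', {
  images: [chart]
});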
@@ -159,18 +159,12 @@ export class LLMService {
     }
   }
 
-  /**
-   * Wrap of chatCompletion to handle toolcalls from LLM.
-   * @param {Array} messages - Conversation messages
-   * @param {string} tenantId - Tenant identifier
-   * @param {string} systemPrompt - System instructions
-   * @param {Array} tools - Tools array
-   * @returns {Object} Response with content, tool_calls, and optionally parsedContent
-   */
   async chatWithTools(messages, tenantId, systemPrompt, tools = []) {
     const provider = await this._getProvider(tenantId);
 
     let currentMessages = [...messages];
+    const MAX_ITERATIONS = 10; // Prevent infinite loops
+    let iteration = 0;
 
     // Initial call - no options since tools + JSON mode are incompatible
     const initialResponse = await provider.chatCompletion(
@@ -181,9 +175,10 @@ export class LLMService {
 
     let { content, tool_calls, parsedContent } = initialResponse;
 
-    // Tool execution loop
-    while (tool_calls) {
-      console.log('[Tool Call] Assistant wants to use tools:', tool_calls);
+    // Tool execution loop with safety limit
+    while (tool_calls && iteration < MAX_ITERATIONS) {
+      iteration++;
+      console.log(`[Tool Call] Iteration ${iteration}/${MAX_ITERATIONS}: Assistant wants to use tools:`, tool_calls);
 
       currentMessages.push({ role: 'assistant', content: content || '', tool_calls });
 
       // Execute tools using the provider's helper (which formats results for that provider)
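Before 0.6.7 this loop ran for as long as the model kept returning tool_calls, so a model that requested a tool on every turn would never terminate. The counter caps the exchange at ten round trips. The general shape of the pattern, distilled outside the package (llm.chat and runTools are placeholders, not this library's API):

const MAX_ITERATIONS = 10;
let iteration = 0;
let reply = await llm.chat(messages); // placeholder client call

// Keep satisfying tool requests until the model answers in plain
// content or the safety cap is reached.
while (reply.tool_calls && iteration < MAX_ITERATIONS) {
  iteration++;
  messages.push({ role: 'assistant', content: reply.content || '', tool_calls: reply.tool_calls });
  messages.push(...await runTools(reply.tool_calls)); // placeholder executor
  reply = await llm.chat(messages);
}

One trade-off worth noting: when the cap is hit, the last response may still contain unanswered tool_calls, which is why the hunk below logs a warning and returns rather than throwing.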
@@ -201,6 +196,10 @@ export class LLMService {
       parsedContent = nextResponse.parsedContent; // Preserve parsedContent from final response
     }
 
+    if (iteration >= MAX_ITERATIONS) {
+      console.warn(`[Tool Call] Reached maximum iterations (${MAX_ITERATIONS}). Forcing completion.`);
+    }
+
     // Return both content and parsedContent (if available)
     return { content, parsedContent, toolCalls: tool_calls };
   }
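Taken together, the public surface of chatWithTools is unchanged across this release: the signature chatWithTools(messages, tenantId, systemPrompt, tools = []) and the { content, parsedContent, toolCalls } return shape both appear verbatim in the hunks above. A hedged usage sketch, assuming OpenAI-style function tool definitions (the diff does not show the expected tool schema, so that part is an assumption):

// `llmService` stands in for a constructed LLMService instance.
const tools = [{
  type: 'function',
  function: {
    name: 'get_weather', // hypothetical tool for illustration
    description: 'Look up the current weather for a city',
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city']
    }
  }
}];

const { content, parsedContent, toolCalls } = await llmService.chatWithTools(
  [{ role: 'user', content: 'What is the weather in Oslo?' }],
  'tenant-123',                   // tenantId (illustrative value)
  'You are a helpful assistant.', // systemPrompt
  tools
);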