@contentgrowth/llm-service 0.6.1 → 0.6.2

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@contentgrowth/llm-service",
-  "version": "0.6.1",
+  "version": "0.6.2",
   "description": "Unified LLM Service for Content Growth",
   "main": "src/index.js",
   "type": "module",
@@ -48,7 +48,7 @@ export class GeminiProvider extends BaseLLMProvider {
       tools: tools ? [{ functionDeclarations: tools.map(t => t.function) }] : undefined,
     };
 
-    // Add JSON mode support for Gemini
+    // Add JSON mode support for Gemini (only used when NO tools are present)
     if (options.responseFormat) {
       modelConfig.generationConfig = this._buildGenerationConfig(options, maxTokens, temperature);
     } else if (options.temperature !== undefined || options.maxTokens !== undefined) {
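
For context: in the Gemini API, JSON mode is requested through generationConfig.responseMimeType, and it cannot be combined with function declarations, which is what the updated comment is warning about. A minimal sketch of what the _buildGenerationConfig helper referenced above might return; the helper's body is not part of this diff, so the field mapping here is an assumption:

// Hypothetical sketch - _buildGenerationConfig is referenced but not shown in this diff.
_buildGenerationConfig(options, maxTokens, temperature) {
  return {
    maxOutputTokens: options.maxTokens ?? maxTokens,
    temperature: options.temperature ?? temperature,
    // Gemini's JSON mode: request an application/json response body.
    // This cannot be combined with tools/functionDeclarations.
    responseMimeType:
      options.responseFormat?.type === 'json_object' ? 'application/json' : undefined,
  };
}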
@@ -166,20 +166,18 @@ export class LLMService {
    * @param {string} tenantId - Tenant identifier
    * @param {string} systemPrompt - System instructions
    * @param {Array} tools - Tools array
-   * @param {Object} options - Options object (for responseFormat, etc.)
    * @returns {Object} Response with content, tool_calls, and optionally parsedContent
    */
-  async chatWithTools(messages, tenantId, systemPrompt, tools = [], options = {}) {
+  async chatWithTools(messages, tenantId, systemPrompt, tools = []) {
     const provider = await this._getProvider(tenantId);
 
     let currentMessages = [...messages];
 
-    // Initial call - pass options to enable JSON mode, etc.
+    // Initial call - no options since tools + JSON mode are incompatible
     const initialResponse = await provider.chatCompletion(
       currentMessages,
       systemPrompt,
-      tools,
-      options
+      tools
     );
 
     let { content, tool_calls, parsedContent } = initialResponse;
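
The practical effect of this signature change is that callers can no longer ask chatWithTools for structured output; tool execution and JSON formatting now happen in separate calls. A hedged usage sketch, assuming a plain chat(messages, tenantId, systemPrompt, options) method exists alongside chatWithTools (only chatWithTools appears in this diff):

// Tool-using call: no responseFormat, matching the new signature.
const { content, tool_calls } = await llmService.chatWithTools(
  messages,
  tenantId,
  systemPrompt,
  tools
);

// If structured JSON is still needed, issue a follow-up call without tools.
// chat() and its options argument are assumptions for illustration.
const structured = await llmService.chat(
  [...messages, { role: 'assistant', content }],
  tenantId,
  systemPrompt,
  { responseFormat: { type: 'json_object' } }
);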
@@ -192,12 +190,11 @@ export class LLMService {
     // Execute tools using the provider's helper (which formats results for that provider)
     await provider.executeTools(tool_calls, currentMessages, tenantId, this.toolImplementations, this.env);
 
-    // Next call - also pass options
+    // Next call - no options
     const nextResponse = await provider.chatCompletion(
       currentMessages,
       systemPrompt,
-      tools,
-      options
+      tools
     );
 
     content = nextResponse.content;
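
Assembled from the hunks above, the post-change chatWithTools flow reads roughly as follows. This is a reconstruction, not the full source: the loop condition and the return shape are assumptions.

async chatWithTools(messages, tenantId, systemPrompt, tools = []) {
  const provider = await this._getProvider(tenantId);
  let currentMessages = [...messages];

  // Initial call - no options since tools + JSON mode are incompatible
  let { content, tool_calls, parsedContent } = await provider.chatCompletion(
    currentMessages,
    systemPrompt,
    tools
  );

  // Assumed loop shape: keep resolving tool calls until the model stops issuing them.
  while (tool_calls && tool_calls.length > 0) {
    // Execute tools using the provider's helper
    await provider.executeTools(
      tool_calls, currentMessages, tenantId, this.toolImplementations, this.env
    );

    // Next call - no options
    const nextResponse = await provider.chatCompletion(
      currentMessages,
      systemPrompt,
      tools
    );
    content = nextResponse.content;
    tool_calls = nextResponse.tool_calls;
  }

  // parsedContent is likely undefined now that JSON mode is never requested here.
  return { content, tool_calls, parsedContent };
}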