@contentgrowth/llm-service 0.6.1 → 0.6.3

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@contentgrowth/llm-service",
-  "version": "0.6.1",
+  "version": "0.6.3",
   "description": "Unified LLM Service for Content Growth",
   "main": "src/index.js",
   "type": "module",
@@ -48,7 +48,7 @@ export class GeminiProvider extends BaseLLMProvider {
       tools: tools ? [{ functionDeclarations: tools.map(t => t.function) }] : undefined,
     };
 
-    // Add JSON mode support for Gemini
+    // Add JSON mode support for Gemini (only used when NO tools are present)
     if (options.responseFormat) {
       modelConfig.generationConfig = this._buildGenerationConfig(options, maxTokens, temperature);
     } else if (options.temperature !== undefined || options.maxTokens !== undefined) {
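
The reworded comment makes the contract explicit: when tools are passed, Gemini's JSON mode is never engaged. A minimal sketch of the two call shapes against the chatCompletion signature visible in this diff; the responseFormat value is an assumption about what the service accepts, not confirmed by the diff:

// JSON mode: tool-free call, responseFormat flows into generationConfig
const jsonResponse = await provider.chatCompletion(
  messages,
  systemPrompt,
  null, // no tools (note: an empty array would still be truthy in the Gemini branch above)
  { responseFormat: { type: 'json_object' } } // assumed option shape
);

// Tool mode: tools present, so responseFormat would not be applied
const toolResponse = await provider.chatCompletion(messages, systemPrompt, tools);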
@@ -130,13 +130,30 @@ export class GeminiProvider extends BaseLLMProvider {
     const response = result.response;
     const toolCalls = response.functionCalls();
 
+    let textContent = '';
+    try {
+      textContent = response.text();
+    } catch (e) {
+      // response.text() throws if there is no text content (e.g. only tool calls)
+      // This is expected behavior for tool-only responses
+    }
+    // Validate that we have EITHER content OR tool calls
+    if (!textContent && (!toolCalls || toolCalls.length === 0)) {
+      console.error('[GeminiProvider] Model returned empty response (no text, no tool calls)');
+      console.error('[GeminiProvider] Last message:', JSON.stringify(lastMessage, null, 2));
+      throw new LLMServiceException(
+        'Model returned empty response. This usually means the prompt or schema is confusing the model.',
+        500
+      );
+    }
+
     // Return with parsed JSON if applicable
     return {
-      content: response.text(),
+      content: textContent,
       tool_calls: toolCalls ? toolCalls.map(fc => ({ type: 'function', function: fc })) : null,
       _responseFormat: options.responseFormat,
       ...(options.responseFormat && this._shouldAutoParse(options) ? {
-        parsedContent: this._safeJsonParse(response.text())
+        parsedContent: this._safeJsonParse(textContent)
       } : {})
     };
   }
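
Because an empty Gemini response now throws instead of returning an empty string, callers can distinguish a broken prompt from a quiet model. A sketch of defensive handling; it assumes LLMServiceException is re-exported from the package entry point, and the retry helper itself is hypothetical:

import { LLMServiceException } from '@contentgrowth/llm-service'; // assumed re-export

async function completeOnce(provider, messages, systemPrompt, tools) {
  try {
    return await provider.chatCompletion(messages, systemPrompt, tools);
  } catch (err) {
    // The new guard throws with status 500; retry once before giving up
    if (err instanceof LLMServiceException && err.message.includes('empty response')) {
      return provider.chatCompletion(messages, systemPrompt, tools);
    }
    throw err; // unrelated failures propagate unchanged
  }
}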
@@ -1,6 +1,7 @@
 import OpenAI from 'openai';
 import { BaseLLMProvider } from './base-provider.js';
 import { extractJsonFromResponse } from '../json-utils.js';
+import { LLMServiceException } from '../../llm-service.js';
 
 export class OpenAIProvider extends BaseLLMProvider {
   constructor(config) {
@@ -58,6 +59,15 @@ export class OpenAIProvider extends BaseLLMProvider {
     const response = await this.client.chat.completions.create(requestPayload);
     const message = response.choices[0].message;
 
+    // Validate that we have EITHER content OR tool calls
+    if (!message.content && (!message.tool_calls || message.tool_calls.length === 0)) {
+      console.error('[OpenAIProvider] Model returned empty response (no text, no tool calls)');
+      throw new LLMServiceException(
+        'Model returned empty response. This usually means the prompt or schema is confusing the model.',
+        500
+      );
+    }
+
     // Return with parsed JSON if applicable
     return {
       content: message.content,
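
The OpenAI guard is the same invariant expressed against the chat completions message shape. Extracted as a standalone predicate for illustration; the sample messages are invented, but the shapes follow the OpenAI chat completions response:

const isEmptyMessage = (message) =>
  !message.content && (!message.tool_calls || message.tool_calls.length === 0);

isEmptyMessage({ content: null, tool_calls: null });                   // true  -> guard throws
isEmptyMessage({ content: 'Hello', tool_calls: null });                // false -> text response
isEmptyMessage({ content: null, tool_calls: [{ type: 'function' }] }); // false -> tool call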
@@ -166,20 +166,18 @@ export class LLMService {
   * @param {string} tenantId - Tenant identifier
   * @param {string} systemPrompt - System instructions
   * @param {Array} tools - Tools array
-  * @param {Object} options - Options object (for responseFormat, etc.)
   * @returns {Object} Response with content, tool_calls, and optionally parsedContent
   */
-  async chatWithTools(messages, tenantId, systemPrompt, tools = [], options = {}) {
+  async chatWithTools(messages, tenantId, systemPrompt, tools = []) {
     const provider = await this._getProvider(tenantId);
 
     let currentMessages = [...messages];
 
-    // Initial call - pass options to enable JSON mode, etc.
+    // Initial call - no options since tools + JSON mode are incompatible
     const initialResponse = await provider.chatCompletion(
       currentMessages,
       systemPrompt,
-      tools,
-      options
+      tools
     );
 
     let { content, tool_calls, parsedContent } = initialResponse;
@@ -192,12 +190,11 @@ export class LLMService {
       // Execute tools using the provider's helper (which formats results for that provider)
       await provider.executeTools(tool_calls, currentMessages, tenantId, this.toolImplementations, this.env);
 
-      // Next call - also pass options
+      // Next call - no options
       const nextResponse = await provider.chatCompletion(
         currentMessages,
         systemPrompt,
-        tools,
-        options
+        tools
       );
 
       content = nextResponse.content;
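
For callers, the practical effect of 0.6.3 is that chatWithTools no longer forwards an options object anywhere in the tool loop. A migration sketch; the service instance and tool list are placeholders, and since this is JavaScript the extra argument is silently dropped rather than rejected:

// 0.6.1 call site: options rode along with the tool loop
// const res = await service.chatWithTools(messages, tenantId, systemPrompt, tools, { responseFormat });

// 0.6.3 call site: tools only; request JSON mode through a separate, tool-free call instead
const { content, tool_calls } = await service.chatWithTools(messages, tenantId, systemPrompt, tools);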