@contentgrowth/llm-service 0.7.0 → 0.7.1

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@contentgrowth/llm-service",
-  "version": "0.7.0",
+  "version": "0.7.1",
   "description": "Unified LLM Service for Content Growth",
   "main": "src/index.js",
   "type": "module",
@@ -90,6 +90,26 @@ export class GeminiProvider extends BaseLLMProvider {
       case 'user':
         role = 'user';
         parts = [{ text: msg.content }];
+
+        // Enhancement: If this is the LAST message (current turn), append the reminder.
+        // This helps the model respect the system prompt (especially format) even with long context history.
+        if (index === geminiMessages.length - 1) {
+          let reminder = "";
+          if (options.responseFormat === 'json' || options.responseFormat?.type === 'json_schema' || options.responseSchema) {
+            reminder = "\n\n[SYSTEM NOTE: The output MUST be valid JSON as per the schema. Do not include markdown formatting or explanations.]";
+          } else {
+            reminder = "\n\n[SYSTEM NOTE: Please ensure your response adheres strictly to the constraints defined in the System Prompt.]";
+          }
+
+          // Append to the existing text part (Safest method)
+          const lastPart = parts.find(p => p.text);
+          if (lastPart) {
+            lastPart.text += reminder;
+          } else {
+            // Fallback if message was image-only
+            parts.push({ text: reminder });
+          }
+        }
         break;
       case 'assistant':
         role = 'model';
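
The block added above only fires on the final user turn and reuses whatever text part already exists. A minimal standalone sketch of the same technique, assuming messages shaped like `{ text: '...' }` parts and an `options` object carrying the fields the diff checks; the function name is illustrative, not part of the package:

```js
// Sketch only: append a format reminder to the last user message's parts.
function appendReminder(parts, options = {}) {
  const wantsJson =
    options.responseFormat === 'json' ||
    options.responseFormat?.type === 'json_schema' ||
    Boolean(options.responseSchema);

  const reminder = wantsJson
    ? "\n\n[SYSTEM NOTE: The output MUST be valid JSON as per the schema. Do not include markdown formatting or explanations.]"
    : "\n\n[SYSTEM NOTE: Please ensure your response adheres strictly to the constraints defined in the System Prompt.]";

  const lastPart = parts.find(p => p.text); // first (usually only) text part
  if (lastPart) {
    lastPart.text += reminder;              // extend the existing text
  } else {
    parts.push({ text: reminder });         // image-only message fallback
  }
  return parts;
}

// e.g. appendReminder([{ text: 'Summarize this.' }], { responseFormat: 'json' })
```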
@@ -115,11 +135,11 @@ export class GeminiProvider extends BaseLLMProvider {
         // Fix for JSON mode: If JSON is requested, remind the model to output JSON after tool execution
         // This is necessary because strict JSON mode is disabled when tools are present.
         if (options.responseFormat === 'json' || options.responseFormat?.type === 'json_schema' || options.responseSchema) {
-          parts.push({ text: "Please ensure the final response is valid JSON as per the system instructions." });
+          parts.push({ text: "\n\n[SYSTEM NOTE: The output MUST be valid JSON as per the schema. Do not include markdown formatting or explanations.]" });
         } else {
           // Generic reminder to help model stay on track with system prompt instructions (e.g. formatting)
           // even if no specific JSON mode is configured.
-          parts.push({ text: "Please ensure the final response follows the system prompt instructions." });
+          parts.push({ text: "\n\n[SYSTEM NOTE: Please ensure your response adheres strictly to the constraints defined in the System Prompt.]" });
         }
         break;
       default:
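
This hunk tests the same three option shapes as the one above. For reference, a sketch of inputs that would select the strict-JSON branch; field contents are placeholders, only the field names come from the diff:

```js
// Any of these option shapes triggers the strict JSON reminder:
const byString     = { responseFormat: 'json' };
const bySchemaType = { responseFormat: { type: 'json_schema' } };
const bySchema     = { responseSchema: { type: 'object' /* ... */ } };
```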
@@ -159,18 +159,19 @@ export class LLMService {
     }
   }
 
-  async chatWithTools(messages, tenantId, systemPrompt, tools = []) {
+  async chatWithTools(messages, tenantId, systemPrompt, tools = [], options = {}) {
     const provider = await this._getProvider(tenantId);
 
     let currentMessages = [...messages];
     const MAX_ITERATIONS = 10; // Prevent infinite loops
     let iteration = 0;
 
-    // Initial call - no options since tools + JSON mode are incompatible
+    // Initial call
     const initialResponse = await provider.chatCompletion(
       currentMessages,
       systemPrompt,
-      tools
+      tools,
+      options
     );
 
     let { content, tool_calls, parsedContent } = initialResponse;
@@ -184,11 +185,12 @@ export class LLMService {
       // Execute tools using the provider's helper (which formats results for that provider)
       await provider.executeTools(tool_calls, currentMessages, tenantId, this.toolImplementations, this.env);
 
-      // Next call - no options
+      // Next call
       const nextResponse = await provider.chatCompletion(
         currentMessages,
         systemPrompt,
-        tools
+        tools,
+        options
       );
 
      content = nextResponse.content;
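
Taken together, the two hunks above mean `options` now flows through every `chatCompletion` call in the tool loop, so format constraints survive tool round-trips. A hypothetical caller, assuming the signature shown in the diff; the constructor arguments, tenant ID, prompt, and tool list are placeholders not shown in this release:

```js
// Hypothetical usage sketch — only chatWithTools and its parameter order
// come from the diff; every concrete value here is a placeholder.
const service = new LLMService(/* env, tool implementations, ... */);

const result = await service.chatWithTools(
  [{ role: 'user', content: 'Summarize the latest metrics as JSON.' }], // messages
  'tenant-123',                                                         // tenantId
  'You are a reporting assistant. Respond with JSON only.',             // systemPrompt
  [/* provider-formatted tool definitions */],                          // tools
  { responseFormat: 'json' }                                            // options, now forwarded
);
```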