genai-lite 0.7.0 → 0.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -554,5 +554,45 @@
  "settings": {
  "temperature": 0.7
  }
+ },
+ {
+ "id": "openrouter-gemma-3-27b-free",
+ "displayName": "OpenRouter - Gemma 3 27B (Free)",
+ "description": "Google's Gemma 3 27B via OpenRouter free tier.",
+ "providerId": "openrouter",
+ "modelId": "google/gemma-3-27b-it:free",
+ "settings": {
+ "temperature": 0.7
+ }
+ },
+ {
+ "id": "openrouter-mistral-small-3.1-free",
+ "displayName": "OpenRouter - Mistral Small 3.1 (Free)",
+ "description": "Mistral Small 3.1 24B via OpenRouter free tier.",
+ "providerId": "openrouter",
+ "modelId": "mistralai/mistral-small-3.1-24b-instruct:free",
+ "settings": {
+ "temperature": 0.7
+ }
+ },
+ {
+ "id": "mistral-small-default",
+ "displayName": "Mistral - Small",
+ "description": "Cost-effective Mistral Small model for general tasks.",
+ "providerId": "mistral",
+ "modelId": "mistral-small-latest",
+ "settings": {
+ "temperature": 0.7
+ }
+ },
+ {
+ "id": "mistral-codestral-default",
+ "displayName": "Mistral - Codestral",
+ "description": "Specialized model for code generation with lower temperature.",
+ "providerId": "mistral",
+ "modelId": "codestral-2501",
+ "settings": {
+ "temperature": 0.3
+ }
  }
  ]
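The four new presets are configuration only; each resolves to an ordinary `sendMessage` call against the named provider and model. A minimal sketch of exercising the `mistral-codestral-default` pairing, following the call shape from the adapter's own doc comment further down; the `LLMService` construction via `fromEnvironment` and the inline `settings` override are assumptions, not part of this diff:

```typescript
import { LLMService, fromEnvironment } from "genai-lite";

async function main() {
  // Assumption: fromEnvironment supplies provider API keys (e.g. MISTRAL_API_KEY) from process.env.
  const service = new LLMService(fromEnvironment);

  // Same provider/model/temperature as the new "mistral-codestral-default" preset.
  const response = await service.sendMessage({
    providerId: "mistral",
    modelId: "codestral-2501",
    messages: [{ role: "user", content: "Write a binary search in TypeScript." }],
    settings: { temperature: 0.3 }, // hypothetical per-request override; the preset bakes this value in
  });

  if (response.object !== "error") {
    console.log(response.choices[0].message.content);
  }
}

main();
```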
package/dist/index.d.ts CHANGED
@@ -9,6 +9,10 @@ export { LlamaCppClientAdapter } from "./llm/clients/LlamaCppClientAdapter";
  export { LlamaCppServerClient } from "./llm/clients/LlamaCppServerClient";
  export type { LlamaCppClientConfig, } from "./llm/clients/LlamaCppClientAdapter";
  export type { LlamaCppHealthResponse, LlamaCppTokenizeResponse, LlamaCppDetokenizeResponse, LlamaCppEmbeddingResponse, LlamaCppInfillResponse, LlamaCppPropsResponse, LlamaCppMetricsResponse, LlamaCppSlot, LlamaCppSlotsResponse, LlamaCppModel, LlamaCppModelsResponse, } from "./llm/clients/LlamaCppServerClient";
+ export { OpenRouterClientAdapter } from "./llm/clients/OpenRouterClientAdapter";
+ export type { OpenRouterClientConfig } from "./llm/clients/OpenRouterClientAdapter";
+ export { MistralClientAdapter } from "./llm/clients/MistralClientAdapter";
+ export type { MistralClientConfig } from "./llm/clients/MistralClientAdapter";
  export { ImageService } from "./image/ImageService";
  export type { ImageProviderId, ImageMimeType, ImageResponseFormat, ImageQuality, ImageStyle, DiffusionSampler, ImageProgressStage, ImageProgressCallback, DiffusionSettings, OpenAISpecificSettings, ImageGenerationSettings, ResolvedImageGenerationSettings, ImageUsage, GeneratedImage, ImageGenerationRequestBase, ImageGenerationRequest, ImageGenerationRequestWithPreset, ImageGenerationResponse, ImageFailureResponse, ImageProviderCapabilities, ImageModelInfo, ImageProviderInfo, ImagePreset, ImageProviderAdapterConfig, ImageProviderAdapter, ImageServiceOptions, CreatePromptResult, } from "./types/image";
  export { renderTemplate } from "./prompting/template";
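Both adapters and their config types are now importable from the package root. A small sketch; beyond `baseURL`, which the Mistral declaration documents, no config fields are assumed:

```typescript
import {
  MistralClientAdapter,
  OpenRouterClientAdapter,
  type MistralClientConfig,
} from "genai-lite";

// Default construction, as in the adapter's own @example block.
const mistral = new MistralClientAdapter();

// Optional config: baseURL is the documented field (defaults to https://api.mistral.ai).
const config: MistralClientConfig = { baseURL: "https://api.mistral.ai" };
const mistralCustom = new MistralClientAdapter(config);

const openrouter = new OpenRouterClientAdapter();
```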
package/dist/index.js CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.silentLogger = exports.DEFAULT_LOG_LEVEL = exports.createDefaultLogger = exports.KNOWN_GGUF_MODELS = exports.detectGgufCapabilities = exports.createFallbackModelInfo = exports.parseTemplateWithMetadata = exports.extractInitialTaggedContent = exports.parseRoleTags = exports.parseStructuredContent = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.ImageService = exports.LlamaCppServerClient = exports.LlamaCppClientAdapter = exports.fromEnvironment = exports.LLMService = void 0;
+ exports.silentLogger = exports.DEFAULT_LOG_LEVEL = exports.createDefaultLogger = exports.KNOWN_GGUF_MODELS = exports.detectGgufCapabilities = exports.createFallbackModelInfo = exports.parseTemplateWithMetadata = exports.extractInitialTaggedContent = exports.parseRoleTags = exports.parseStructuredContent = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.ImageService = exports.MistralClientAdapter = exports.OpenRouterClientAdapter = exports.LlamaCppServerClient = exports.LlamaCppClientAdapter = exports.fromEnvironment = exports.LLMService = void 0;
  // --- LLM Service ---
  var LLMService_1 = require("./llm/LLMService");
  Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
@@ -30,6 +30,12 @@ var LlamaCppClientAdapter_1 = require("./llm/clients/LlamaCppClientAdapter");
  Object.defineProperty(exports, "LlamaCppClientAdapter", { enumerable: true, get: function () { return LlamaCppClientAdapter_1.LlamaCppClientAdapter; } });
  var LlamaCppServerClient_1 = require("./llm/clients/LlamaCppServerClient");
  Object.defineProperty(exports, "LlamaCppServerClient", { enumerable: true, get: function () { return LlamaCppServerClient_1.LlamaCppServerClient; } });
+ // --- OpenRouter Integration ---
+ var OpenRouterClientAdapter_1 = require("./llm/clients/OpenRouterClientAdapter");
+ Object.defineProperty(exports, "OpenRouterClientAdapter", { enumerable: true, get: function () { return OpenRouterClientAdapter_1.OpenRouterClientAdapter; } });
+ // --- Mistral Integration ---
+ var MistralClientAdapter_1 = require("./llm/clients/MistralClientAdapter");
+ Object.defineProperty(exports, "MistralClientAdapter", { enumerable: true, get: function () { return MistralClientAdapter_1.MistralClientAdapter; } });
  // --- Image Generation ---
  // Export Image Service
  var ImageService_1 = require("./image/ImageService");
package/dist/llm/clients/AnthropicClientAdapter.js CHANGED
@@ -9,6 +9,7 @@ exports.AnthropicClientAdapter = void 0;
  const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
  const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
+ const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
  const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
@@ -144,18 +145,14 @@ class AnthropicClientAdapter {
  */
  formatMessagesForAnthropic(request) {
  const messages = [];
- let systemMessage = request.systemMessage;
+ const inlineSystemMessages = [];
+ // Check if model supports system messages
+ const supportsSystem = request.settings.supportsSystemMessage !== false;
  // Process conversation messages
  for (const message of request.messages) {
  if (message.role === "system") {
- // Anthropic handles system messages separately
- // If we already have a system message, append to it
- if (systemMessage) {
- systemMessage += "\n\n" + message.content;
- }
- else {
- systemMessage = message.content;
- }
+ // Collect inline system messages
+ inlineSystemMessages.push(message.content);
  }
  else if (message.role === "user") {
  messages.push({
@@ -170,6 +167,28 @@
  });
  }
  }
+ // Use shared utility to collect and combine system content
+ const { combinedSystemContent, useNativeSystemMessage } = (0, systemMessageUtils_1.collectSystemContent)(request.systemMessage, inlineSystemMessages, supportsSystem);
+ let systemMessage;
+ if (combinedSystemContent) {
+ if (useNativeSystemMessage) {
+ // Model supports system messages - use Anthropic's system parameter
+ systemMessage = combinedSystemContent;
+ }
+ else {
+ // Model doesn't support system messages - prepend to first user message
+ const simpleMessages = messages.map((m) => ({
+ role: m.role,
+ content: m.content,
+ }));
+ const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
+ if (modifiedIndex !== -1) {
+ messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
+ logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ }
+ // Don't set systemMessage - it stays undefined
+ }
+ }
  // Anthropic requires messages to start with 'user' role
  // If the first message is not from user, we need to handle this
  if (messages.length > 0 && messages[0].role !== "user") {
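The `systemMessageUtils` module itself is not part of this section, only its call sites. From those call sites its contract appears to be: `collectSystemContent` merges the request-level `systemMessage` with inline `system` messages and reports whether the provider's native system channel should be used, and `prependSystemToFirstUserMessage` folds the combined text into the first user message, returning its index or -1. A rough sketch of that inferred behavior, not the actual implementation:

```typescript
// Inferred sketch of shared/adapters/systemMessageUtils: names match the call sites, bodies are guesses.

interface SimpleMessage {
  role: string;
  content: string;
}

export function collectSystemContent(
  requestSystemMessage: string | undefined,
  inlineSystemMessages: string[],
  supportsSystemMessage: boolean
): { combinedSystemContent: string; useNativeSystemMessage: boolean } {
  // Merge the request-level system message with inline system messages, in order.
  const parts = [requestSystemMessage, ...inlineSystemMessages].filter(
    (part): part is string => Boolean(part)
  );
  return {
    combinedSystemContent: parts.join("\n\n"),
    useNativeSystemMessage: supportsSystemMessage,
  };
}

export function prependSystemToFirstUserMessage(
  messages: SimpleMessage[],
  systemContent: string,
  fallbackTemplate?: string // the type and semantics of systemMessageFallback are not shown in this diff
): number {
  const index = messages.findIndex((m) => m.role === "user");
  if (index === -1) return -1;
  // Hypothetical default formatting; the real helper may use fallbackTemplate to control this.
  messages[index].content = `${systemContent}\n\n${messages[index].content}`;
  return index;
}
```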
package/dist/llm/clients/GeminiClientAdapter.js CHANGED
@@ -6,6 +6,7 @@ exports.GeminiClientAdapter = void 0;
  const genai_1 = require("@google/genai");
  const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
+ const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
  const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
@@ -99,17 +100,14 @@ class GeminiClientAdapter {
  */
  formatInternalRequestToGemini(request) {
  const contents = [];
- let systemInstruction = request.systemMessage;
+ const inlineSystemMessages = [];
+ // Check if model supports system instructions (e.g., Gemma models don't)
+ const supportsSystem = request.settings.supportsSystemMessage !== false;
  // Process messages - separate system messages and build conversation contents
  for (const message of request.messages) {
  if (message.role === "system") {
- // Gemini handles system messages as systemInstruction
- if (systemInstruction) {
- systemInstruction += "\n\n" + message.content;
- }
- else {
- systemInstruction = message.content;
- }
+ // Collect inline system messages
+ inlineSystemMessages.push(message.content);
  }
  else if (message.role === "user") {
  contents.push({
@@ -125,6 +123,30 @@
  });
  }
  }
+ // Use shared utility to collect and combine system content
+ const { combinedSystemContent, useNativeSystemMessage } = (0, systemMessageUtils_1.collectSystemContent)(request.systemMessage, inlineSystemMessages, supportsSystem);
+ let systemInstruction;
+ if (combinedSystemContent) {
+ if (useNativeSystemMessage) {
+ // Model supports system instructions - use native API
+ systemInstruction = combinedSystemContent;
+ }
+ else {
+ // Model doesn't support system instructions - prepend to first user message
+ // Create a simple array with role/content for the utility
+ const simpleContents = contents.map((c) => ({
+ role: c.role,
+ content: c.parts[0].text,
+ }));
+ const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleContents, combinedSystemContent, request.settings.systemMessageFallback);
+ if (modifiedIndex !== -1) {
+ // Update the actual contents array
+ contents[modifiedIndex].parts[0].text = simpleContents[modifiedIndex].content;
+ logger.debug(`Model ${request.modelId} doesn't support system instructions - prepended to first user message`);
+ }
+ // Don't set systemInstruction - it stays undefined
+ }
+ }
  // Build generation config
  const generationConfig = {
  maxOutputTokens: request.settings.maxTokens,
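Practically, this means a model without a native system channel (the comment above calls out Gemma) no longer silently drops system content. A hedged sketch of a request that would exercise the fallback path; this diff only shows these flags being read from `request.settings`, so whether they are set per request like this or come from model configuration is an assumption:

```typescript
import type { LLMService } from "genai-lite";

async function askGemma(service: LLMService) {
  return service.sendMessage({
    providerId: "openrouter",
    modelId: "google/gemma-3-27b-it:free", // one of the new presets' models
    systemMessage: "Answer in one sentence.",
    messages: [{ role: "user", content: "What is a mutex?" }],
    settings: {
      supportsSystemMessage: false, // hypothetical flag placement; tells the adapter there is no native system channel
    },
  });
}
```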
package/dist/llm/clients/LlamaCppClientAdapter.js CHANGED
@@ -9,6 +9,7 @@ exports.LlamaCppClientAdapter = void 0;
  const openai_1 = __importDefault(require("openai"));
  const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
+ const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const LlamaCppServerClient_1 = require("./LlamaCppServerClient");
  const config_1 = require("../config");
  const defaultLogger_1 = require("../../logging/defaultLogger");
@@ -245,20 +246,14 @@ class LlamaCppClientAdapter {
  */
  formatMessages(request) {
  const messages = [];
- // Add system message if provided
- if (request.systemMessage) {
- messages.push({
- role: "system",
- content: request.systemMessage,
- });
- }
- // Add conversation messages
+ const inlineSystemMessages = [];
+ // Check if model supports system messages
+ const supportsSystem = request.settings.supportsSystemMessage !== false;
+ // Add conversation messages (collecting system messages separately)
  for (const message of request.messages) {
  if (message.role === "system") {
- messages.push({
- role: "system",
- content: message.content,
- });
+ // Collect inline system messages
+ inlineSystemMessages.push(message.content);
  }
  else if (message.role === "user") {
  messages.push({
@@ -273,6 +268,29 @@
  });
  }
  }
+ // Use shared utility to collect and combine system content
+ const { combinedSystemContent, useNativeSystemMessage } = (0, systemMessageUtils_1.collectSystemContent)(request.systemMessage, inlineSystemMessages, supportsSystem);
+ if (combinedSystemContent) {
+ if (useNativeSystemMessage) {
+ // Model supports system messages - add as system role at the start
+ messages.unshift({
+ role: "system",
+ content: combinedSystemContent,
+ });
+ }
+ else {
+ // Model doesn't support system messages - prepend to first user message
+ const simpleMessages = messages.map((m) => ({
+ role: m.role,
+ content: m.content,
+ }));
+ const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
+ if (modifiedIndex !== -1) {
+ messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
+ logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ }
+ }
+ }
  return messages;
  }
  /**
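For the llama.cpp path the effect is easiest to see on the resulting message array. A sketch of the two outcomes of `formatMessages` as implied by the code above; the fallback case assumes the helper simply prepends the system text (see the earlier inferred sketch):

```typescript
// Input: systemMessage "You are terse." plus one user turn.

// Native path (supportsSystemMessage !== false): the combined system content is unshifted.
const nativeResult = [
  { role: "system", content: "You are terse." },
  { role: "user", content: "List three sorting algorithms." },
];

// Fallback path (supportsSystemMessage === false): the system text is folded into the
// first user message instead; assuming simple prepending, roughly:
const fallbackResult = [
  { role: "user", content: "You are terse.\n\nList three sorting algorithms." },
];
```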
package/dist/llm/clients/MistralClientAdapter.d.ts ADDED
@@ -0,0 +1,94 @@
+ import type { LLMResponse, LLMFailureResponse } from "../types";
+ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ /**
+ * Configuration options for MistralClientAdapter
+ */
+ export interface MistralClientConfig {
+ /** Base URL of the Mistral API (default: https://api.mistral.ai) */
+ baseURL?: string;
+ }
+ /**
+ * Client adapter for Mistral AI API integration
+ *
+ * Mistral AI provides powerful language models including:
+ * - mistral-small-latest: Cost-effective model for general tasks
+ * - mistral-large-2512: Frontier model with 256K context
+ * - codestral-2501: Specialized for code generation
+ *
+ * Key features:
+ * - Uses official @mistralai/mistralai SDK
+ * - Supports standard chat parameters (temperature, max_tokens, top_p, stop)
+ * - Does NOT support frequency_penalty or presence_penalty
+ *
+ * @example
+ * ```typescript
+ * // Create adapter
+ * const adapter = new MistralClientAdapter();
+ *
+ * // Use via LLMService
+ * const response = await service.sendMessage({
+ * providerId: 'mistral',
+ * modelId: 'mistral-small-latest',
+ * messages: [{ role: 'user', content: 'Hello!' }]
+ * });
+ * ```
+ */
+ export declare class MistralClientAdapter implements ILLMClientAdapter {
+ private baseURL;
+ /**
+ * Creates a new Mistral client adapter
+ *
+ * @param config Optional configuration for the adapter
+ */
+ constructor(config?: MistralClientConfig);
+ /**
+ * Sends a chat message to Mistral API
+ *
+ * @param request - The internal LLM request with applied settings
+ * @param apiKey - The Mistral API key
+ * @returns Promise resolving to success or failure response
+ */
+ sendMessage(request: InternalLLMChatRequest, apiKey: string): Promise<LLMResponse | LLMFailureResponse>;
+ /**
+ * Validates Mistral API key format
+ *
+ * Mistral API keys don't have a standard prefix, so we just check
+ * that the key has reasonable length and character set.
+ *
+ * @param apiKey - The API key to validate
+ * @returns True if the key format appears valid
+ */
+ validateApiKey(apiKey: string): boolean;
+ /**
+ * Gets adapter information
+ */
+ getAdapterInfo(): {
+ providerId: "mistral";
+ name: string;
+ version: string;
+ baseURL: string;
+ };
+ /**
+ * Formats messages for Mistral API
+ *
+ * @param request - The internal LLM request
+ * @returns Formatted messages array
+ */
+ private formatMessages;
+ /**
+ * Creates a standardized success response from Mistral's response
+ *
+ * @param completion - Raw Mistral completion response
+ * @param request - Original request for context
+ * @returns Standardized LLM response
+ */
+ private createSuccessResponse;
+ /**
+ * Creates a standardized error response from an error
+ *
+ * @param error - The error that occurred
+ * @param request - Original request for context
+ * @returns Standardized LLM failure response
+ */
+ private createErrorResponse;
+ }
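Beyond the `@example` in the declaration above, the public surface also covers key validation and adapter metadata. A short sketch against the declared signatures; the gateway URL is a placeholder:

```typescript
import { MistralClientAdapter } from "genai-lite";

// Point the adapter at a gateway or proxy instead of api.mistral.ai (placeholder URL).
const adapter = new MistralClientAdapter({ baseURL: "https://my-gateway.example.com" });

// validateApiKey only checks shape (length and character set), not validity with the API.
const looksValid = adapter.validateApiKey(process.env.MISTRAL_API_KEY ?? "");

console.log(adapter.getAdapterInfo()); // { providerId: "mistral", name, version, baseURL }
console.log({ looksValid });
```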
package/dist/llm/clients/MistralClientAdapter.js ADDED
@@ -0,0 +1,239 @@
+ "use strict";
+ // AI Summary: Client adapter for Mistral AI API using the official Mistral SDK.
+ // Provides access to Mistral models including Codestral and Mistral Large.
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MistralClientAdapter = void 0;
+ const mistralai_1 = require("@mistralai/mistralai");
+ const types_1 = require("./types");
+ const errorUtils_1 = require("../../shared/adapters/errorUtils");
+ const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
+ const defaultLogger_1 = require("../../logging/defaultLogger");
+ const logger = (0, defaultLogger_1.createDefaultLogger)();
+ /**
+ * Client adapter for Mistral AI API integration
+ *
+ * Mistral AI provides powerful language models including:
+ * - mistral-small-latest: Cost-effective model for general tasks
+ * - mistral-large-2512: Frontier model with 256K context
+ * - codestral-2501: Specialized for code generation
+ *
+ * Key features:
+ * - Uses official @mistralai/mistralai SDK
+ * - Supports standard chat parameters (temperature, max_tokens, top_p, stop)
+ * - Does NOT support frequency_penalty or presence_penalty
+ *
+ * @example
+ * ```typescript
+ * // Create adapter
+ * const adapter = new MistralClientAdapter();
+ *
+ * // Use via LLMService
+ * const response = await service.sendMessage({
+ * providerId: 'mistral',
+ * modelId: 'mistral-small-latest',
+ * messages: [{ role: 'user', content: 'Hello!' }]
+ * });
+ * ```
+ */
+ class MistralClientAdapter {
+ /**
+ * Creates a new Mistral client adapter
+ *
+ * @param config Optional configuration for the adapter
+ */
+ constructor(config) {
+ this.baseURL = config?.baseURL || process.env.MISTRAL_API_BASE_URL || 'https://api.mistral.ai';
+ }
+ /**
+ * Sends a chat message to Mistral API
+ *
+ * @param request - The internal LLM request with applied settings
+ * @param apiKey - The Mistral API key
+ * @returns Promise resolving to success or failure response
+ */
+ async sendMessage(request, apiKey) {
+ try {
+ // Initialize Mistral client
+ const mistral = new mistralai_1.Mistral({
+ apiKey,
+ serverURL: this.baseURL !== 'https://api.mistral.ai' ? this.baseURL : undefined,
+ });
+ // Format messages for Mistral API
+ const messages = this.formatMessages(request);
+ logger.debug(`Mistral API parameters:`, {
+ baseURL: this.baseURL,
+ model: request.modelId,
+ temperature: request.settings.temperature,
+ max_tokens: request.settings.maxTokens,
+ top_p: request.settings.topP,
+ });
+ logger.info(`Making Mistral API call for model: ${request.modelId}`);
+ // Make the API call
+ const completion = await mistral.chat.complete({
+ model: request.modelId,
+ messages: messages,
+ temperature: request.settings.temperature,
+ maxTokens: request.settings.maxTokens,
+ topP: request.settings.topP,
+ ...(request.settings.stopSequences.length > 0 && {
+ stop: request.settings.stopSequences,
+ }),
+ // Note: Mistral does not support frequency_penalty or presence_penalty
+ });
+ if (completion && completion.choices && completion.choices.length > 0) {
+ logger.info(`Mistral API call successful, response ID: ${completion.id}`);
+ return this.createSuccessResponse(completion, request);
+ }
+ else {
+ throw new Error('No valid choices in Mistral completion response');
+ }
+ }
+ catch (error) {
+ logger.error("Mistral API error:", error);
+ return this.createErrorResponse(error, request);
+ }
+ }
+ /**
+ * Validates Mistral API key format
+ *
+ * Mistral API keys don't have a standard prefix, so we just check
+ * that the key has reasonable length and character set.
+ *
+ * @param apiKey - The API key to validate
+ * @returns True if the key format appears valid
+ */
+ validateApiKey(apiKey) {
+ // Mistral keys are typically 32+ characters, alphanumeric
+ return apiKey.length >= 32 && /^[a-zA-Z0-9]+$/.test(apiKey);
+ }
+ /**
+ * Gets adapter information
+ */
+ getAdapterInfo() {
+ return {
+ providerId: "mistral",
+ name: "Mistral Client Adapter",
+ version: "1.0.0",
+ baseURL: this.baseURL,
+ };
+ }
+ /**
+ * Formats messages for Mistral API
+ *
+ * @param request - The internal LLM request
+ * @returns Formatted messages array
+ */
+ formatMessages(request) {
+ const messages = [];
+ const inlineSystemMessages = [];
+ // Mistral supports system messages natively
+ const supportsSystem = request.settings.supportsSystemMessage !== false;
+ // Add conversation messages (collecting system messages separately)
+ for (const message of request.messages) {
+ if (message.role === "system") {
+ // Collect inline system messages
+ inlineSystemMessages.push(message.content);
+ }
+ else if (message.role === "user") {
+ messages.push({
+ role: "user",
+ content: message.content,
+ });
+ }
+ else if (message.role === "assistant") {
+ messages.push({
+ role: "assistant",
+ content: message.content,
+ });
+ }
+ }
+ // Use shared utility to collect and combine system content
+ const { combinedSystemContent, useNativeSystemMessage } = (0, systemMessageUtils_1.collectSystemContent)(request.systemMessage, inlineSystemMessages, supportsSystem);
+ if (combinedSystemContent) {
+ if (useNativeSystemMessage) {
+ // Model supports system messages - add as system role at the start
+ messages.unshift({
+ role: "system",
+ content: combinedSystemContent,
+ });
+ }
+ else {
+ // Model doesn't support system messages - prepend to first user message
+ const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(messages, combinedSystemContent, request.settings.systemMessageFallback);
+ if (modifiedIndex !== -1) {
+ logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ }
+ }
+ }
+ return messages;
+ }
+ /**
+ * Creates a standardized success response from Mistral's response
+ *
+ * @param completion - Raw Mistral completion response
+ * @param request - Original request for context
+ * @returns Standardized LLM response
+ */
+ createSuccessResponse(completion, request) {
+ const choice = completion.choices[0];
+ if (!choice || !choice.message) {
+ throw new Error("No valid choices in Mistral completion response");
+ }
+ return {
+ id: completion.id || `mistral-${Date.now()}`,
+ provider: request.providerId,
+ model: completion.model || request.modelId,
+ created: completion.created || Math.floor(Date.now() / 1000),
+ choices: completion.choices.map((c, index) => ({
+ message: {
+ role: "assistant",
+ content: c.message?.content || "",
+ },
+ finish_reason: c.finishReason || c.finish_reason || "stop",
+ index: c.index ?? index,
+ })),
+ usage: completion.usage
+ ? {
+ prompt_tokens: completion.usage.promptTokens || completion.usage.prompt_tokens || 0,
+ completion_tokens: completion.usage.completionTokens || completion.usage.completion_tokens || 0,
+ total_tokens: completion.usage.totalTokens || completion.usage.total_tokens || 0,
+ }
+ : undefined,
+ object: "chat.completion",
+ };
+ }
+ /**
+ * Creates a standardized error response from an error
+ *
+ * @param error - The error that occurred
+ * @param request - Original request for context
+ * @returns Standardized LLM failure response
+ */
+ createErrorResponse(error, request) {
+ // Use common error mapping
+ const mappedError = (0, errorUtils_1.getCommonMappedErrorDetails)(error);
+ // Mistral-specific error refinements
+ if (mappedError.status === 400) {
+ const errorMessage = (error?.message || '').toLowerCase();
+ if (errorMessage.includes('model') && (errorMessage.includes('not available') || errorMessage.includes('not found'))) {
+ mappedError.errorCode = types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND;
+ }
+ if (errorMessage.includes('context') || errorMessage.includes('token')) {
+ mappedError.errorCode = types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED;
+ }
+ }
+ return {
+ provider: request.providerId,
+ model: request.modelId,
+ error: {
+ message: mappedError.errorMessage,
+ code: mappedError.errorCode,
+ type: mappedError.errorType,
+ ...(mappedError.status && { status: mappedError.status }),
+ providerError: error,
+ },
+ object: "error",
+ };
+ }
+ }
+ exports.MistralClientAdapter = MistralClientAdapter;
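Because `sendMessage` returns an `LLMFailureResponse` rather than throwing, callers branch on the `object` field. A small sketch using the error codes the adapter maps above; how the `ADAPTER_ERROR_CODES` values are spelled is not shown in this diff, so the string comparisons are assumptions, and the request literal is cast because `InternalLLMChatRequest` is normally built by `LLMService`:

```typescript
import { MistralClientAdapter } from "genai-lite";

async function run(apiKey: string) {
  const adapter = new MistralClientAdapter();
  const result = await adapter.sendMessage(
    {
      providerId: "mistral",
      modelId: "codestral-2501",
      messages: [{ role: "user", content: "Hello" }],
      settings: { temperature: 0.3, maxTokens: 256, topP: 1, stopSequences: [] },
    } as any, // normally produced by LLMService with defaults applied
    apiKey
  );

  if (result.object === "error") {
    // Error codes refined in createErrorResponse above; exact enum values are assumed here.
    if (result.error.code === "MODEL_NOT_FOUND") {
      console.error("Unknown Mistral model:", result.model);
    } else if (result.error.code === "CONTEXT_LENGTH_EXCEEDED") {
      console.error("Prompt too long for", result.model);
    } else {
      console.error("Mistral call failed:", result.error.message);
    }
    return;
  }
  console.log(result.choices[0].message.content);
}
```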
package/dist/llm/clients/OpenAIClientAdapter.js CHANGED
@@ -9,6 +9,7 @@ exports.OpenAIClientAdapter = void 0;
  const openai_1 = __importDefault(require("openai"));
  const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
+ const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
  const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
@@ -134,21 +135,14 @@ class OpenAIClientAdapter {
  */
  formatMessages(request) {
  const messages = [];
- // Add system message if provided
- if (request.systemMessage) {
- messages.push({
- role: "system",
- content: request.systemMessage,
- });
- }
- // Add conversation messages
+ const inlineSystemMessages = [];
+ // Check if model supports system messages
+ const supportsSystem = request.settings.supportsSystemMessage !== false;
+ // Add conversation messages (collecting system messages separately)
  for (const message of request.messages) {
  if (message.role === "system") {
- // Handle system messages in conversation
- messages.push({
- role: "system",
- content: message.content,
- });
+ // Collect inline system messages
+ inlineSystemMessages.push(message.content);
  }
  else if (message.role === "user") {
  messages.push({
@@ -163,6 +157,29 @@
  });
  }
  }
+ // Use shared utility to collect and combine system content
+ const { combinedSystemContent, useNativeSystemMessage } = (0, systemMessageUtils_1.collectSystemContent)(request.systemMessage, inlineSystemMessages, supportsSystem);
+ if (combinedSystemContent) {
+ if (useNativeSystemMessage) {
+ // Model supports system messages - add as system role at the start
+ messages.unshift({
+ role: "system",
+ content: combinedSystemContent,
+ });
+ }
+ else {
+ // Model doesn't support system messages - prepend to first user message
+ const simpleMessages = messages.map((m) => ({
+ role: m.role,
+ content: m.content,
+ }));
+ const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
+ if (modifiedIndex !== -1) {
+ messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
+ logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ }
+ }
+ }
  return messages;
  }
  /**