genai-lite 0.1.0

@@ -0,0 +1,284 @@
+ "use strict";
+ // AI Summary: Mock client adapter for testing LLM functionality without making real API calls.
+ // Provides deterministic responses based on request content for development and testing.
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MockClientAdapter = void 0;
+ const types_1 = require("./types");
+ /**
+  * Mock client adapter for testing LLM functionality
+  *
+  * This adapter simulates various LLM provider responses without making real API calls.
+  * It's useful for:
+  * - Testing the LLM service flow
+  * - Development when API keys are not available
+  * - Simulating error conditions
+  * - Performance testing without API costs
+  */
+ class MockClientAdapter {
+     constructor(providerId = "openai") {
+         this.providerId = providerId;
+     }
+     /**
+      * Sends a mock message response based on request content
+      *
+      * @param request - The LLM request
+      * @param apiKey - The API key (ignored for mock)
+      * @returns Promise resolving to mock response
+      */
+     async sendMessage(request, apiKey) {
+         // Simulate network delay
+         await this.simulateDelay(100, 500);
+         try {
+             // Check for special test patterns in the last user message
+             const lastMessage = request.messages[request.messages.length - 1];
+             const content = lastMessage?.content?.toLowerCase() || "";
+             // Simulate various error conditions based on message content
+             if (content.includes("error_invalid_key")) {
+                 return this.createErrorResponse("Invalid API key provided", types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY, 401, request);
+             }
+             if (content.includes("error_rate_limit")) {
+                 return this.createErrorResponse("Rate limit exceeded", types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED, 429, request);
+             }
+             if (content.includes("error_credits")) {
+                 return this.createErrorResponse("Insufficient credits", types_1.ADAPTER_ERROR_CODES.INSUFFICIENT_CREDITS, 402, request);
+             }
+             if (content.includes("error_context_length")) {
+                 return this.createErrorResponse("Context length exceeded", types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED, 400, request);
+             }
+             if (content.includes("error_model_not_found")) {
+                 return this.createErrorResponse("Model not found", types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND, 404, request);
+             }
+             if (content.includes("error_content_filter")) {
+                 return this.createErrorResponse("Content filtered due to policy violation", types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER, 400, request);
+             }
+             if (content.includes("error_network")) {
+                 return this.createErrorResponse("Network connection failed", types_1.ADAPTER_ERROR_CODES.NETWORK_ERROR, 0, request);
+             }
+             if (content.includes("error_generic")) {
+                 return this.createErrorResponse("Generic provider error", types_1.ADAPTER_ERROR_CODES.PROVIDER_ERROR, 500, request);
+             }
+             // Generate successful mock response
+             return this.createSuccessResponse(request, content);
+         }
+         catch (error) {
+             return this.createErrorResponse(`Mock adapter error: ${error instanceof Error ? error.message : "Unknown error"}`, types_1.ADAPTER_ERROR_CODES.UNKNOWN_ERROR, 500, request);
+         }
+     }
+     /**
+      * Validates API key format (the mock accepts any non-empty key)
+      */
+     validateApiKey(apiKey) {
+         return apiKey.length > 0;
+     }
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo() {
+         return {
+             providerId: this.providerId,
+             name: "Mock Client Adapter",
+             version: "1.0.0",
+             supportedModels: ["mock-model-1", "mock-model-2"],
+         };
+     }
+     /**
+      * Creates a successful mock response
+      */
+     createSuccessResponse(request, userContent) {
+         // Generate response content based on user input and settings
+         let responseContent;
+         // Check for settings-based test patterns
+         if (userContent.includes("test_temperature")) {
+             responseContent = this.generateTemperatureTestResponse(request.settings.temperature);
+         }
+         else if (userContent.includes("test_settings")) {
+             responseContent = this.generateSettingsTestResponse(request.settings);
+         }
+         else if (userContent.includes("hello") || userContent.includes("hi")) {
+             responseContent =
+                 "Hello! I'm a mock LLM assistant. How can I help you today?";
+         }
+         else if (userContent.includes("weather")) {
+             responseContent =
+                 "I'm a mock assistant and don't have access to real weather data, but I can pretend it's sunny and 72°F!";
+         }
+         else if (userContent.includes("code") ||
+             userContent.includes("programming")) {
+             responseContent =
+                 'Here\'s some mock code:\n\n```javascript\nfunction mockFunction() {\n return "This is mock code!";\n}\n```';
+         }
+         else if (userContent.includes("long") ||
+             userContent.includes("detailed")) {
+             responseContent = this.generateLongResponse();
+         }
+         else {
+             responseContent = `You said: "${userContent}". This is a mock response from the ${this.providerId} mock adapter.`;
+         }
+         // Apply creativity based on temperature
+         responseContent = this.applyTemperatureEffects(responseContent, request.settings.temperature);
+         // Apply maxTokens constraint (rough simulation)
+         const originalLength = responseContent.length;
+         if (request.settings.maxTokens && request.settings.maxTokens < 200) {
+             const words = responseContent.split(" ");
+             const maxWords = Math.max(1, Math.floor(request.settings.maxTokens / 4));
+             if (words.length > maxWords) {
+                 responseContent = words.slice(0, maxWords).join(" ") + "...";
+             }
+         }
+         // Check for stop sequences
+         if (request.settings.stopSequences.length > 0) {
+             for (const stopSeq of request.settings.stopSequences) {
+                 const stopIndex = responseContent.indexOf(stopSeq);
+                 if (stopIndex !== -1) {
+                     responseContent = responseContent.substring(0, stopIndex);
+                     break;
+                 }
+             }
+         }
+         const mockTokenCount = Math.floor(responseContent.length / 4); // Rough token estimation
+         const promptTokenCount = Math.floor(request.messages.reduce((acc, msg) => acc + msg.content.length, 0) / 4);
+         // Determine finish reason
+         let finishReason = "stop";
+         if (originalLength > responseContent.length &&
+             request.settings.maxTokens &&
+             mockTokenCount >= request.settings.maxTokens) {
+             finishReason = "length";
+         }
+         else if (request.settings.stopSequences.some((seq) => responseContent.includes(seq))) {
+             finishReason = "stop";
+         }
+         return {
+             id: `mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
+             provider: request.providerId,
+             model: request.modelId,
+             created: Math.floor(Date.now() / 1000),
+             choices: [
+                 {
+                     message: {
+                         role: "assistant",
+                         content: responseContent,
+                     },
+                     finish_reason: finishReason,
+                     index: 0,
+                 },
+             ],
+             usage: {
+                 prompt_tokens: promptTokenCount,
+                 completion_tokens: mockTokenCount,
+                 total_tokens: promptTokenCount + mockTokenCount,
+             },
+             object: "chat.completion",
+         };
+     }
+     /**
+      * Generates a response that demonstrates temperature effects
+      */
+     generateTemperatureTestResponse(temperature) {
+         if (temperature < 0.3) {
+             return "Low temperature setting detected. This response should be more deterministic and focused.";
+         }
+         else if (temperature > 0.8) {
+             return "High temperature setting detected! This response should be more creative, varied, and potentially surprising in its word choices and structure.";
+         }
+         else {
+             return "Moderate temperature setting detected. This response balances consistency with some creative variation.";
+         }
+     }
+     /**
+      * Generates a response that shows current settings
+      */
+     generateSettingsTestResponse(settings) {
+         return `Current mock settings:
+ - Temperature: ${settings.temperature}
+ - Max Tokens: ${settings.maxTokens}
+ - Top P: ${settings.topP}
+ - Stop Sequences: ${settings.stopSequences.length > 0
+             ? settings.stopSequences.join(", ")
+             : "none"}
+ - Frequency Penalty: ${settings.frequencyPenalty}
+ - Presence Penalty: ${settings.presencePenalty}
+ - User: ${settings.user || "not set"}`;
+     }
+     /**
+      * Applies mock temperature effects to response content
+      */
+     applyTemperatureEffects(content, temperature) {
+         // At very low temperatures, make responses more formal
+         if (temperature < 0.2) {
+             return content.replace(/!/g, ".").replace(/\?/g, ".");
+         }
+         // At high temperatures, add some creative variations
+         if (temperature > 0.8) {
+             const variations = [
+                 content + " 🎯",
+                 content + " (with creative flair!)",
+                 "✨ " + content,
+                 content + " — quite interesting, isn't it?",
+             ];
+             return variations[Math.floor(Math.random() * variations.length)];
+         }
+         return content;
+     }
+     /**
+      * Creates an error response
+      */
+     createErrorResponse(message, code, status, request) {
+         return {
+             provider: request.providerId,
+             model: request.modelId,
+             error: {
+                 message,
+                 code,
+                 type: this.getErrorType(code),
+                 ...(status > 0 && { status }),
+             },
+             object: "error",
+         };
+     }
+     /**
+      * Maps error codes to error types
+      */
+     getErrorType(code) {
+         switch (code) {
+             case types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY:
+                 return "authentication_error";
+             case types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED:
+             case types_1.ADAPTER_ERROR_CODES.INSUFFICIENT_CREDITS:
+                 return "rate_limit_error";
+             case types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND:
+             case types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED:
+                 return "invalid_request_error";
+             case types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER:
+                 return "content_filter_error";
+             case types_1.ADAPTER_ERROR_CODES.NETWORK_ERROR:
+                 return "connection_error";
+             default:
+                 return "server_error";
+         }
+     }
+     /**
+      * Generates a longer mock response for testing
+      */
+     generateLongResponse() {
+         return `This is a detailed mock response from the ${this.providerId} adapter.
+
+ I can simulate various types of responses based on your input. Here are some features:
+
+ 1. **Error Simulation**: Include phrases like "error_rate_limit" to test error handling
+ 2. **Variable Length**: Request "long" responses to test token limits
+ 3. **Code Generation**: Ask about "programming" to get mock code snippets
+ 4. **Conversational**: Simple greetings work too
+
+ The mock adapter is useful for testing the LLM integration without making real API calls. It simulates realistic response times, token usage, and various error conditions that you might encounter with real LLM providers.
+
+ This response demonstrates how the adapter can generate longer content while still respecting the maxTokens parameter if specified in the request settings.`;
+     }
+     /**
+      * Simulates network delay with random variation
+      */
+     async simulateDelay(minMs, maxMs) {
+         const delay = Math.floor(Math.random() * (maxMs - minMs + 1)) + minMs;
+         return new Promise((resolve) => setTimeout(resolve, delay));
+     }
+ }
+ exports.MockClientAdapter = MockClientAdapter;
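
A quick sketch of how this mock adapter might be driven in a test. The request literal mirrors the fields `sendMessage` actually reads (`messages`, `settings`, `providerId`, `modelId`); the real `InternalLLMChatRequest` type lives in `./types` and may carry more fields, and the import path from the package root is an assumption:

```ts
// Hypothetical usage; import path and request shape are inferred from the code above.
import { MockClientAdapter } from "genai-lite";

const adapter = new MockClientAdapter("openai");

const response = await adapter.sendMessage(
  {
    providerId: "openai",
    modelId: "mock-model-1",
    // The phrase "error_rate_limit" triggers the RATE_LIMIT_EXCEEDED branch.
    messages: [{ role: "user", content: "Please error_rate_limit" }],
    settings: {
      temperature: 0.7,
      maxTokens: 256,
      topP: 1,
      stopSequences: [],
      frequencyPenalty: 0,
      presencePenalty: 0,
    },
  } as any, // cast because the internal request type is not reproduced here
  "any-non-empty-key"
);

console.log(response.object); // "error" for this input; "chat.completion" otherwise
```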
@@ -0,0 +1,69 @@
+ import type { LLMResponse, LLMFailureResponse } from "../types";
+ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ /**
+  * Client adapter for OpenAI API integration
+  *
+  * This adapter:
+  * - Formats requests according to OpenAI's chat completions API
+  * - Handles OpenAI-specific authentication and headers
+  * - Maps OpenAI responses to standardized LLMResponse format
+  * - Converts OpenAI errors to standardized LLMFailureResponse format
+  */
+ export declare class OpenAIClientAdapter implements ILLMClientAdapter {
+     private baseURL?;
+     /**
+      * Creates a new OpenAI client adapter
+      *
+      * @param config Optional configuration for the adapter
+      * @param config.baseURL Custom base URL for OpenAI-compatible APIs
+      */
+     constructor(config?: {
+         baseURL?: string;
+     });
+     /**
+      * Sends a chat message to OpenAI's API
+      *
+      * @param request - The internal LLM request with applied settings
+      * @param apiKey - The decrypted OpenAI API key
+      * @returns Promise resolving to success or failure response
+      */
+     sendMessage(request: InternalLLMChatRequest, apiKey: string): Promise<LLMResponse | LLMFailureResponse>;
+     /**
+      * Validates OpenAI API key format
+      *
+      * @param apiKey - The API key to validate
+      * @returns True if the key format appears valid
+      */
+     validateApiKey(apiKey: string): boolean;
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo(): {
+         providerId: "openai";
+         name: string;
+         version: string;
+     };
+     /**
+      * Formats messages for OpenAI API
+      *
+      * @param request - The internal LLM request
+      * @returns Formatted messages array for OpenAI
+      */
+     private formatMessages;
+     /**
+      * Creates a standardized success response from OpenAI's response
+      *
+      * @param completion - Raw OpenAI completion response
+      * @param request - Original request for context
+      * @returns Standardized LLM response
+      */
+     private createSuccessResponse;
+     /**
+      * Creates a standardized error response from OpenAI errors
+      *
+      * @param error - The error from OpenAI API
+      * @param request - Original request for context
+      * @returns Standardized LLM failure response
+      */
+     private createErrorResponse;
+ }
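
The optional `baseURL` is what makes this adapter usable against any OpenAI-compatible endpoint, not just OpenAI itself. A minimal construction sketch; the import path and the localhost URL are illustrative assumptions, not from the package docs:

```ts
import { OpenAIClientAdapter } from "genai-lite"; // assumed export path

// Default construction targets OpenAI's hosted API.
const hosted = new OpenAIClientAdapter();

// A custom baseURL points the same adapter at an OpenAI-compatible server,
// e.g. a local inference gateway (URL is illustrative).
const local = new OpenAIClientAdapter({ baseURL: "http://localhost:8000/v1" });

// Key validation is a cheap format check: "sk-" prefix and at least 20 characters.
console.log(hosted.validateApiKey("sk-test-1234567890123456")); // true
console.log(hosted.validateApiKey("not-a-key")); // false
```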
@@ -0,0 +1,227 @@
+ "use strict";
+ // AI Summary: OpenAI client adapter for making real API calls to OpenAI's chat completions endpoint.
+ // Handles request formatting, response parsing, and error mapping to standardized format.
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OpenAIClientAdapter = void 0;
+ const openai_1 = __importDefault(require("openai"));
+ const types_1 = require("./types");
+ const adapterErrorUtils_1 = require("./adapterErrorUtils");
+ /**
+  * Client adapter for OpenAI API integration
+  *
+  * This adapter:
+  * - Formats requests according to OpenAI's chat completions API
+  * - Handles OpenAI-specific authentication and headers
+  * - Maps OpenAI responses to standardized LLMResponse format
+  * - Converts OpenAI errors to standardized LLMFailureResponse format
+  */
+ class OpenAIClientAdapter {
+     /**
+      * Creates a new OpenAI client adapter
+      *
+      * @param config Optional configuration for the adapter
+      * @param config.baseURL Custom base URL for OpenAI-compatible APIs
+      */
+     constructor(config) {
+         this.baseURL = config?.baseURL;
+     }
+     /**
+      * Sends a chat message to OpenAI's API
+      *
+      * @param request - The internal LLM request with applied settings
+      * @param apiKey - The decrypted OpenAI API key
+      * @returns Promise resolving to success or failure response
+      */
+     async sendMessage(request, apiKey) {
+         try {
+             // Initialize OpenAI client
+             const openai = new openai_1.default({
+                 apiKey,
+                 ...(this.baseURL && { baseURL: this.baseURL }),
+             });
+             // Format messages for OpenAI API
+             const messages = this.formatMessages(request);
+             // Prepare API call parameters
+             const completionParams = {
+                 model: request.modelId,
+                 messages: messages,
+                 temperature: request.settings.temperature,
+                 max_completion_tokens: request.settings.maxTokens,
+                 top_p: request.settings.topP,
+                 ...(request.settings.stopSequences.length > 0 && {
+                     stop: request.settings.stopSequences,
+                 }),
+                 ...(request.settings.frequencyPenalty !== 0 && {
+                     frequency_penalty: request.settings.frequencyPenalty,
+                 }),
+                 ...(request.settings.presencePenalty !== 0 && {
+                     presence_penalty: request.settings.presencePenalty,
+                 }),
+                 ...(request.settings.user && {
+                     user: request.settings.user,
+                 }),
+             };
+             console.log(`OpenAI API parameters:`, {
+                 model: completionParams.model,
+                 temperature: completionParams.temperature,
+                 max_completion_tokens: completionParams.max_completion_tokens,
+                 top_p: completionParams.top_p,
+                 hasStop: !!completionParams.stop,
+                 frequency_penalty: completionParams.frequency_penalty,
+                 presence_penalty: completionParams.presence_penalty,
+                 hasUser: !!completionParams.user,
+             });
+             console.log(`Making OpenAI API call for model: ${request.modelId}`);
+             // Make the API call
+             const completion = await openai.chat.completions.create(completionParams);
+             // Type guard to ensure we have a non-streaming response
+             if ('id' in completion && 'choices' in completion) {
+                 console.log(`OpenAI API call successful, response ID: ${completion.id}`);
+                 // Convert to standardized response format
+                 return this.createSuccessResponse(completion, request);
+             }
+             else {
+                 throw new Error('Unexpected streaming response from OpenAI API');
+             }
+         }
+         catch (error) {
+             console.error("OpenAI API error:", error);
+             return this.createErrorResponse(error, request);
+         }
+     }
+     /**
+      * Validates OpenAI API key format
+      *
+      * @param apiKey - The API key to validate
+      * @returns True if the key format appears valid
+      */
+     validateApiKey(apiKey) {
+         // OpenAI API keys typically start with 'sk-' and are at least 20 characters
+         return apiKey.startsWith("sk-") && apiKey.length >= 20;
+     }
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo() {
+         return {
+             providerId: "openai",
+             name: "OpenAI Client Adapter",
+             version: "1.0.0",
+         };
+     }
+     /**
+      * Formats messages for OpenAI API
+      *
+      * @param request - The internal LLM request
+      * @returns Formatted messages array for OpenAI
+      */
+     formatMessages(request) {
+         const messages = [];
+         // Add system message if provided
+         if (request.systemMessage) {
+             messages.push({
+                 role: "system",
+                 content: request.systemMessage,
+             });
+         }
+         // Add conversation messages
+         for (const message of request.messages) {
+             if (message.role === "system") {
+                 // Handle system messages in conversation
+                 messages.push({
+                     role: "system",
+                     content: message.content,
+                 });
+             }
+             else if (message.role === "user") {
+                 messages.push({
+                     role: "user",
+                     content: message.content,
+                 });
+             }
+             else if (message.role === "assistant") {
+                 messages.push({
+                     role: "assistant",
+                     content: message.content,
+                 });
+             }
+         }
+         return messages;
+     }
+     /**
+      * Creates a standardized success response from OpenAI's response
+      *
+      * @param completion - Raw OpenAI completion response
+      * @param request - Original request for context
+      * @returns Standardized LLM response
+      */
+     createSuccessResponse(completion, request) {
+         const choice = completion.choices[0];
+         if (!choice || !choice.message) {
+             throw new Error("Invalid completion structure from OpenAI API");
+         }
+         return {
+             id: completion.id,
+             provider: request.providerId,
+             model: completion.model || request.modelId,
+             created: completion.created,
+             choices: [
+                 {
+                     message: {
+                         role: choice.message.role,
+                         content: choice.message.content || "",
+                     },
+                     finish_reason: choice.finish_reason,
+                     index: choice.index,
+                 },
+             ],
+             usage: completion.usage
+                 ? {
+                     prompt_tokens: completion.usage.prompt_tokens,
+                     completion_tokens: completion.usage.completion_tokens,
+                     total_tokens: completion.usage.total_tokens,
+                 }
+                 : undefined,
+             object: "chat.completion",
+         };
+     }
+     /**
+      * Creates a standardized error response from OpenAI errors
+      *
+      * @param error - The error from OpenAI API
+      * @param request - Original request for context
+      * @returns Standardized LLM failure response
+      */
+     createErrorResponse(error, request) {
+         // Use shared error mapping utility for common error patterns
+         const initialProviderMessage = error instanceof openai_1.default.APIError ? error.message : undefined;
+         let { errorCode, errorMessage, errorType, status } = (0, adapterErrorUtils_1.getCommonMappedErrorDetails)(error, initialProviderMessage);
+         // Apply OpenAI-specific refinements for 400 errors based on message content
+         if (error instanceof openai_1.default.APIError && status === 400) {
+             if (error.message.toLowerCase().includes("context length")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED;
+             }
+             else if (error.message.toLowerCase().includes("content policy")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER;
+                 errorType = "content_filter_error";
+             }
+             // For other 400 errors, use the default mapping from the utility (PROVIDER_ERROR)
+         }
+         return {
+             provider: request.providerId,
+             model: request.modelId,
+             error: {
+                 message: errorMessage,
+                 code: errorCode,
+                 type: errorType,
+                 ...(status && { status }),
+                 providerError: error,
+             },
+             object: "error",
+         };
+     }
+ }
+ exports.OpenAIClientAdapter = OpenAIClientAdapter;
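
Note that `sendMessage` resolves with either an `LLMResponse` or an `LLMFailureResponse` rather than throwing, so callers can branch on the `object` discriminator. A sketch of that pattern (the type import path is an assumption):

```ts
import type { LLMResponse, LLMFailureResponse } from "genai-lite"; // assumed export path

// No try/catch is needed around sendMessage for provider-level errors;
// both shapes share the `object` discriminator.
function readResult(result: LLMResponse | LLMFailureResponse): string {
  if (result.object === "error") {
    // Failure: code/type come from the shared error mapping.
    return `LLM call failed (${result.error.code}): ${result.error.message}`;
  }
  // Success: the shape mirrors OpenAI's chat.completion response.
  return result.choices[0]?.message.content ?? "";
}
```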
@@ -0,0 +1,26 @@
+ import { type AdapterErrorCode } from './types';
+ /**
+  * Mapped error details returned by the utility function
+  */
+ export interface MappedErrorDetails {
+     errorCode: AdapterErrorCode;
+     errorMessage: string;
+     errorType: string;
+     status?: number;
+ }
+ /**
+  * Maps common error patterns to standardized error codes and types
+  *
+  * This utility handles:
+  * - Common HTTP status codes (401, 402, 404, 429, 4xx, 5xx)
+  * - Network connection errors (ENOTFOUND, ECONNREFUSED, timeouts)
+  * - Generic JavaScript errors
+  *
+  * Individual adapters can further refine the mappings for provider-specific cases,
+  * particularly for 400 errors where message content determines the specific error type.
+  *
+  * @param error - The error object from the provider SDK or network layer
+  * @param providerMessageOverride - Optional override for the error message (e.g., from provider SDK)
+  * @returns Mapped error details with standardized codes and types
+  */
+ export declare function getCommonMappedErrorDetails(error: any, providerMessageOverride?: string): MappedErrorDetails;
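
To illustrate the intended division of labor, here is a sketch of a custom adapter refining the common mapping the way the OpenAI adapter does above. The import paths, and the assumption that `ADAPTER_ERROR_CODES` is exported alongside the utility, are hypothetical:

```ts
import { getCommonMappedErrorDetails, type MappedErrorDetails } from "genai-lite"; // assumed export path
import { ADAPTER_ERROR_CODES } from "genai-lite"; // assumed export path

// Start from the shared mapping, then refine ambiguous 400s using the
// provider's error message, mirroring OpenAIClientAdapter.createErrorResponse.
function mapCustomProviderError(error: unknown): MappedErrorDetails {
  const details = getCommonMappedErrorDetails(error);
  if (details.status === 400 &&
      error instanceof Error &&
      error.message.toLowerCase().includes("context length")) {
    details.errorCode = ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED;
    details.errorType = "invalid_request_error";
  }
  return details;
}
```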