@loxia-labs/loxia-autopilot-one 1.0.1 → 1.0.3

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (120)
  1. package/README.md +44 -54
  2. package/bin/cli.js +1 -115
  3. package/bin/loxia-terminal-v2.js +3 -0
  4. package/bin/loxia-terminal.js +3 -0
  5. package/bin/start-with-terminal.js +3 -0
  6. package/package.json +14 -15
  7. package/scripts/install-scanners.js +1 -235
  8. package/src/analyzers/CSSAnalyzer.js +1 -297
  9. package/src/analyzers/ConfigValidator.js +1 -690
  10. package/src/analyzers/ESLintAnalyzer.js +1 -320
  11. package/src/analyzers/JavaScriptAnalyzer.js +1 -261
  12. package/src/analyzers/PrettierFormatter.js +1 -247
  13. package/src/analyzers/PythonAnalyzer.js +1 -266
  14. package/src/analyzers/SecurityAnalyzer.js +1 -729
  15. package/src/analyzers/TypeScriptAnalyzer.js +1 -247
  16. package/src/analyzers/codeCloneDetector/analyzer.js +1 -344
  17. package/src/analyzers/codeCloneDetector/detector.js +1 -203
  18. package/src/analyzers/codeCloneDetector/index.js +1 -160
  19. package/src/analyzers/codeCloneDetector/parser.js +1 -199
  20. package/src/analyzers/codeCloneDetector/reporter.js +1 -148
  21. package/src/analyzers/codeCloneDetector/scanner.js +1 -59
  22. package/src/core/agentPool.js +1 -1474
  23. package/src/core/agentScheduler.js +1 -2147
  24. package/src/core/contextManager.js +1 -709
  25. package/src/core/messageProcessor.js +1 -732
  26. package/src/core/orchestrator.js +1 -548
  27. package/src/core/stateManager.js +1 -877
  28. package/src/index.js +1 -631
  29. package/src/interfaces/cli.js +1 -549
  30. package/src/interfaces/terminal/__tests__/smoke/advancedFeatures.test.js +1 -0
  31. package/src/interfaces/terminal/__tests__/smoke/agentControl.test.js +1 -0
  32. package/src/interfaces/terminal/__tests__/smoke/agents.test.js +1 -0
  33. package/src/interfaces/terminal/__tests__/smoke/components.test.js +1 -0
  34. package/src/interfaces/terminal/__tests__/smoke/connection.test.js +1 -0
  35. package/src/interfaces/terminal/__tests__/smoke/enhancements.test.js +1 -0
  36. package/src/interfaces/terminal/__tests__/smoke/imports.test.js +1 -0
  37. package/src/interfaces/terminal/__tests__/smoke/messages.test.js +1 -0
  38. package/src/interfaces/terminal/__tests__/smoke/tools.test.js +1 -0
  39. package/src/interfaces/terminal/api/apiClient.js +1 -0
  40. package/src/interfaces/terminal/api/messageRouter.js +1 -0
  41. package/src/interfaces/terminal/api/session.js +1 -0
  42. package/src/interfaces/terminal/api/websocket.js +1 -0
  43. package/src/interfaces/terminal/components/AgentCreator.js +1 -0
  44. package/src/interfaces/terminal/components/AgentEditor.js +1 -0
  45. package/src/interfaces/terminal/components/AgentSwitcher.js +1 -0
  46. package/src/interfaces/terminal/components/ErrorBoundary.js +1 -0
  47. package/src/interfaces/terminal/components/ErrorPanel.js +1 -0
  48. package/src/interfaces/terminal/components/Header.js +1 -0
  49. package/src/interfaces/terminal/components/HelpPanel.js +1 -0
  50. package/src/interfaces/terminal/components/InputBox.js +1 -0
  51. package/src/interfaces/terminal/components/Layout.js +1 -0
  52. package/src/interfaces/terminal/components/LoadingSpinner.js +1 -0
  53. package/src/interfaces/terminal/components/MessageList.js +1 -0
  54. package/src/interfaces/terminal/components/MultilineTextInput.js +1 -0
  55. package/src/interfaces/terminal/components/SearchPanel.js +1 -0
  56. package/src/interfaces/terminal/components/SettingsPanel.js +1 -0
  57. package/src/interfaces/terminal/components/StatusBar.js +1 -0
  58. package/src/interfaces/terminal/components/TextInput.js +1 -0
  59. package/src/interfaces/terminal/config/agentEditorConstants.js +1 -0
  60. package/src/interfaces/terminal/config/constants.js +1 -0
  61. package/src/interfaces/terminal/index.js +1 -0
  62. package/src/interfaces/terminal/state/useAgentControl.js +1 -0
  63. package/src/interfaces/terminal/state/useAgents.js +1 -0
  64. package/src/interfaces/terminal/state/useConnection.js +1 -0
  65. package/src/interfaces/terminal/state/useMessages.js +1 -0
  66. package/src/interfaces/terminal/state/useTools.js +1 -0
  67. package/src/interfaces/terminal/utils/debugLogger.js +1 -0
  68. package/src/interfaces/terminal/utils/settingsStorage.js +1 -0
  69. package/src/interfaces/terminal/utils/theme.js +1 -0
  70. package/src/interfaces/webServer.js +1 -2162
  71. package/src/modules/fileExplorer/controller.js +1 -280
  72. package/src/modules/fileExplorer/index.js +1 -37
  73. package/src/modules/fileExplorer/middleware.js +1 -92
  74. package/src/modules/fileExplorer/routes.js +1 -125
  75. package/src/modules/fileExplorer/types.js +1 -44
  76. package/src/services/aiService.js +1 -1232
  77. package/src/services/apiKeyManager.js +1 -164
  78. package/src/services/benchmarkService.js +1 -366
  79. package/src/services/budgetService.js +1 -539
  80. package/src/services/contextInjectionService.js +1 -247
  81. package/src/services/conversationCompactionService.js +1 -637
  82. package/src/services/errorHandler.js +1 -810
  83. package/src/services/fileAttachmentService.js +1 -544
  84. package/src/services/modelRouterService.js +1 -366
  85. package/src/services/modelsService.js +1 -322
  86. package/src/services/qualityInspector.js +1 -796
  87. package/src/services/tokenCountingService.js +1 -536
  88. package/src/tools/agentCommunicationTool.js +1 -1344
  89. package/src/tools/agentDelayTool.js +1 -485
  90. package/src/tools/asyncToolManager.js +1 -604
  91. package/src/tools/baseTool.js +1 -800
  92. package/src/tools/browserTool.js +1 -920
  93. package/src/tools/cloneDetectionTool.js +1 -621
  94. package/src/tools/dependencyResolverTool.js +1 -1215
  95. package/src/tools/fileContentReplaceTool.js +1 -875
  96. package/src/tools/fileSystemTool.js +1 -1107
  97. package/src/tools/fileTreeTool.js +1 -853
  98. package/src/tools/imageTool.js +1 -901
  99. package/src/tools/importAnalyzerTool.js +1 -1060
  100. package/src/tools/jobDoneTool.js +1 -248
  101. package/src/tools/seekTool.js +1 -956
  102. package/src/tools/staticAnalysisTool.js +1 -1778
  103. package/src/tools/taskManagerTool.js +1 -2873
  104. package/src/tools/terminalTool.js +1 -2304
  105. package/src/tools/webTool.js +1 -1430
  106. package/src/types/agent.js +1 -519
  107. package/src/types/contextReference.js +1 -972
  108. package/src/types/conversation.js +1 -730
  109. package/src/types/toolCommand.js +1 -747
  110. package/src/utilities/attachmentValidator.js +1 -292
  111. package/src/utilities/configManager.js +1 -582
  112. package/src/utilities/constants.js +1 -722
  113. package/src/utilities/directoryAccessManager.js +1 -535
  114. package/src/utilities/fileProcessor.js +1 -307
  115. package/src/utilities/logger.js +1 -436
  116. package/src/utilities/tagParser.js +1 -1246
  117. package/src/utilities/toolConstants.js +1 -317
  118. package/web-ui/build/index.html +2 -2
  119. package/web-ui/build/static/{index-Dy2bYbOa.css → index-CClD1090.css} +1 -1
  120. package/web-ui/build/static/{index-CjkkcnFA.js → index-lCBai6dX.js} +66 -67
@@ -1,1232 +1 @@
1
- /**
2
- * AIService - Manages communication with Azure backend API, model routing, rate limiting
3
- *
4
- * Purpose:
5
- * - Backend API communication
6
- * - Model selection and routing
7
- * - Rate limiting enforcement
8
- * - Conversation compactization
9
- * - Token usage tracking
10
- * - Request/response transformation
11
- */
12
-
13
- import {
14
- MODELS,
15
- MODEL_PROVIDERS,
16
- MODEL_ROUTING,
17
- HTTP_STATUS,
18
- ERROR_TYPES,
19
- SYSTEM_DEFAULTS
20
- } from '../utilities/constants.js';
21
-
22
- class AIService {
23
- constructor(config, logger, budgetService, errorHandler) {
24
- this.config = config;
25
- this.logger = logger;
26
- this.budgetService = budgetService;
27
- this.errorHandler = errorHandler;
28
-
29
- this.baseUrl = config.backend?.baseUrl || 'https://api.loxia.ai';
30
- this.timeout = config.backend?.timeout || 60000;
31
- this.retryAttempts = config.backend?.retryAttempts || 3;
32
-
33
- // Rate limiting
34
- this.rateLimiters = new Map();
35
- this.requestQueue = new Map();
36
-
37
- // Circuit breaker
38
- this.circuitBreaker = {
39
- failures: 0,
40
- lastFailureTime: null,
41
- isOpen: false,
42
- threshold: 5,
43
- timeout: 30000 // 30 seconds
44
- };
45
-
46
- // Model specifications
47
- this.modelSpecs = this._initializeModelSpecs();
48
-
49
- // Conversation managers for multi-model support
50
- this.conversationManagers = new Map();
51
-
52
- // API Key Manager reference (will be set by LoxiaSystem)
53
- this.apiKeyManager = null;
54
-
55
- // Agent Pool reference (will be set by LoxiaSystem)
56
- this.agentPool = null;
57
- }
58
-
59
- /**
60
- * Send message to backend API
61
- * @param {string} model - Target model name
62
- * @param {string|Array} messages - Message content or conversation history
63
- * @param {Object} options - Additional options (agentId, systemPrompt, etc.)
64
- * @returns {Promise<Object>} API response with content and metadata
65
- */
66
- async sendMessage(model, messages, options = {}) {
67
- const requestId = `req-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
68
-
69
- try {
70
- // Check circuit breaker
71
- if (this._isCircuitBreakerOpen()) {
72
- throw new Error('Service temporarily unavailable - circuit breaker is open');
73
- }
74
-
75
- // Skip local model validation - let Azure backend handle it
76
-
77
- // Check rate limits
78
- await this._checkRateLimit(model);
79
-
80
- // Format messages for specific model
81
- const formattedMessages = this._formatMessagesForModel(messages, model, options);
82
-
83
- // Prepare request payload
84
- const payload = {
85
- model, // Use original model name - let Azure backend handle it
86
- messages: formattedMessages,
87
- options: {
88
- max_tokens: this.modelSpecs[model]?.maxTokens || 4000,
89
- temperature: options.temperature || 0.7,
90
- stream: options.stream || false
91
- },
92
- metadata: {
93
- requestId,
94
- agentId: options.agentId,
95
- timestamp: new Date().toISOString()
96
- }
97
- };
98
-
99
- // Add system prompt if provided
100
- if (options.systemPrompt) {
101
- payload.system = options.systemPrompt;
102
- }
103
-
104
- this.logger.info(`Sending message to model: ${model}`, {
105
- requestId,
106
- agentId: options.agentId,
107
- messageCount: Array.isArray(messages) ? messages.length : 1,
108
- maxTokens: payload.max_tokens
109
- });
110
-
111
- // Make API request
112
- const response = await this._makeAPIRequest('/chat/completions', payload, requestId, {
113
- ...options,
114
- sessionId: options.sessionId, // Pass session ID for API key retrieval
115
- platformProvided: options.platformProvided || false
116
- });
117
-
118
- // Track usage
119
- if (response.usage) {
120
- await this.trackUsage(options.agentId, model, {
121
- prompt_tokens: response.usage.prompt_tokens || 0,
122
- completion_tokens: response.usage.completion_tokens || 0,
123
- total_tokens: response.usage.total_tokens || 0
124
- });
125
- }
126
-
127
- // Reset circuit breaker on success
128
- this._resetCircuitBreaker();
129
-
130
- return {
131
- content: response.choices[0]?.message?.content || '',
132
- model: response.model,
133
- tokenUsage: response.usage,
134
- requestId,
135
- finishReason: response.choices[0]?.finish_reason
136
- };
137
-
138
- } catch (error) {
139
- // Handle circuit breaker
140
- this._recordFailure();
141
-
142
- this.logger.error(`AI service request failed: ${error.message}`, {
143
- requestId,
144
- model,
145
- agentId: options.agentId,
146
- error: error.stack
147
- });
148
-
149
- // Handle specific error types
150
- await this.handleHttpError(error, { requestId, model, agentId: options.agentId });
151
-
152
- throw error;
153
- }
154
- }
155
-
156
- /**
157
- * Route model selection based on task and context
158
- * @param {string} task - Task type (coding, analysis, quick-tasks, creative)
159
- * @param {Object} context - Additional context for routing
160
- * @returns {Promise<string>} Selected model name
161
- */
162
- async routeModel(task, context = {}) {
163
- try {
164
- // Get available models for task
165
- const availableModels = MODEL_ROUTING[task.toUpperCase()] || MODEL_ROUTING.FALLBACK;
166
-
167
- // Check model availability and health
168
- const healthyModels = [];
169
- for (const model of availableModels) {
170
- const isHealthy = await this._checkModelHealth(model);
171
- if (isHealthy) {
172
- healthyModels.push(model);
173
- }
174
- }
175
-
176
- if (healthyModels.length === 0) {
177
- this.logger.warn(`No healthy models available for task: ${task}, using fallback`);
178
- return MODEL_ROUTING.FALLBACK[0];
179
- }
180
-
181
- // Select model based on context
182
- let selectedModel = healthyModels[0]; // Default to first healthy model
183
-
184
- // Prefer model based on context
185
- if (context.preferredModel && healthyModels.includes(context.preferredModel)) {
186
- selectedModel = context.preferredModel;
187
- } else if (context.complexity === 'high' && healthyModels.includes(MODELS.ANTHROPIC_OPUS)) {
188
- selectedModel = MODELS.ANTHROPIC_OPUS;
189
- } else if (context.speed === 'fast' && healthyModels.includes(MODELS.ANTHROPIC_HAIKU)) {
190
- selectedModel = MODELS.ANTHROPIC_HAIKU;
191
- }
192
-
193
- this.logger.info(`Model routed for task: ${task}`, {
194
- selectedModel,
195
- availableModels: healthyModels.length,
196
- context
197
- });
198
-
199
- return selectedModel;
200
-
201
- } catch (error) {
202
- this.logger.error(`Model routing failed: ${error.message}`, { task, context });
203
- return MODEL_ROUTING.FALLBACK[0];
204
- }
205
- }
206
-
207
- /**
208
- * Compactize conversation for specific model context window
209
- * @param {Array} messages - Message history
210
- * @param {string} targetModel - Target model name
211
- * @returns {Promise<Array>} Compactized messages
212
- */
213
- async compactizeConversation(messages, targetModel) {
214
- const modelSpec = this.modelSpecs[targetModel];
215
- if (!modelSpec) {
216
- throw new Error(`Unknown model: ${targetModel}`);
217
- }
218
-
219
- const maxTokens = modelSpec.contextWindow * 0.8; // Use 80% of context window
220
- let currentTokens = 0;
221
- const compactizedMessages = [];
222
-
223
- // Estimate tokens for each message
224
- const messagesWithTokens = await Promise.all(
225
- messages.map(async (msg) => ({
226
- ...msg,
227
- estimatedTokens: await this._estimateTokens(msg.content, targetModel)
228
- }))
229
- );
230
-
231
- // Start from the most recent messages
232
- const reversedMessages = [...messagesWithTokens].reverse();
233
-
234
- for (const message of reversedMessages) {
235
- if (currentTokens + message.estimatedTokens > maxTokens) {
236
- // If we've exceeded the limit, summarize older messages
237
- if (compactizedMessages.length === 0) {
238
- // If even the latest message is too long, truncate it
239
- const truncatedContent = await this._truncateMessage(message.content, maxTokens);
240
- compactizedMessages.unshift({
241
- ...message,
242
- content: truncatedContent,
243
- estimatedTokens: maxTokens
244
- });
245
- }
246
- break;
247
- }
248
-
249
- compactizedMessages.unshift(message);
250
- currentTokens += message.estimatedTokens;
251
- }
252
-
253
- // If we have remaining older messages, create a summary
254
- const remainingMessages = messagesWithTokens.slice(0, -compactizedMessages.length);
255
- if (remainingMessages.length > 0) {
256
- const summary = await this._summarizeMessages(remainingMessages, targetModel);
257
- compactizedMessages.unshift({
258
- role: 'system',
259
- content: `Previous conversation summary: ${summary}`,
260
- timestamp: remainingMessages[0].timestamp,
261
- type: 'summary',
262
- estimatedTokens: await this._estimateTokens(summary, targetModel)
263
- });
264
- }
265
-
266
- this.logger.info(`Conversation compactized for model: ${targetModel}`, {
267
- originalMessages: messages.length,
268
- compactizedMessages: compactizedMessages.length,
269
- estimatedTokens: currentTokens,
270
- maxTokens
271
- });
272
-
273
- return compactizedMessages;
274
- }
275
-
276
- /**
277
- * Track token usage and costs
278
- * @param {number} tokens - Number of tokens used
279
- * @param {number} cost - Cost in dollars
280
- * @returns {Promise<void>}
281
- */
282
- async trackUsage(agentId, model, tokenUsage, cost) {
283
- try {
284
- if (this.budgetService) {
285
- this.budgetService.trackUsage(agentId, model, tokenUsage);
286
- }
287
- } catch (error) {
288
- this.logger.error(`Usage tracking failed: ${error.message}`);
289
- }
290
- }
291
-
292
- /**
293
- * Handle HTTP errors with comprehensive error handling
294
- * @param {Error} error - Error object
295
- * @param {Object} context - Request context
296
- * @returns {Promise<void>}
297
- */
298
- async handleHttpError(error, context) {
299
- const errorType = this.errorHandler?.classifyError?.(error, context);
300
-
301
- switch (error.status || error.code) {
302
- case HTTP_STATUS.BAD_REQUEST:
303
- this.logger.error('Bad request to AI service', { context, error: error.message });
304
- throw new Error(`Invalid request: ${error.message}`);
305
-
306
- case HTTP_STATUS.UNAUTHORIZED:
307
- this.logger.error('Authentication failed with AI service', { context });
308
- throw new Error('Authentication failed - check API credentials');
309
-
310
- case HTTP_STATUS.FORBIDDEN:
311
- this.logger.error('Access forbidden to AI service', { context });
312
- throw new Error('Access forbidden - insufficient permissions');
313
-
314
- case HTTP_STATUS.NOT_FOUND:
315
- this.logger.error('AI service endpoint not found', { context });
316
- throw new Error('Service endpoint not found');
317
-
318
- case HTTP_STATUS.TOO_MANY_REQUESTS:
319
- this.logger.warn('Rate limit exceeded', { context });
320
- await this._handleRateLimit(context);
321
- throw new Error('Rate limit exceeded - request queued for retry');
322
-
323
- case HTTP_STATUS.INTERNAL_SERVER_ERROR:
324
- case HTTP_STATUS.BAD_GATEWAY:
325
- case HTTP_STATUS.SERVICE_UNAVAILABLE:
326
- case HTTP_STATUS.GATEWAY_TIMEOUT:
327
- this.logger.error('AI service unavailable', { context, status: error.status });
328
- await this._handleServiceUnavailable(context);
329
- throw new Error('AI service temporarily unavailable');
330
-
331
- default:
332
- this.logger.error('Unknown AI service error', { context, error: error.message });
333
- throw new Error(`AI service error: ${error.message}`);
334
- }
335
- }
336
-
337
- /**
338
- * Set API key manager instance
339
- * @param {ApiKeyManager} apiKeyManager - API key manager instance
340
- */
341
- setApiKeyManager(apiKeyManager) {
342
- this.apiKeyManager = apiKeyManager;
343
-
344
- this.logger?.info('API key manager set for AI service', {
345
- hasManager: !!apiKeyManager
346
- });
347
- }
348
-
349
- /**
350
- * Set agent pool reference
351
- * @param {Object} agentPool - Agent pool instance
352
- */
353
- setAgentPool(agentPool) {
354
- this.agentPool = agentPool;
355
-
356
- this.logger?.info('Agent pool set for AI service', {
357
- hasAgentPool: !!agentPool
358
- });
359
- }
360
-
361
- /**
362
- * Generate image from text prompt using AI models
363
- * @param {string} prompt - Text description of the image to generate
364
- * @param {Object} options - Generation options
365
- * @param {string} options.model - Model to use (e.g., 'flux-1.1-pro', 'dall-e-3')
366
- * @param {string} options.size - Image size (e.g., '1024x1024', '512x512')
367
- * @param {string} options.quality - Image quality ('standard' or 'hd')
368
- * @param {string} options.responseFormat - Response format ('url' or 'b64_json')
369
- * @param {string} options.sessionId - Session ID for API key retrieval
370
- * @param {boolean} options.platformProvided - Whether to use platform model
371
- * @returns {Promise<Object>} Generated image result with URL or base64 data
372
- */
373
- async generateImage(prompt, options = {}) {
374
- const requestId = `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
375
-
376
- try {
377
- // Check circuit breaker
378
- if (this._isCircuitBreakerOpen()) {
379
- throw new Error('Service temporarily unavailable - circuit breaker is open');
380
- }
381
-
382
- // Validate prompt
383
- if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
384
- throw new Error('Image generation requires a non-empty text prompt');
385
- }
386
-
387
- // Default options
388
- const model = options.model || 'flux-1.1-pro';
389
- const size = options.size || '1024x1024';
390
- const quality = options.quality || 'standard';
391
- const responseFormat = options.responseFormat || 'url';
392
-
393
- this.logger.info(`Generating image with model: ${model}`, {
394
- requestId,
395
- model,
396
- size,
397
- quality,
398
- promptLength: prompt.length
399
- });
400
-
401
- // Prepare request payload
402
- const payload = {
403
- prompt,
404
- model,
405
- size,
406
- quality,
407
- response_format: responseFormat,
408
- n: 1, // Generate 1 image
409
- metadata: {
410
- requestId,
411
- timestamp: new Date().toISOString()
412
- }
413
- };
414
-
415
- // Make API request to image generation endpoint
416
- const response = await this._makeImageAPIRequest(payload, requestId, {
417
- sessionId: options.sessionId,
418
- platformProvided: options.platformProvided || false
419
- });
420
-
421
- // Reset circuit breaker on success
422
- this._resetCircuitBreaker();
423
-
424
- return {
425
- url: response.data?.[0]?.url || null,
426
- b64_json: response.data?.[0]?.b64_json || null,
427
- model: response.model || model,
428
- requestId,
429
- revisedPrompt: response.data?.[0]?.revised_prompt || prompt
430
- };
431
-
432
- } catch (error) {
433
- // Handle circuit breaker
434
- this._recordFailure();
435
-
436
- this.logger.error(`Image generation failed: ${error.message}`, {
437
- requestId,
438
- model: options.model,
439
- error: error.stack
440
- });
441
-
442
- throw error;
443
- }
444
- }
445
-
446
- /**
447
- * Check service health for circuit breaker
448
- * @returns {Promise<boolean>} Service health status
449
- */
450
- async checkServiceHealth() {
451
- try {
452
- const response = await this._makeAPIRequest('/health', {}, 'health-check');
453
- return response.status === 'healthy';
454
- } catch (error) {
455
- return false;
456
- }
457
- }
458
-
459
- /**
460
- * Switch agent to different model
461
- * @param {string} agentId - Agent identifier
462
- * @param {string} newModel - New model name
463
- * @returns {Promise<Object>} Switch result
464
- */
465
- async switchAgentModel(agentId, newModel) {
466
- try {
467
- if (!this._isValidModel(newModel)) {
468
- throw new Error(`Invalid model: ${newModel}`);
469
- }
470
-
471
- // Get conversation manager for agent
472
- let conversationManager = this.conversationManagers.get(agentId);
473
- if (!conversationManager) {
474
- // Create new conversation manager if it doesn't exist
475
- conversationManager = new ConversationManager(agentId, this.logger);
476
- this.conversationManagers.set(agentId, conversationManager);
477
- }
478
-
479
- // Switch model and return conversation
480
- const modelConversation = await conversationManager.switchModel(newModel);
481
-
482
- // CRITICAL FIX: Update agent's currentModel field in AgentPool
483
- const agent = await this.agentPool?.getAgent(agentId);
484
- if (agent) {
485
- agent.currentModel = newModel;
486
- await this.agentPool.persistAgentState(agentId);
487
- }
488
-
489
- this.logger.info(`Agent model switched: ${agentId}`, {
490
- newModel,
491
- messageCount: modelConversation.messages.length
492
- });
493
-
494
- return {
495
- success: true,
496
- agentId,
497
- newModel,
498
- conversation: modelConversation
499
- };
500
-
501
- } catch (error) {
502
- this.logger.error(`Model switch failed: ${error.message}`, { agentId, newModel });
503
- throw error;
504
- }
505
- }
506
-
507
- /**
508
- * Initialize model specifications
509
- * @private
510
- */
511
- _initializeModelSpecs() {
512
- const baseSpecs = {
513
- // Anthropic Claude models
514
- [MODELS.ANTHROPIC_SONNET]: {
515
- provider: MODEL_PROVIDERS.ANTHROPIC,
516
- contextWindow: 200000,
517
- maxTokens: 8192, // Increased from 4096
518
- costPer1kTokens: 0.015
519
- },
520
- [MODELS.ANTHROPIC_OPUS]: {
521
- provider: MODEL_PROVIDERS.ANTHROPIC,
522
- contextWindow: 200000,
523
- maxTokens: 8192, // Increased from 4096
524
- costPer1kTokens: 0.075
525
- },
526
- [MODELS.ANTHROPIC_HAIKU]: {
527
- provider: MODEL_PROVIDERS.ANTHROPIC,
528
- contextWindow: 200000,
529
- maxTokens: 8192, // Increased from 4096
530
- costPer1kTokens: 0.0025
531
- },
532
-
533
- // OpenAI models
534
- [MODELS.GPT_4]: {
535
- provider: MODEL_PROVIDERS.OPENAI,
536
- contextWindow: 128000,
537
- maxTokens: 8192, // Increased from 4096
538
- costPer1kTokens: 0.03
539
- },
540
- [MODELS.GPT_4_MINI]: {
541
- provider: MODEL_PROVIDERS.OPENAI,
542
- contextWindow: 128000,
543
- maxTokens: 16384,
544
- costPer1kTokens: 0.0015
545
- },
546
- 'gpt-4o': {
547
- provider: MODEL_PROVIDERS.OPENAI,
548
- contextWindow: 128000,
549
- maxTokens: 8192,
550
- costPer1kTokens: 0.03
551
- },
552
- 'gpt-4o-mini': {
553
- provider: MODEL_PROVIDERS.OPENAI,
554
- contextWindow: 128000,
555
- maxTokens: 16384,
556
- costPer1kTokens: 0.0015
557
- },
558
- 'gpt-4-turbo': {
559
- provider: MODEL_PROVIDERS.OPENAI,
560
- contextWindow: 128000,
561
- maxTokens: 8192,
562
- costPer1kTokens: 0.03
563
- },
564
- 'gpt-3.5-turbo': {
565
- provider: MODEL_PROVIDERS.OPENAI,
566
- contextWindow: 16384,
567
- maxTokens: 4096,
568
- costPer1kTokens: 0.001
569
- },
570
-
571
- // DeepSeek models
572
- [MODELS.DEEPSEEK_R1]: {
573
- provider: MODEL_PROVIDERS.DEEPSEEK,
574
- contextWindow: 128000,
575
- maxTokens: 8192,
576
- costPer1kTokens: 0.002
577
- },
578
-
579
- // Phi models
580
- [MODELS.PHI_4]: {
581
- provider: MODEL_PROVIDERS.PHI,
582
- contextWindow: 16384,
583
- maxTokens: 4096, // Increased from 2048
584
- costPer1kTokens: 0.001
585
- },
586
-
587
- // Azure AI Foundry models
588
- 'azure-ai-grok3': {
589
- provider: 'AZURE',
590
- contextWindow: 128000,
591
- maxTokens: 8192, // Increased from 4096
592
- costPer1kTokens: 0.01
593
- },
594
- 'azure-ai-deepseek-r1': {
595
- provider: 'AZURE',
596
- contextWindow: 128000,
597
- maxTokens: 8192,
598
- costPer1kTokens: 0.002
599
- },
600
- 'azure-openai-gpt-5': {
601
- provider: 'AZURE',
602
- contextWindow: 128000,
603
- maxTokens: 8192,
604
- costPer1kTokens: 0.03
605
- },
606
- 'azure-openai-gpt-4': {
607
- provider: 'AZURE',
608
- contextWindow: 128000,
609
- maxTokens: 8192,
610
- costPer1kTokens: 0.03
611
- },
612
- 'azure-openai-gpt-4o': {
613
- provider: 'AZURE',
614
- contextWindow: 128000,
615
- maxTokens: 8192,
616
- costPer1kTokens: 0.03
617
- },
618
-
619
- // Router model
620
- 'autopilot-model-router': {
621
- provider: 'AZURE',
622
- contextWindow: 16384,
623
- maxTokens: 2048,
624
- costPer1kTokens: 0.001
625
- }
626
- };
627
-
628
- // No need for prefixed models anymore - just return clean base specs
629
- return baseSpecs;
630
- }
631
-
632
- /**
633
- * Format messages for specific model
634
- * @private
635
- */
636
- _formatMessagesForModel(messages, model, options) {
637
- // Get model spec or use default
638
- const modelSpec = this.modelSpecs[model] || { provider: 'AZURE' };
639
-
640
- let formattedMessages;
641
-
642
- if (typeof messages === 'string') {
643
- // Single message
644
- formattedMessages = [{
645
- role: 'user',
646
- content: messages
647
- }];
648
- } else {
649
- // Message array
650
- formattedMessages = messages.map(msg => this._formatSingleMessage(msg, model));
651
- }
652
-
653
- // Apply provider-specific formatting
654
- switch (modelSpec.provider) {
655
- case MODEL_PROVIDERS.ANTHROPIC:
656
- return this._formatForAnthropic(formattedMessages);
657
- case MODEL_PROVIDERS.OPENAI:
658
- return this._formatForOpenAI(formattedMessages);
659
- case MODEL_PROVIDERS.AZURE:
660
- return this._formatForAzure(formattedMessages);
661
- default:
662
- return formattedMessages;
663
- }
664
- }
665
-
666
- /**
667
- * Format single message for model
668
- * @private
669
- */
670
- _formatSingleMessage(message, model) {
671
- return {
672
- role: message.role || 'user',
673
- content: message.content,
674
- timestamp: message.timestamp
675
- };
676
- }
677
-
678
- /**
679
- * Format messages for Anthropic models
680
- * @private
681
- */
682
- _formatForAnthropic(messages) {
683
- return messages.map(msg => {
684
- if (msg.role === 'system') {
685
- return {
686
- role: 'user',
687
- content: `System: ${msg.content}`
688
- };
689
- }
690
- return msg;
691
- });
692
- }
693
-
694
- /**
695
- * Format messages for OpenAI models
696
- * @private
697
- */
698
- _formatForOpenAI(messages) {
699
- // OpenAI supports system role natively
700
- return messages;
701
- }
702
-
703
- /**
704
- * Format messages for Azure models
705
- * @private
706
- */
707
- _formatForAzure(messages) {
708
- // Azure may have specific formatting requirements
709
- return messages.map(msg => ({
710
- ...msg,
711
- content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
712
- }));
713
- }
714
-
715
- /**
716
- * Check if model is valid
717
- * @private
718
- */
719
- _isValidModel(model) {
720
- this.logger.debug('Validating model', { model, modelType: typeof model });
721
-
722
- // Check if model exists in our specs directly
723
- if (this.modelSpecs[model] !== undefined) {
724
- return true;
725
- }
726
-
727
- this.logger.warn('Model validation failed', {
728
- model,
729
- availableModels: Object.keys(this.modelSpecs)
730
- });
731
-
732
- return false;
733
- }
734
-
735
- /**
736
- * Check model health status
737
- * @private
738
- */
739
- async _checkModelHealth(model) {
740
- // Implementation would check model-specific health endpoints
741
- // For now, return true (assuming all models are healthy)
742
- return true;
743
- }
744
-
745
- /**
746
- * Estimate tokens for content
747
- * @private
748
- */
749
- async _estimateTokens(content, model) {
750
- // Rough estimation: 1 token ≈ 4 characters for most models
751
- return Math.ceil(content.length / 4);
752
- }
753
-
754
- /**
755
- * Truncate message to fit token limit
756
- * @private
757
- */
758
- async _truncateMessage(content, maxTokens) {
759
- const maxChars = maxTokens * 4; // Rough estimation
760
- if (content.length <= maxChars) {
761
- return content;
762
- }
763
-
764
- return content.substring(0, maxChars - 20) + '\n... [message truncated]';
765
- }
766
-
767
- /**
768
- * Summarize messages for compactization
769
- * @private
770
- */
771
- async _summarizeMessages(messages, model) {
772
- const combinedContent = messages
773
- .map(msg => `${msg.role}: ${msg.content}`)
774
- .join('\n');
775
-
776
- // This would use the AI service to create a summary
777
- // For now, return a simple truncated version
778
- const maxLength = 500;
779
- if (combinedContent.length <= maxLength) {
780
- return combinedContent;
781
- }
782
-
783
- return combinedContent.substring(0, maxLength) + '... [conversation summary truncated]';
784
- }
785
-
786
- /**
787
- * Make API request with retry logic
788
- * @private
789
- */
790
- async _makeAPIRequest(endpoint, payload, requestId, options = {}) {
791
- // Make request directly to Azure backend (not through local proxy)
792
- const azureBackendUrl = 'https://autopilot-api.azurewebsites.net/llm/chat';
793
- let lastError;
794
-
795
- // Get API keys from session-based storage
796
- let apiKey = null;
797
- let vendorApiKey = null;
798
-
799
- // Log the state for debugging
800
- this.logger?.info('🔑 API Key retrieval state', {
801
- hasApiKeyManager: !!this.apiKeyManager,
802
- sessionId: options.sessionId,
803
- hasSessionId: !!options.sessionId,
804
- optionsKeys: Object.keys(options),
805
- model: payload.model
806
- });
807
-
808
- // First try to get from API key manager using session ID
809
- if (this.apiKeyManager && options.sessionId) {
810
- const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
811
- platformProvided: options.platformProvided || false,
812
- vendor: this._getVendorFromModel(payload.model)
813
- });
814
-
815
- apiKey = keys.loxiaApiKey;
816
- vendorApiKey = keys.vendorApiKey;
817
-
818
- this.logger?.debug('Retrieved API keys from session manager', {
819
- sessionId: options.sessionId,
820
- hasLoxiaKey: !!apiKey,
821
- hasVendorKey: !!vendorApiKey,
822
- vendor: this._getVendorFromModel(payload.model)
823
- });
824
- }
825
-
826
- // Fallback to options (passed from frontend)
827
- if (!apiKey && options.apiKey) {
828
- apiKey = options.apiKey;
829
- }
830
-
831
- // Fallback to config if no API key from session or options
832
- if (!apiKey && this.config.apiKey) {
833
- apiKey = this.config.apiKey;
834
- }
835
-
836
- if (!apiKey) {
837
- throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
838
- }
839
-
840
- // Use the model name from payload (already transformed)
841
- const modelName = payload.model;
842
-
843
- // Transform the payload to match the Azure backend API format
844
- const azurePayload = {
845
- conversationId: requestId,
846
- message: payload.messages[payload.messages.length - 1]?.content || '',
847
- messages: payload.messages,
848
- model: modelName,
849
- requestId,
850
- options: payload.options || {},
851
- platformProvided: options.platformProvided || false // Indicate if this is a platform model
852
- };
853
-
854
- // Add system prompt if provided
855
- if (payload.system) {
856
- azurePayload.systemPrompt = payload.system;
857
- }
858
-
859
- // Include appropriate API key based on model type
860
- if (options.platformProvided) {
861
- // Platform models use Loxia API key
862
- azurePayload.apiKey = apiKey;
863
- } else {
864
- // Direct access models use vendor-specific keys
865
- if (vendorApiKey) {
866
- azurePayload.vendorApiKey = vendorApiKey;
867
- }
868
-
869
- // Also include custom API keys from options for backward compatibility
870
- if (options.customApiKeys) {
871
- azurePayload.customApiKeys = options.customApiKeys;
872
- }
873
-
874
- // Include Loxia API key as fallback
875
- azurePayload.apiKey = apiKey;
876
- }
877
-
878
- for (let attempt = 1; attempt <= this.retryAttempts; attempt++) {
879
- try {
880
- this.logger.info('Making request to Azure backend', {
881
- url: azureBackendUrl,
882
- model: payload.model,
883
- requestId,
884
- attempt,
885
- hasApiKey: !!apiKey
886
- });
887
-
888
- const response = await fetch(azureBackendUrl, {
889
- method: 'POST',
890
- headers: {
891
- 'Content-Type': 'application/json',
892
- 'Authorization': `Bearer ${apiKey}`,
893
- 'X-Request-ID': requestId
894
- },
895
- body: JSON.stringify(azurePayload),
896
- timeout: this.timeout
897
- });
898
-
899
- if (!response.ok) {
900
- const errorText = await response.text();
901
- const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
902
- error.status = response.status;
903
- throw error;
904
- }
905
-
906
- const data = await response.json();
907
-
908
- // Transform Azure backend response to match expected format
909
- return {
910
- choices: [{
911
- message: {
912
- content: data.content || '',
913
- role: 'assistant'
914
- },
915
- finish_reason: data.finishReason || 'stop'
916
- }],
917
- usage: data.usage || {
918
- prompt_tokens: 0,
919
- completion_tokens: 0,
920
- total_tokens: 0
921
- },
922
- model: data.model || payload.model,
923
- id: data.requestId || requestId
924
- };
925
-
926
- } catch (error) {
927
- lastError = error;
928
-
929
- this.logger.warn('Request to Azure backend failed', {
930
- attempt,
931
- requestId,
932
- error: error.message,
933
- status: error.status
934
- });
935
-
936
- if (attempt < this.retryAttempts) {
937
- const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
938
- this.logger.warn(`Retrying in ${delay}ms`, { attempt, requestId });
939
- await new Promise(resolve => setTimeout(resolve, delay));
940
- }
941
- }
942
- }
943
-
944
- // If all attempts failed, return a mock response with error info
945
- this.logger.error('All attempts to reach backend failed, using mock response', {
946
- requestId,
947
- error: lastError.message
948
- });
949
-
950
- // Check if this is an API key error that should stop retries
951
- const isApiKeyError = lastError.message && lastError.message.includes('No API key configured');
952
-
953
- // For API key errors, throw to stop the autonomous loop
954
- if (isApiKeyError) {
955
- throw new Error(`API Configuration Error: ${lastError.message}`);
956
- }
957
-
958
- // For other errors, return a short error message without including the original content
959
- return {
960
- choices: [{
961
- message: {
962
- content: `I apologize, but I'm unable to connect to the AI service at the moment. Error: ${lastError.message}`,
963
- role: 'assistant'
964
- },
965
- finish_reason: 'stop'
966
- }],
967
- usage: {
968
- prompt_tokens: 0,
969
- completion_tokens: 0,
970
- total_tokens: 0
971
- },
972
- model: payload.model,
973
- id: `error-${requestId}`,
974
- error: lastError.message
975
- };
976
- }
977
-
978
-
979
- /**
980
- * Make API request for image generation
981
- * @private
982
- */
983
- async _makeImageAPIRequest(payload, requestId, options = {}) {
984
- // Image generation endpoint on Azure backend (CORRECTED)
985
- const azureImageUrl = 'https://autopilot-api.azurewebsites.net/llm/generate-image';
986
- let lastError;
987
-
988
- // Get API keys from session-based storage
989
- let apiKey = null;
990
- let vendorApiKey = null;
991
-
992
- this.logger?.info('🖼️ Image API request state', {
993
- hasApiKeyManager: !!this.apiKeyManager,
994
- sessionId: options.sessionId,
995
- hasSessionId: !!options.sessionId,
996
- model: payload.model
997
- });
998
-
999
- // First try to get from API key manager using session ID
1000
- if (this.apiKeyManager && options.sessionId) {
1001
- const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
1002
- platformProvided: options.platformProvided || false,
1003
- vendor: this._getVendorFromModel(payload.model)
1004
- });
1005
-
1006
- apiKey = keys.loxiaApiKey;
1007
- vendorApiKey = keys.vendorApiKey;
1008
-
1009
- this.logger?.debug('Retrieved API keys from session manager for image', {
1010
- sessionId: options.sessionId,
1011
- hasLoxiaKey: !!apiKey,
1012
- hasVendorKey: !!vendorApiKey
1013
- });
1014
- }
1015
-
1016
- // Fallback to options (passed from frontend)
1017
- if (!apiKey && options.apiKey) {
1018
- apiKey = options.apiKey;
1019
- }
1020
-
1021
- // Fallback to config if no API key from session or options
1022
- if (!apiKey && this.config.apiKey) {
1023
- apiKey = this.config.apiKey;
1024
- }
1025
-
1026
- if (!apiKey) {
1027
- throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
1028
- }
1029
-
1030
- // Transform the payload to match Azure backend format
1031
- // Backend expects: prompt, model, size, quality, style, n, requestId
1032
- const azurePayload = {
1033
- prompt: payload.prompt,
1034
- model: payload.model || 'azure-openai-dalle3', // Backend default
1035
- size: payload.size,
1036
- quality: payload.quality,
1037
- style: payload.style || 'vivid', // Backend default
1038
- n: payload.n || 1,
1039
- requestId
1040
- };
1041
-
1042
- // API key is sent via Authorization header, not in body
1043
-
1044
- for (let attempt = 1; attempt <= this.retryAttempts; attempt++) {
1045
- try {
1046
- this.logger.info('Making image request to Azure backend', {
1047
- url: azureImageUrl,
1048
- model: payload.model,
1049
- requestId,
1050
- attempt,
1051
- hasApiKey: !!apiKey
1052
- });
1053
-
1054
- const response = await fetch(azureImageUrl, {
1055
- method: 'POST',
1056
- headers: {
1057
- 'Content-Type': 'application/json',
1058
- 'Authorization': `Bearer ${apiKey}`,
1059
- 'X-Request-ID': requestId
1060
- },
1061
- body: JSON.stringify(azurePayload),
1062
- timeout: this.timeout * 2 // Image generation may take longer
1063
- });
1064
-
1065
- if (!response.ok) {
1066
- const errorText = await response.text();
1067
- const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
1068
- error.status = response.status;
1069
- throw error;
1070
- }
1071
-
1072
- const data = await response.json();
1073
-
1074
- // Backend returns: { images: [...], usage: {...}, model, requestId, created }
1075
- // Transform to match our expected format
1076
- return {
1077
- data: data.images || [],
1078
- model: data.model || payload.model,
1079
- created: data.created || Date.now(),
1080
- usage: data.usage
1081
- };
1082
-
1083
- } catch (error) {
1084
- lastError = error;
1085
-
1086
- this.logger.warn('Image request to Azure backend failed', {
1087
- attempt,
1088
- requestId,
1089
- error: error.message,
1090
- status: error.status
1091
- });
1092
-
1093
- if (attempt < this.retryAttempts) {
1094
- const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
1095
- this.logger.warn(`Retrying in ${delay}ms`, { attempt, requestId });
1096
- await new Promise(resolve => setTimeout(resolve, delay));
1097
- }
1098
- }
1099
- }
1100
-
1101
- // If all attempts failed, throw error
1102
- this.logger.error('All image generation attempts failed', {
1103
- requestId,
1104
- error: lastError.message
1105
- });
1106
-
1107
- throw new Error(`Image generation failed: ${lastError.message}`);
1108
- }
1109
-
1110
- /**
1111
- * Check rate limits
1112
- * @private
1113
- */
1114
- async _checkRateLimit(model) {
1115
- // Implementation would check model-specific rate limits
1116
- // For now, just add a small delay
1117
- await new Promise(resolve => setTimeout(resolve, 100));
1118
- }
1119
-
1120
- /**
1121
- * Handle rate limit exceeded
1122
- * @private
1123
- */
1124
- async _handleRateLimit(context) {
1125
- const delay = 60000; // 1 minute delay for rate limits
1126
- this.logger.info(`Rate limit exceeded, waiting ${delay}ms`, context);
1127
- await new Promise(resolve => setTimeout(resolve, delay));
1128
- }
1129
-
1130
- /**
1131
- * Handle service unavailable
1132
- * @private
1133
- */
1134
- async _handleServiceUnavailable(context) {
1135
- this._recordFailure();
1136
- const delay = 30000; // 30 second delay for service issues
1137
- this.logger.info(`Service unavailable, waiting ${delay}ms`, context);
1138
- await new Promise(resolve => setTimeout(resolve, delay));
1139
- }
1140
-
1141
- /**
1142
- * Check if circuit breaker is open
1143
- * @private
1144
- */
1145
- _isCircuitBreakerOpen() {
1146
- if (!this.circuitBreaker.isOpen) {
1147
- return false;
1148
- }
1149
-
1150
- const timeSinceLastFailure = Date.now() - this.circuitBreaker.lastFailureTime;
1151
- if (timeSinceLastFailure > this.circuitBreaker.timeout) {
1152
- this.circuitBreaker.isOpen = false;
1153
- this.circuitBreaker.failures = 0;
1154
- return false;
1155
- }
1156
-
1157
- return true;
1158
- }
1159
-
1160
- /**
1161
- * Record failure for circuit breaker
1162
- * @private
1163
- */
1164
- _recordFailure() {
1165
- this.circuitBreaker.failures++;
1166
- this.circuitBreaker.lastFailureTime = Date.now();
1167
-
1168
- if (this.circuitBreaker.failures >= this.circuitBreaker.threshold) {
1169
- this.circuitBreaker.isOpen = true;
1170
- this.logger.warn('Circuit breaker opened due to repeated failures');
1171
- }
1172
- }
1173
-
1174
- /**
1175
- * Reset circuit breaker on success
1176
- * @private
1177
- */
1178
- _resetCircuitBreaker() {
1179
- if (this.circuitBreaker.failures > 0) {
1180
- this.circuitBreaker.failures = 0;
1181
- this.circuitBreaker.isOpen = false;
1182
- this.logger.info('Circuit breaker reset - service recovered');
1183
- }
1184
- }
1185
-
1186
- /**
1187
- * Extract vendor name from model name
1188
- * @param {string} model - Model name
1189
- * @returns {string|null} Vendor name
1190
- * @private
1191
- */
1192
- _getVendorFromModel(model) {
1193
- if (!model) return null;
1194
-
1195
- const modelName = model.toLowerCase();
1196
-
1197
- if (modelName.includes('anthropic') || modelName.includes('claude')) {
1198
- return 'anthropic';
1199
- } else if (modelName.includes('openai') || modelName.includes('gpt')) {
1200
- return 'openai';
1201
- } else if (modelName.includes('deepseek')) {
1202
- return 'deepseek';
1203
- } else if (modelName.includes('phi')) {
1204
- return 'microsoft';
1205
- }
1206
-
1207
- return null;
1208
- }
1209
- }
1210
-
1211
- /**
1212
- * ConversationManager - Handles multi-model conversation state
1213
- */
1214
- class ConversationManager {
1215
- constructor(agentId, logger) {
1216
- this.agentId = agentId;
1217
- this.logger = logger;
1218
- this.conversations = new Map();
1219
- }
1220
-
1221
- async switchModel(newModel) {
1222
- // Implementation would handle model switching logic
1223
- // For now, return empty conversation
1224
- return {
1225
- messages: [],
1226
- model: newModel,
1227
- lastUpdated: new Date().toISOString()
1228
- };
1229
- }
1230
- }
1231
-
1232
- export default AIService;
1
+ const a0_0x1ee68b=a0_0x61d5;function a0_0x61d5(_0x4882a2,_0x573fad){_0x4882a2=_0x4882a2-0x155;const _0x220079=a0_0x2200();let _0x61d5a9=_0x220079[_0x4882a2];if(a0_0x61d5['scDwBa']===undefined){var _0x3ecd92=function(_0xd899fe){const _0x41632a='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/=';let _0x531ed3='',_0xe7e58a='';for(let _0x3272c9=0x0,_0x407c1c,_0x50d9f1,_0x4f551c=0x0;_0x50d9f1=_0xd899fe['charAt'](_0x4f551c++);~_0x50d9f1&&(_0x407c1c=_0x3272c9%0x4?_0x407c1c*0x40+_0x50d9f1:_0x50d9f1,_0x3272c9++%0x4)?_0x531ed3+=String['fromCharCode'](0xff&_0x407c1c>>(-0x2*_0x3272c9&0x6)):0x0){_0x50d9f1=_0x41632a['indexOf'](_0x50d9f1);}for(let _0x4df59f=0x0,_0x444377=_0x531ed3['length'];_0x4df59f<_0x444377;_0x4df59f++){_0xe7e58a+='%'+('00'+_0x531ed3['charCodeAt'](_0x4df59f)['toString'](0x10))['slice'](-0x2);}return decodeURIComponent(_0xe7e58a);};a0_0x61d5['mKhoZX']=_0x3ecd92,a0_0x61d5['bCbyJz']={},a0_0x61d5['scDwBa']=!![];}const _0x45f9a0=_0x220079[0x0],_0x5c5c6d=_0x4882a2+_0x45f9a0,_0x42bc2d=a0_0x61d5['bCbyJz'][_0x5c5c6d];return!_0x42bc2d?(_0x61d5a9=a0_0x61d5['mKhoZX'](_0x61d5a9),a0_0x61d5['bCbyJz'][_0x5c5c6d]=_0x61d5a9):_0x61d5a9=_0x42bc2d,_0x61d5a9;}(function(_0x3249f4,_0x4dcd8f){const _0xb0e11=a0_0x61d5,_0x2cb7ca=_0x3249f4();while(!![]){try{const _0x1ea631=-parseInt(_0xb0e11(0x16a))/0x1*(parseInt(_0xb0e11(0x1aa))/0x2)+-parseInt(_0xb0e11(0x1b5))/0x3+parseInt(_0xb0e11(0x1e4))/0x4+-parseInt(_0xb0e11(0x199))/0x5+parseInt(_0xb0e11(0x191))/0x6+-parseInt(_0xb0e11(0x16b))/0x7+-parseInt(_0xb0e11(0x160))/0x8*(-parseInt(_0xb0e11(0x1e2))/0x9);if(_0x1ea631===_0x4dcd8f)break;else _0x2cb7ca['push'](_0x2cb7ca['shift']());}catch(_0x3394eb){_0x2cb7ca['push'](_0x2cb7ca['shift']());}}}(a0_0x2200,0x4cad3));import{MODELS,MODEL_PROVIDERS,MODEL_ROUTING,HTTP_STATUS,ERROR_TYPES,SYSTEM_DEFAULTS}from'../utilities/constants.js';class AIService{constructor(_0x531ed3,_0xe7e58a,_0x3272c9,_0x407c1c){const _0x333a73=a0_0x61d5;this[_0x333a73(0x19b)]=_0x531ed3,this[_0x333a73(0x193)]=_0xe7e58a,this[_0x333a73(0x1c7)]=_0x3272c9,this['errorHandler']=_0x407c1c,this['baseUrl']=_0x531ed3[_0x333a73(0x190)]?.[_0x333a73(0x1a0)]||'https://api.loxia.ai',this[_0x333a73(0x1ce)]=_0x531ed3['backend']?.['timeout']||0xea60,this[_0x333a73(0x1cb)]=_0x531ed3['backend']?.[_0x333a73(0x1cb)]||0x3,this['rateLimiters']=new Map(),this['requestQueue']=new Map(),this[_0x333a73(0x162)]={'failures':0x0,'lastFailureTime':null,'isOpen':![],'threshold':0x5,'timeout':0x7530},this[_0x333a73(0x17f)]=this['_initializeModelSpecs'](),this[_0x333a73(0x18a)]=new Map(),this['apiKeyManager']=null,this[_0x333a73(0x1eb)]=null;}async[a0_0x1ee68b(0x1f2)](_0x50d9f1,_0x4f551c,_0x4df59f={}){const _0x4f9ecc=a0_0x1ee68b,_0x444377='req-'+Date[_0x4f9ecc(0x1cc)]()+'-'+Math['random']()[_0x4f9ecc(0x1d1)](0x24)['substr'](0x2,0x9);try{if(this['_isCircuitBreakerOpen']())throw new Error(_0x4f9ecc(0x192));await this['_checkRateLimit'](_0x50d9f1);const _0x3d7017=this['_formatMessagesForModel'](_0x4f551c,_0x50d9f1,_0x4df59f),_0x3412d9={'model':_0x50d9f1,'messages':_0x3d7017,'options':{'max_tokens':this[_0x4f9ecc(0x17f)][_0x50d9f1]?.['maxTokens']||0xfa0,'temperature':_0x4df59f[_0x4f9ecc(0x1a6)]||0.7,'stream':_0x4df59f[_0x4f9ecc(0x179)]||![]},'metadata':{'requestId':_0x444377,'agentId':_0x4df59f[_0x4f9ecc(0x1bd)],'timestamp':new 
Date()['toISOString']()}};_0x4df59f['systemPrompt']&&(_0x3412d9[_0x4f9ecc(0x19f)]=_0x4df59f['systemPrompt']);this['logger'][_0x4f9ecc(0x1ba)]('Sending\x20message\x20to\x20model:\x20'+_0x50d9f1,{'requestId':_0x444377,'agentId':_0x4df59f[_0x4f9ecc(0x1bd)],'messageCount':Array[_0x4f9ecc(0x1b9)](_0x4f551c)?_0x4f551c[_0x4f9ecc(0x1c6)]:0x1,'maxTokens':_0x3412d9['max_tokens']});const _0x2883eb=await this['_makeAPIRequest'](_0x4f9ecc(0x172),_0x3412d9,_0x444377,{..._0x4df59f,'sessionId':_0x4df59f[_0x4f9ecc(0x164)],'platformProvided':_0x4df59f['platformProvided']||![]});return _0x2883eb['usage']&&await this[_0x4f9ecc(0x1e9)](_0x4df59f[_0x4f9ecc(0x1bd)],_0x50d9f1,{'prompt_tokens':_0x2883eb[_0x4f9ecc(0x186)][_0x4f9ecc(0x1e0)]||0x0,'completion_tokens':_0x2883eb[_0x4f9ecc(0x186)]['completion_tokens']||0x0,'total_tokens':_0x2883eb['usage'][_0x4f9ecc(0x184)]||0x0}),this['_resetCircuitBreaker'](),{'content':_0x2883eb['choices'][0x0]?.['message']?.[_0x4f9ecc(0x1df)]||'','model':_0x2883eb[_0x4f9ecc(0x1b1)],'tokenUsage':_0x2883eb['usage'],'requestId':_0x444377,'finishReason':_0x2883eb['choices'][0x0]?.['finish_reason']};}catch(_0x3b5a2b){this[_0x4f9ecc(0x1b0)](),this[_0x4f9ecc(0x193)]['error']('AI\x20service\x20request\x20failed:\x20'+_0x3b5a2b['message'],{'requestId':_0x444377,'model':_0x50d9f1,'agentId':_0x4df59f[_0x4f9ecc(0x1bd)],'error':_0x3b5a2b[_0x4f9ecc(0x180)]}),await this['handleHttpError'](_0x3b5a2b,{'requestId':_0x444377,'model':_0x50d9f1,'agentId':_0x4df59f[_0x4f9ecc(0x1bd)]});throw _0x3b5a2b;}}async[a0_0x1ee68b(0x1a3)](_0x3b42cb,_0x34a913={}){const _0x40517c=a0_0x1ee68b;try{const _0x15ec8f=MODEL_ROUTING[_0x3b42cb[_0x40517c(0x177)]()]||MODEL_ROUTING[_0x40517c(0x197)],_0x4334c0=[];for(const _0xb56c42 of _0x15ec8f){const _0x479560=await this[_0x40517c(0x1b2)](_0xb56c42);_0x479560&&_0x4334c0[_0x40517c(0x168)](_0xb56c42);}if(_0x4334c0[_0x40517c(0x1c6)]===0x0)return this['logger'][_0x40517c(0x1ef)](_0x40517c(0x1a5)+_0x3b42cb+',\x20using\x20fallback'),MODEL_ROUTING['FALLBACK'][0x0];let _0xaec4f9=_0x4334c0[0x0];if(_0x34a913[_0x40517c(0x1ae)]&&_0x4334c0[_0x40517c(0x1de)](_0x34a913[_0x40517c(0x1ae)]))_0xaec4f9=_0x34a913[_0x40517c(0x1ae)];else{if(_0x34a913['complexity']==='high'&&_0x4334c0[_0x40517c(0x1de)](MODELS[_0x40517c(0x17e)]))_0xaec4f9=MODELS[_0x40517c(0x17e)];else _0x34a913[_0x40517c(0x1a7)]===_0x40517c(0x1ac)&&_0x4334c0['includes'](MODELS[_0x40517c(0x16e)])&&(_0xaec4f9=MODELS[_0x40517c(0x16e)]);}return this[_0x40517c(0x193)]['info'](_0x40517c(0x196)+_0x3b42cb,{'selectedModel':_0xaec4f9,'availableModels':_0x4334c0['length'],'context':_0x34a913}),_0xaec4f9;}catch(_0x10bfd4){return this[_0x40517c(0x193)]['error'](_0x40517c(0x189)+_0x10bfd4[_0x40517c(0x1ab)],{'task':_0x3b42cb,'context':_0x34a913}),MODEL_ROUTING[_0x40517c(0x197)][0x0];}}async[a0_0x1ee68b(0x155)](_0x52040f,_0x1183b0){const _0x50bd29=a0_0x1ee68b,_0x58869b=this[_0x50bd29(0x17f)][_0x1183b0];if(!_0x58869b)throw new Error(_0x50bd29(0x1d4)+_0x1183b0);const _0x5e76c7=_0x58869b['contextWindow']*0.8;let _0x4f2f74=0x0;const _0x3af523=[],_0x5138d4=await Promise['all'](_0x52040f[_0x50bd29(0x1cd)](async _0x1eda0b=>({..._0x1eda0b,'estimatedTokens':await this['_estimateTokens'](_0x1eda0b[_0x50bd29(0x1df)],_0x1183b0)}))),_0x3d8831=[..._0x5138d4][_0x50bd29(0x1bb)]();for(const _0x22f69d of _0x3d8831){if(_0x4f2f74+_0x22f69d[_0x50bd29(0x1dd)]>_0x5e76c7){if(_0x3af523['length']===0x0){const _0x146d65=await 
this[_0x50bd29(0x1e5)](_0x22f69d['content'],_0x5e76c7);_0x3af523['unshift']({..._0x22f69d,'content':_0x146d65,'estimatedTokens':_0x5e76c7});}break;}_0x3af523['unshift'](_0x22f69d),_0x4f2f74+=_0x22f69d[_0x50bd29(0x1dd)];}const _0x4d0a8e=_0x5138d4['slice'](0x0,-_0x3af523[_0x50bd29(0x1c6)]);if(_0x4d0a8e['length']>0x0){const _0x1182f9=await this['_summarizeMessages'](_0x4d0a8e,_0x1183b0);_0x3af523[_0x50bd29(0x18f)]({'role':'system','content':'Previous\x20conversation\x20summary:\x20'+_0x1182f9,'timestamp':_0x4d0a8e[0x0]['timestamp'],'type':'summary','estimatedTokens':await this[_0x50bd29(0x1c9)](_0x1182f9,_0x1183b0)});}return this[_0x50bd29(0x193)][_0x50bd29(0x1ba)]('Conversation\x20compactized\x20for\x20model:\x20'+_0x1183b0,{'originalMessages':_0x52040f[_0x50bd29(0x1c6)],'compactizedMessages':_0x3af523['length'],'estimatedTokens':_0x4f2f74,'maxTokens':_0x5e76c7}),_0x3af523;}async[a0_0x1ee68b(0x1e9)](_0x53fdb2,_0x469296,_0xd749e3,_0x4f6a06){const _0x5b0743=a0_0x1ee68b;try{this[_0x5b0743(0x1c7)]&&this[_0x5b0743(0x1c7)][_0x5b0743(0x1e9)](_0x53fdb2,_0x469296,_0xd749e3);}catch(_0x5501b7){this['logger']['error'](_0x5b0743(0x17a)+_0x5501b7[_0x5b0743(0x1ab)]);}}async[a0_0x1ee68b(0x1ea)](_0x599cb9,_0x2c8aeb){const _0xa9f84a=a0_0x1ee68b,_0x122488=this[_0xa9f84a(0x17c)]?.[_0xa9f84a(0x15f)]?.(_0x599cb9,_0x2c8aeb);switch(_0x599cb9[_0xa9f84a(0x1d0)]||_0x599cb9[_0xa9f84a(0x157)]){case HTTP_STATUS[_0xa9f84a(0x1c0)]:this['logger']['error'](_0xa9f84a(0x166),{'context':_0x2c8aeb,'error':_0x599cb9[_0xa9f84a(0x1ab)]});throw new Error(_0xa9f84a(0x1e1)+_0x599cb9[_0xa9f84a(0x1ab)]);case HTTP_STATUS['UNAUTHORIZED']:this[_0xa9f84a(0x193)][_0xa9f84a(0x1be)]('Authentication\x20failed\x20with\x20AI\x20service',{'context':_0x2c8aeb});throw new Error('Authentication\x20failed\x20-\x20check\x20API\x20credentials');case HTTP_STATUS['FORBIDDEN']:this[_0xa9f84a(0x193)]['error']('Access\x20forbidden\x20to\x20AI\x20service',{'context':_0x2c8aeb});throw new Error('Access\x20forbidden\x20-\x20insufficient\x20permissions');case HTTP_STATUS['NOT_FOUND']:this['logger']['error'](_0xa9f84a(0x167),{'context':_0x2c8aeb});throw new Error(_0xa9f84a(0x1ad));case HTTP_STATUS['TOO_MANY_REQUESTS']:this[_0xa9f84a(0x193)][_0xa9f84a(0x1ef)]('Rate\x20limit\x20exceeded',{'context':_0x2c8aeb}),await this[_0xa9f84a(0x1af)](_0x2c8aeb);throw new Error(_0xa9f84a(0x1d7));case HTTP_STATUS[_0xa9f84a(0x1b4)]:case HTTP_STATUS['BAD_GATEWAY']:case HTTP_STATUS['SERVICE_UNAVAILABLE']:case HTTP_STATUS[_0xa9f84a(0x1f0)]:this['logger']['error']('AI\x20service\x20unavailable',{'context':_0x2c8aeb,'status':_0x599cb9[_0xa9f84a(0x1d0)]}),await this['_handleServiceUnavailable'](_0x2c8aeb);throw new Error('AI\x20service\x20temporarily\x20unavailable');default:this['logger'][_0xa9f84a(0x1be)](_0xa9f84a(0x18e),{'context':_0x2c8aeb,'error':_0x599cb9['message']});throw new Error('AI\x20service\x20error:\x20'+_0x599cb9['message']);}}['setApiKeyManager'](_0x23807c){const _0xd2e33=a0_0x1ee68b;this['apiKeyManager']=_0x23807c,this[_0xd2e33(0x193)]?.['info'](_0xd2e33(0x1d3),{'hasManager':!!_0x23807c});}['setAgentPool'](_0x10d559){const _0x494664=a0_0x1ee68b;this['agentPool']=_0x10d559,this[_0x494664(0x193)]?.[_0x494664(0x1ba)]('Agent\x20pool\x20set\x20for\x20AI\x20service',{'hasAgentPool':!!_0x10d559});}async[a0_0x1ee68b(0x159)](_0x2e205b,_0x4a9895={}){const _0x1bf961=a0_0x1ee68b,_0x4943d0='img-'+Date[_0x1bf961(0x1cc)]()+'-'+Math['random']()[_0x1bf961(0x1d1)](0x24)['substr'](0x2,0x9);try{if(this[_0x1bf961(0x16c)]())throw new Error(_0x1bf961(0x192));if(!_0x2e205b||typeof 
_0x2e205b!=='string'||_0x2e205b[_0x1bf961(0x156)]()['length']===0x0)throw new Error('Image\x20generation\x20requires\x20a\x20non-empty\x20text\x20prompt');const _0x3c5749=_0x4a9895[_0x1bf961(0x1b1)]||'flux-1.1-pro',_0x497d69=_0x4a9895['size']||'1024x1024',_0x55456b=_0x4a9895[_0x1bf961(0x15a)]||_0x1bf961(0x1c2),_0x1958a9=_0x4a9895['responseFormat']||_0x1bf961(0x194);this['logger']['info'](_0x1bf961(0x16f)+_0x3c5749,{'requestId':_0x4943d0,'model':_0x3c5749,'size':_0x497d69,'quality':_0x55456b,'promptLength':_0x2e205b[_0x1bf961(0x1c6)]});const _0xad2bc3={'prompt':_0x2e205b,'model':_0x3c5749,'size':_0x497d69,'quality':_0x55456b,'response_format':_0x1958a9,'n':0x1,'metadata':{'requestId':_0x4943d0,'timestamp':new Date()[_0x1bf961(0x195)]()}},_0x39a37a=await this[_0x1bf961(0x181)](_0xad2bc3,_0x4943d0,{'sessionId':_0x4a9895[_0x1bf961(0x164)],'platformProvided':_0x4a9895['platformProvided']||![]});return this[_0x1bf961(0x1d2)](),{'url':_0x39a37a['data']?.[0x0]?.['url']||null,'b64_json':_0x39a37a['data']?.[0x0]?.[_0x1bf961(0x18c)]||null,'model':_0x39a37a['model']||_0x3c5749,'requestId':_0x4943d0,'revisedPrompt':_0x39a37a[_0x1bf961(0x1a4)]?.[0x0]?.['revised_prompt']||_0x2e205b};}catch(_0x2459d0){this['_recordFailure'](),this[_0x1bf961(0x193)]['error']('Image\x20generation\x20failed:\x20'+_0x2459d0['message'],{'requestId':_0x4943d0,'model':_0x4a9895['model'],'error':_0x2459d0[_0x1bf961(0x180)]});throw _0x2459d0;}}async[a0_0x1ee68b(0x185)](){const _0x1eb631=a0_0x1ee68b;try{const _0x4b91e9=await this[_0x1eb631(0x1bc)]('/health',{},'health-check');return _0x4b91e9[_0x1eb631(0x1d0)]===_0x1eb631(0x183);}catch(_0x2339a4){return![];}}async['switchAgentModel'](_0x3c3840,_0xe358f5){const _0x18c45d=a0_0x1ee68b;try{if(!this['_isValidModel'](_0xe358f5))throw new Error('Invalid\x20model:\x20'+_0xe358f5);let _0xb3a02e=this[_0x18c45d(0x18a)]['get'](_0x3c3840);!_0xb3a02e&&(_0xb3a02e=new ConversationManager(_0x3c3840,this['logger']),this['conversationManagers'][_0x18c45d(0x1e6)](_0x3c3840,_0xb3a02e));const _0x5e79f1=await _0xb3a02e['switchModel'](_0xe358f5),_0x148b0d=await this['agentPool']?.[_0x18c45d(0x174)](_0x3c3840);return _0x148b0d&&(_0x148b0d[_0x18c45d(0x19d)]=_0xe358f5,await this['agentPool'][_0x18c45d(0x1a2)](_0x3c3840)),this[_0x18c45d(0x193)][_0x18c45d(0x1ba)](_0x18c45d(0x16d)+_0x3c3840,{'newModel':_0xe358f5,'messageCount':_0x5e79f1[_0x18c45d(0x1c8)]['length']}),{'success':!![],'agentId':_0x3c3840,'newModel':_0xe358f5,'conversation':_0x5e79f1};}catch(_0x338f03){this['logger'][_0x18c45d(0x1be)](_0x18c45d(0x1c3)+_0x338f03[_0x18c45d(0x1ab)],{'agentId':_0x3c3840,'newModel':_0xe358f5});throw _0x338f03;}}[a0_0x1ee68b(0x15b)](){const 
_0x2618eb=a0_0x1ee68b,_0x3592db={[MODELS['ANTHROPIC_SONNET']]:{'provider':MODEL_PROVIDERS['ANTHROPIC'],'contextWindow':0x30d40,'maxTokens':0x2000,'costPer1kTokens':0.015},[MODELS['ANTHROPIC_OPUS']]:{'provider':MODEL_PROVIDERS['ANTHROPIC'],'contextWindow':0x30d40,'maxTokens':0x2000,'costPer1kTokens':0.075},[MODELS[_0x2618eb(0x16e)]]:{'provider':MODEL_PROVIDERS['ANTHROPIC'],'contextWindow':0x30d40,'maxTokens':0x2000,'costPer1kTokens':0.0025},[MODELS[_0x2618eb(0x1c1)]]:{'provider':MODEL_PROVIDERS['OPENAI'],'contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.03},[MODELS['GPT_4_MINI']]:{'provider':MODEL_PROVIDERS[_0x2618eb(0x1c4)],'contextWindow':0x1f400,'maxTokens':0x4000,'costPer1kTokens':0.0015},'gpt-4o':{'provider':MODEL_PROVIDERS['OPENAI'],'contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.03},'gpt-4o-mini':{'provider':MODEL_PROVIDERS['OPENAI'],'contextWindow':0x1f400,'maxTokens':0x4000,'costPer1kTokens':0.0015},'gpt-4-turbo':{'provider':MODEL_PROVIDERS[_0x2618eb(0x1c4)],'contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.03},'gpt-3.5-turbo':{'provider':MODEL_PROVIDERS[_0x2618eb(0x1c4)],'contextWindow':0x4000,'maxTokens':0x1000,'costPer1kTokens':0.001},[MODELS['DEEPSEEK_R1']]:{'provider':MODEL_PROVIDERS[_0x2618eb(0x163)],'contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.002},[MODELS['PHI_4']]:{'provider':MODEL_PROVIDERS[_0x2618eb(0x1ca)],'contextWindow':0x4000,'maxTokens':0x1000,'costPer1kTokens':0.001},'azure-ai-grok3':{'provider':'AZURE','contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.01},'azure-ai-deepseek-r1':{'provider':_0x2618eb(0x182),'contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.002},'azure-openai-gpt-5':{'provider':'AZURE','contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.03},'azure-openai-gpt-4':{'provider':'AZURE','contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.03},'azure-openai-gpt-4o':{'provider':_0x2618eb(0x182),'contextWindow':0x1f400,'maxTokens':0x2000,'costPer1kTokens':0.03},'autopilot-model-router':{'provider':'AZURE','contextWindow':0x4000,'maxTokens':0x800,'costPer1kTokens':0.001}};return _0x3592db;}['_formatMessagesForModel'](_0x348d52,_0x3d3dac,_0x2ac02e){const _0x3bd41a=a0_0x1ee68b,_0x4941e0=this['modelSpecs'][_0x3d3dac]||{'provider':_0x3bd41a(0x182)};let _0x517c73;typeof _0x348d52==='string'?_0x517c73=[{'role':'user','content':_0x348d52}]:_0x517c73=_0x348d52[_0x3bd41a(0x1cd)](_0x33a4d8=>this['_formatSingleMessage'](_0x33a4d8,_0x3d3dac));switch(_0x4941e0['provider']){case MODEL_PROVIDERS['ANTHROPIC']:return this['_formatForAnthropic'](_0x517c73);case MODEL_PROVIDERS[_0x3bd41a(0x1c4)]:return this[_0x3bd41a(0x1d5)](_0x517c73);case MODEL_PROVIDERS[_0x3bd41a(0x182)]:return this['_formatForAzure'](_0x517c73);default:return _0x517c73;}}['_formatSingleMessage'](_0x27e0ef,_0x534499){const _0x3adf89=a0_0x1ee68b;return{'role':_0x27e0ef[_0x3adf89(0x161)]||'user','content':_0x27e0ef['content'],'timestamp':_0x27e0ef[_0x3adf89(0x1d6)]};}[a0_0x1ee68b(0x1b6)](_0x49c3a4){const _0x5a5ffb=a0_0x1ee68b;return _0x49c3a4[_0x5a5ffb(0x1cd)](_0x22b8e8=>{const _0x47daee=_0x5a5ffb;if(_0x22b8e8[_0x47daee(0x161)]===_0x47daee(0x19f))return{'role':'user','content':'System:\x20'+_0x22b8e8[_0x47daee(0x1df)]};return _0x22b8e8;});}[a0_0x1ee68b(0x1d5)](_0x28ebb7){return _0x28ebb7;}['_formatForAzure'](_0x4d3eb8){const _0x298095=a0_0x1ee68b;return _0x4d3eb8['map'](_0x4623a0=>({..._0x4623a0,'content':typeof 
_0x4623a0['content']==='string'?_0x4623a0['content']:JSON[_0x298095(0x15d)](_0x4623a0[_0x298095(0x1df)])}));}['_isValidModel'](_0x13c179){const _0x4d292a=a0_0x1ee68b;this['logger'][_0x4d292a(0x171)]('Validating\x20model',{'model':_0x13c179,'modelType':typeof _0x13c179});if(this['modelSpecs'][_0x13c179]!==undefined)return!![];return this[_0x4d292a(0x193)]['warn'](_0x4d292a(0x15c),{'model':_0x13c179,'availableModels':Object['keys'](this[_0x4d292a(0x17f)])}),![];}async[a0_0x1ee68b(0x1b2)](_0x3f9ec0){return!![];}async['_estimateTokens'](_0x321e1b,_0x1f9ba3){const _0x461ab3=a0_0x1ee68b;return Math['ceil'](_0x321e1b[_0x461ab3(0x1c6)]/0x4);}async['_truncateMessage'](_0x3e5256,_0x5d1e1d){const _0xdba118=a0_0x1ee68b,_0x41d0cc=_0x5d1e1d*0x4;if(_0x3e5256['length']<=_0x41d0cc)return _0x3e5256;return _0x3e5256[_0xdba118(0x19c)](0x0,_0x41d0cc-0x14)+'\x0a...\x20[message\x20truncated]';}async[a0_0x1ee68b(0x19e)](_0x5b9570,_0x4abe4b){const _0x3e6f01=a0_0x1ee68b,_0x4be709=_0x5b9570[_0x3e6f01(0x1cd)](_0x4966a8=>_0x4966a8[_0x3e6f01(0x161)]+':\x20'+_0x4966a8['content'])['join']('\x0a'),_0x1d28ec=0x1f4;if(_0x4be709['length']<=_0x1d28ec)return _0x4be709;return _0x4be709['substring'](0x0,_0x1d28ec)+'...\x20[conversation\x20summary\x20truncated]';}async['_makeAPIRequest'](_0x3ca7c0,_0x41fc97,_0x303bba,_0x44df24={}){const _0x9324ec=a0_0x1ee68b,_0x4514f7=_0x9324ec(0x198);let _0x4d258a,_0x2751e9=null,_0x3cd9bf=null;this['logger']?.[_0x9324ec(0x1ba)]('🔑\x20API\x20Key\x20retrieval\x20state',{'hasApiKeyManager':!!this['apiKeyManager'],'sessionId':_0x44df24['sessionId'],'hasSessionId':!!_0x44df24['sessionId'],'optionsKeys':Object['keys'](_0x44df24),'model':_0x41fc97[_0x9324ec(0x1b1)]});if(this[_0x9324ec(0x1dc)]&&_0x44df24['sessionId']){const _0x34445d=this['apiKeyManager']['getKeysForRequest'](_0x44df24[_0x9324ec(0x164)],{'platformProvided':_0x44df24['platformProvided']||![],'vendor':this['_getVendorFromModel'](_0x41fc97[_0x9324ec(0x1b1)])});_0x2751e9=_0x34445d[_0x9324ec(0x1cf)],_0x3cd9bf=_0x34445d[_0x9324ec(0x1a9)],this[_0x9324ec(0x193)]?.[_0x9324ec(0x171)]('Retrieved API keys from session manager',{'sessionId':_0x44df24[_0x9324ec(0x164)],'hasLoxiaKey':!!_0x2751e9,'hasVendorKey':!!_0x3cd9bf,'vendor':this[_0x9324ec(0x1d9)](_0x41fc97[_0x9324ec(0x1b1)])});}!_0x2751e9&&_0x44df24['apiKey']&&(_0x2751e9=_0x44df24['apiKey']);!_0x2751e9&&this[_0x9324ec(0x19b)]['apiKey']&&(_0x2751e9=this[_0x9324ec(0x19b)]['apiKey']);if(!_0x2751e9)throw new Error(_0x9324ec(0x1e7));const _0x551b46=_0x41fc97[_0x9324ec(0x1b1)],_0x1b2ad1={'conversationId':_0x303bba,'message':_0x41fc97['messages'][_0x41fc97['messages'][_0x9324ec(0x1c6)]-0x1]?.['content']||'','messages':_0x41fc97[_0x9324ec(0x1c8)],'model':_0x551b46,'requestId':_0x303bba,'options':_0x41fc97[_0x9324ec(0x176)]||{},'platformProvided':_0x44df24['platformProvided']||![]};_0x41fc97[_0x9324ec(0x19f)]&&(_0x1b2ad1['systemPrompt']=_0x41fc97[_0x9324ec(0x19f)]);_0x44df24['platformProvided']?_0x1b2ad1['apiKey']=_0x2751e9:(_0x3cd9bf&&(_0x1b2ad1[_0x9324ec(0x1a9)]=_0x3cd9bf),_0x44df24[_0x9324ec(0x1a1)]&&(_0x1b2ad1['customApiKeys']=_0x44df24['customApiKeys']),_0x1b2ad1[_0x9324ec(0x17b)]=_0x2751e9);for(let _0x4ab6f2=0x1;_0x4ab6f2<=this['retryAttempts'];_0x4ab6f2++){try{this['logger'][_0x9324ec(0x1ba)](_0x9324ec(0x169),{'url':_0x4514f7,'model':_0x41fc97['model'],'requestId':_0x303bba,'attempt':_0x4ab6f2,'hasApiKey':!!_0x2751e9});const _0xdd1d09=await 
fetch(_0x4514f7,{'method':'POST','headers':{'Content-Type':'application/json','Authorization':'Bearer\x20'+_0x2751e9,'X-Request-ID':_0x303bba},'body':JSON['stringify'](_0x1b2ad1),'timeout':this[_0x9324ec(0x1ce)]});if(!_0xdd1d09['ok']){const _0x1fb4b0=await _0xdd1d09[_0x9324ec(0x18d)](),_0x3b2672=new Error('HTTP\x20'+_0xdd1d09[_0x9324ec(0x1d0)]+':\x20'+_0xdd1d09[_0x9324ec(0x1b7)]+(_0x1fb4b0?'\x20-\x20'+_0x1fb4b0:''));_0x3b2672[_0x9324ec(0x1d0)]=_0xdd1d09['status'];throw _0x3b2672;}const _0x4845e0=await _0xdd1d09[_0x9324ec(0x1da)]();return{'choices':[{'message':{'content':_0x4845e0[_0x9324ec(0x1df)]||'','role':'assistant'},'finish_reason':_0x4845e0['finishReason']||_0x9324ec(0x1ee)}],'usage':_0x4845e0[_0x9324ec(0x186)]||{'prompt_tokens':0x0,'completion_tokens':0x0,'total_tokens':0x0},'model':_0x4845e0[_0x9324ec(0x1b1)]||_0x41fc97[_0x9324ec(0x1b1)],'id':_0x4845e0[_0x9324ec(0x1f3)]||_0x303bba};}catch(_0x404cd8){_0x4d258a=_0x404cd8,this['logger'][_0x9324ec(0x1ef)]('Request\x20to\x20Azure\x20backend\x20failed',{'attempt':_0x4ab6f2,'requestId':_0x303bba,'error':_0x404cd8['message'],'status':_0x404cd8[_0x9324ec(0x1d0)]});if(_0x4ab6f2<this[_0x9324ec(0x1cb)]){const _0x2647e2=Math['pow'](0x2,_0x4ab6f2)*0x3e8;this[_0x9324ec(0x193)][_0x9324ec(0x1ef)](_0x9324ec(0x175)+_0x2647e2+'ms',{'attempt':_0x4ab6f2,'requestId':_0x303bba}),await new Promise(_0x4d646e=>setTimeout(_0x4d646e,_0x2647e2));}}}this[_0x9324ec(0x193)]['error'](_0x9324ec(0x170),{'requestId':_0x303bba,'error':_0x4d258a[_0x9324ec(0x1ab)]});const _0x2394d7=_0x4d258a['message']&&_0x4d258a['message'][_0x9324ec(0x1de)]('No\x20API\x20key\x20configured');if(_0x2394d7)throw new Error(_0x9324ec(0x1b8)+_0x4d258a[_0x9324ec(0x1ab)]);return{'choices':[{'message':{'content':_0x9324ec(0x173)+_0x4d258a['message'],'role':'assistant'},'finish_reason':_0x9324ec(0x1ee)}],'usage':{'prompt_tokens':0x0,'completion_tokens':0x0,'total_tokens':0x0},'model':_0x41fc97['model'],'id':'error-'+_0x303bba,'error':_0x4d258a['message']};}async['_makeImageAPIRequest'](_0x1d3244,_0x131ede,_0x55671d={}){const _0x3b75ef=a0_0x1ee68b,_0x3192a2=_0x3b75ef(0x1bf);let _0x77dc89,_0x3341eb=null,_0x411ec5=null;this['logger']?.['info'](_0x3b75ef(0x1ec),{'hasApiKeyManager':!!this['apiKeyManager'],'sessionId':_0x55671d[_0x3b75ef(0x164)],'hasSessionId':!!_0x55671d[_0x3b75ef(0x164)],'model':_0x1d3244['model']});if(this['apiKeyManager']&&_0x55671d['sessionId']){const _0x4b5546=this[_0x3b75ef(0x1dc)]['getKeysForRequest'](_0x55671d[_0x3b75ef(0x164)],{'platformProvided':_0x55671d[_0x3b75ef(0x1c5)]||![],'vendor':this['_getVendorFromModel'](_0x1d3244[_0x3b75ef(0x1b1)])});_0x3341eb=_0x4b5546['loxiaApiKey'],_0x411ec5=_0x4b5546[_0x3b75ef(0x1a9)],this['logger']?.['debug']('Retrieved API keys from session manager for image',{'sessionId':_0x55671d['sessionId'],'hasLoxiaKey':!!_0x3341eb,'hasVendorKey':!!_0x411ec5});}!_0x3341eb&&_0x55671d['apiKey']&&(_0x3341eb=_0x55671d['apiKey']);!_0x3341eb&&this['config']['apiKey']&&(_0x3341eb=this['config']['apiKey']);if(!_0x3341eb)throw new Error(_0x3b75ef(0x1e7));const _0x1f2668={'prompt':_0x1d3244[_0x3b75ef(0x1f1)],'model':_0x1d3244[_0x3b75ef(0x1b1)]||_0x3b75ef(0x1ed),'size':_0x1d3244[_0x3b75ef(0x15e)],'quality':_0x1d3244[_0x3b75ef(0x15a)],'style':_0x1d3244['style']||'vivid','n':_0x1d3244['n']||0x1,'requestId':_0x131ede};for(let 
_0x1919d4=0x1;_0x1919d4<=this['retryAttempts'];_0x1919d4++){try{this['logger']['info']('Making\x20image\x20request\x20to\x20Azure\x20backend',{'url':_0x3192a2,'model':_0x1d3244[_0x3b75ef(0x1b1)],'requestId':_0x131ede,'attempt':_0x1919d4,'hasApiKey':!!_0x3341eb});const _0x348175=await fetch(_0x3192a2,{'method':_0x3b75ef(0x1e3),'headers':{'Content-Type':_0x3b75ef(0x165),'Authorization':'Bearer\x20'+_0x3341eb,'X-Request-ID':_0x131ede},'body':JSON['stringify'](_0x1f2668),'timeout':this['timeout']*0x2});if(!_0x348175['ok']){const _0x32053d=await _0x348175['text'](),_0x361563=new Error('HTTP\x20'+_0x348175['status']+':\x20'+_0x348175['statusText']+(_0x32053d?_0x3b75ef(0x188)+_0x32053d:''));_0x361563['status']=_0x348175[_0x3b75ef(0x1d0)];throw _0x361563;}const _0x18bf12=await _0x348175[_0x3b75ef(0x1da)]();return{'data':_0x18bf12['images']||[],'model':_0x18bf12[_0x3b75ef(0x1b1)]||_0x1d3244[_0x3b75ef(0x1b1)],'created':_0x18bf12['created']||Date['now'](),'usage':_0x18bf12['usage']};}catch(_0x32e5b3){_0x77dc89=_0x32e5b3,this['logger'][_0x3b75ef(0x1ef)]('Image\x20request\x20to\x20Azure\x20backend\x20failed',{'attempt':_0x1919d4,'requestId':_0x131ede,'error':_0x32e5b3['message'],'status':_0x32e5b3['status']});if(_0x1919d4<this[_0x3b75ef(0x1cb)]){const _0x22fd24=Math['pow'](0x2,_0x1919d4)*0x3e8;this[_0x3b75ef(0x193)]['warn']('Retrying\x20in\x20'+_0x22fd24+'ms',{'attempt':_0x1919d4,'requestId':_0x131ede}),await new Promise(_0x3a2f40=>setTimeout(_0x3a2f40,_0x22fd24));}}}this[_0x3b75ef(0x193)]['error']('All\x20image\x20generation\x20attempts\x20failed',{'requestId':_0x131ede,'error':_0x77dc89['message']});throw new Error(_0x3b75ef(0x158)+_0x77dc89['message']);}async[a0_0x1ee68b(0x1b3)](_0x4cc783){await new Promise(_0x504983=>setTimeout(_0x504983,0x64));}async[a0_0x1ee68b(0x1af)](_0x48cac2){const _0x1d47f6=a0_0x1ee68b,_0x21c82a=0xea60;this['logger']['info'](_0x1d47f6(0x19a)+_0x21c82a+'ms',_0x48cac2),await new Promise(_0x4ccca7=>setTimeout(_0x4ccca7,_0x21c82a));}async[a0_0x1ee68b(0x17d)](_0x5437b4){const _0x30934a=a0_0x1ee68b;this['_recordFailure']();const _0x54b436=0x7530;this[_0x30934a(0x193)]['info']('Service\x20unavailable,\x20waiting\x20'+_0x54b436+'ms',_0x5437b4),await new Promise(_0x2f2c84=>setTimeout(_0x2f2c84,_0x54b436));}['_isCircuitBreakerOpen'](){const _0x19dc36=a0_0x1ee68b;if(!this['circuitBreaker']['isOpen'])return![];const _0x273339=Date[_0x19dc36(0x1cc)]()-this[_0x19dc36(0x162)]['lastFailureTime'];if(_0x273339>this['circuitBreaker']['timeout'])return this['circuitBreaker'][_0x19dc36(0x187)]=![],this[_0x19dc36(0x162)]['failures']=0x0,![];return!![];}['_recordFailure'](){const _0x1260e8=a0_0x1ee68b;this[_0x1260e8(0x162)][_0x1260e8(0x178)]++,this[_0x1260e8(0x162)]['lastFailureTime']=Date['now'](),this['circuitBreaker'][_0x1260e8(0x178)]>=this['circuitBreaker'][_0x1260e8(0x18b)]&&(this[_0x1260e8(0x162)]['isOpen']=!![],this[_0x1260e8(0x193)]['warn'](_0x1260e8(0x1e8)));}[a0_0x1ee68b(0x1d2)](){const _0x196b67=a0_0x1ee68b;this['circuitBreaker'][_0x196b67(0x178)]>0x0&&(this['circuitBreaker'][_0x196b67(0x178)]=0x0,this[_0x196b67(0x162)][_0x196b67(0x187)]=![],this[_0x196b67(0x193)]['info']('Circuit\x20breaker\x20reset\x20-\x20service\x20recovered'));}[a0_0x1ee68b(0x1d9)](_0x3ffc08){const _0x2ab2c3=a0_0x1ee68b;if(!_0x3ffc08)return null;const 
_0x3dc776=_0x3ffc08['toLowerCase']();if(_0x3dc776[_0x2ab2c3(0x1de)]('anthropic')||_0x3dc776[_0x2ab2c3(0x1de)]('claude'))return'anthropic';else{if(_0x3dc776['includes']('openai')||_0x3dc776[_0x2ab2c3(0x1de)](_0x2ab2c3(0x1d8)))return'openai';else{if(_0x3dc776['includes']('deepseek'))return'deepseek';else{if(_0x3dc776['includes'](_0x2ab2c3(0x1a8)))return'microsoft';}}}return null;}}class ConversationManager{constructor(_0x5adffc,_0x3bde02){const _0x409c5f=a0_0x1ee68b;this[_0x409c5f(0x1bd)]=_0x5adffc,this['logger']=_0x3bde02,this[_0x409c5f(0x1db)]=new Map();}async['switchModel'](_0x55d12b){return{'messages':[],'model':_0x55d12b,'lastUpdated':new Date()['toISOString']()};}}function a0_0x2200(){const _0xa585=['tM8GqvbjigTLEsbJB25MAwD1CMvKlIbqBgvHC2uGy29UzMLNDxjLihLVDxiGtg94AweGqvbjigTLEsbPBIbtzxr0Aw5NCY4','q2LYy3vPDcbICMvHA2vYig9Wzw5LzcbKDwuGDg8GCMvWzwf0zwqGzMfPBhvYzxm','DhjHy2TvC2fNzq','AgfUzgXLshr0CevYCM9Y','ywDLBNrqB29S','8j+wVo+4JYbjBwfNzsbbueKGCMvXDwvZDcbZDgf0zq','yxP1CMuTB3bLBMfPlwrHBgXLmW','C3rVCa','D2fYBG','r0furvDbwv9usu1ft1vu','ChjVBxb0','C2vUze1LC3nHz2u','CMvXDwvZDeLK','y29TCgfJDgL6zunVBNzLCNnHDgLVBG','DhjPBq','y29Kzq','sw1Hz2uGz2vUzxjHDgLVBIbMywLSzwq6ia','z2vUzxjHDgvjBwfNzq','CxvHBgL0Eq','x2LUAxrPywXPEMvnB2rLBfnWzwnZ','tw9KzwWGDMfSAwrHDgLVBIbMywLSzwq','C3rYAw5NAwz5','C2L6zq','y2XHC3nPzNLfCNjVCG','mtK5mKLhzNPLza','CM9Szq','y2LYy3vPDejYzwfRzxi','revfufnfruS','C2vZC2LVBKLK','yxbWBgLJyxrPB24VANnVBG','qMfKihjLCxvLC3qGDg8GquKGC2vYDMLJzq','quKGC2vYDMLJzsbLBMrWB2LUDcbUB3qGzM91BMq','ChvZAa','twfRAw5NihjLCxvLC3qGDg8GqxP1CMuGyMfJA2vUza','mJC3ntC0zvLfzhvi','nde0mdaZogfAtg9QuW','x2LZq2LYy3vPDejYzwfRzxjpCgvU','qwDLBNqGBw9KzwWGC3DPDgnOzwq6ia','qu5usfjpueLdx0HbsuTv','r2vUzxjHDgLUzYbPBwfNzsb3AxrOig1VzgvSoIa','qwXSigf0DgvTChrZihrVihjLywnOigjHy2TLBMqGzMfPBgvKlcb1C2LUzYbTB2nRihjLC3bVBNnL','zgvIDwC','l2nOyxqVy29TCgXLDgLVBNm','ssbHCg9SB2DPEMuSigj1Dcbjj20GDw5HyMXLihrVignVBM5Ly3qGDg8GDgHLiefjihnLCNzPy2uGyxqGDgHLig1VBwvUDc4GrxjYB3i6ia','z2v0qwDLBNq','uMv0CNLPBMCGAw4G','B3b0Aw9UCW','Dg9vChbLCKnHC2u','zMfPBhvYzxm','C3rYzwfT','vxnHz2uGDhjHy2TPBMCGzMfPBgvKoIa','yxbPs2v5','zxjYB3jiyw5KBgvY','x2HHBMrSzvnLCNzPy2vvBMf2ywLSywjSzq','qu5usfjpueLdx09qvvm','Bw9KzwXtCgvJCW','C3rHy2S','x21HA2vjBwfNzufqsvjLCxvLC3q','qvPvuKu','AgvHBhrOEq','Dg90ywXFDg9Rzw5Z','y2HLy2Ttzxj2AwnLsgvHBhrO','DxnHz2u','AxnpCgvU','ic0G','tw9KzwWGCM91DgLUzYbMywLSzwq6ia','y29UDMvYC2f0Aw9UtwfUywDLCNm','DgHYzxnOB2XK','yJy0x2PZB24','Dgv4Da','vw5RBM93BIbbssbZzxj2AwnLigvYCM9Y','Dw5ZAgLMDa','yMfJA2vUza','mtK0mdK0nNrzAeDjDq','u2vYDMLJzsb0zw1WB3jHCMLSEsb1BMf2ywLSywjSzsaTignPCMn1AxqGyNjLywTLCIbPCYbVCgvU','Bg9Nz2vY','DxjS','Dg9ju09tDhjPBMC','tw9KzwWGCM91DgvKigzVCIb0yxnRoIa','rKfmtejbq0S','Ahr0Chm6lY9HDxrVCgLSB3qTyxbPlMf6DxjLD2vIC2L0zxmUBMv0l2XSBs9JAgf0','mJu3nJCYmgXezMnIrW','uMf0zsbSAw1PDcbLEgnLzwrLzcWGD2fPDgLUzYa','y29UzMLN','C3vIC3rYAw5N','y3vYCMvUDe1VzgvS','x3n1Bw1HCML6zu1LC3nHz2vZ','C3LZDgvT','yMfZzvvYBa','y3vZDg9TqxbPs2v5CW','CgvYC2LZDefNzw50u3rHDgu','CM91DgvnB2rLBa','zgf0yq','tM8GAgvHBhrOEsbTB2rLBhmGyxzHAwXHyMXLigzVCIb0yxnRoIa','DgvTCgvYyxr1CMu','C3bLzwq','CgHP','DMvUzg9YqxbPs2v5','mKPVDKrHAG','BwvZC2fNzq','zMfZDa','u2vYDMLJzsbLBMrWB2LUDcbUB3qGzM91BMq','ChjLzMvYCMvKtw9KzwW','x2HHBMrSzvjHDgvmAw1PDa','x3jLy29YzezHAwX1CMu','Bw9KzwW','x2nOzwnRtw9KzwXizwfSDgG','x2nOzwnRuMf0zuXPBwL0','su5urvjoquXFu0vsvKvsx0vsuK9s','nte4odCXuwrUB3jJ','x2zVCM1HDezVCKfUDgHYB3bPyW','C3rHDhvZvgv4Da','qvbjienVBMzPz3vYyxrPB24GrxjYB3i6ia','AxnbCNjHEq','Aw5MBW','CMv2zxjZzq','x21HA2vbueLszxf1zxn0','ywDLBNrjza','zxjYB3i','Ahr0Chm6lY9HDxrVCgLSB3qTyxbPlMf6DxjLD2vIC2L0zxmUBMv0l2XSBs9Nzw5LCMf0z
s1PBwfNzq','qKfex1jfuvvfu1q','r1buxZq','C3rHBMrHCMq','tw9KzwWGC3DPDgnOigzHAwXLzdOG','t1bftKfj','CgXHDgzVCM1qCM92AwrLza','BgvUz3rO','yNvKz2v0u2vYDMLJzq','BwvZC2fNzxm','x2vZDgLTyxrLvg9Rzw5Z','ueHj','CMv0CNLbDhrLBxb0CW','BM93','BwfW','DgLTzw91Da','Bg94AwfbCgLlzxK','C3rHDhvZ','Dg9tDhjPBMC','x3jLC2v0q2LYy3vPDejYzwfRzxi','qvbjigTLEsbTyw5Hz2vYihnLDcbMB3iGquKGC2vYDMLJzq','vw5RBM93BIbTB2rLBdOG','x2zVCM1HDezVCK9Wzw5bsq','DgLTzxn0yw1W','uMf0zsbSAw1PDcbLEgnLzwrLzcaTihjLCxvLC3qGCxvLDwvKigzVCIbYzxrYEq','z3b0','x2DLDfzLBMrVCKzYB21nB2rLBa','ANnVBG','y29UDMvYC2f0Aw9UCW','yxbPs2v5twfUywDLCG','zxn0Aw1HDgvKvg9Rzw5Z','Aw5JBhvKzxm','y29UDgvUDa','ChjVBxb0x3rVA2vUCW','sw52ywXPzcbYzxf1zxn0oIa','mZyYmdDhBvLSuey','ue9tva','mJe4ndyZmK1qANnjtG','x3rYDw5JyxrLtwvZC2fNzq','C2v0'];a0_0x2200=function(){return _0xa585;};return a0_0x2200();}export default AIService;
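The error handler near the top of this chunk switches on the backend's HTTP status (UNAUTHORIZED, FORBIDDEN, NOT_FOUND, TOO_MANY_REQUESTS and the 5xx family), logs, and rethrows user-facing errors, backing off before rethrowing on 429 and 5xx responses. A minimal deobfuscated sketch of that branch follows; the HTTP_STATUS values and the onRateLimit/onUnavailable callback names are assumptions, while the quoted error strings appear literally in the bundle:

    const HTTP_STATUS = {
      UNAUTHORIZED: 401,
      FORBIDDEN: 403,
      TOO_MANY_REQUESTS: 429,
      BAD_GATEWAY: 502,
      SERVICE_UNAVAILABLE: 503,
      GATEWAY_TIMEOUT: 504,
    };

    // Maps a failed backend response to a user-facing error; backs off on 429/5xx before rethrowing.
    async function handleAIError(error, context, { logger, onRateLimit, onUnavailable }) {
      switch (error.status || error.code) {
        case HTTP_STATUS.UNAUTHORIZED:
          logger.error('Authentication failed with AI service', { context });
          throw new Error('Authentication failed - check API credentials');
        case HTTP_STATUS.FORBIDDEN:
          logger.error('Access forbidden to AI service', { context });
          throw new Error('Access forbidden - insufficient permissions');
        case HTTP_STATUS.TOO_MANY_REQUESTS:
          logger.warn('Rate limit exceeded', { context });
          await onRateLimit(context); // the bundle waits 60000 ms here
          throw new Error('Rate limit exceeded');
        case HTTP_STATUS.BAD_GATEWAY:
        case HTTP_STATUS.SERVICE_UNAVAILABLE:
        case HTTP_STATUS.GATEWAY_TIMEOUT:
          logger.error('AI service unavailable', { context, status: error.status });
          await onUnavailable(context); // the bundle waits 30000 ms here
          throw new Error('AI service temporarily unavailable');
        default:
          logger.error('Unexpected AI service error', { context, error: error.message });
          throw new Error('AI service error: ' + error.message);
      }
    }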
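The image-generation entry point builds an `img-<timestamp>-<random>` request id, rejects empty prompts, and defaults the model to 'flux-1.1-pro' and the size to '1024x1024' before handing the payload to the image request helper. A small sketch of that request construction; the quality and response-format defaults are resolved through the obfuscated string table, so the 'standard' and 'url' values below are placeholders:

    // Builds the payload sent to the image endpoint; 'standard' and 'url' stand in for the
    // real defaults, which are looked up through the obfuscated string table.
    function buildImageRequest(prompt, options = {}) {
      if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
        throw new Error('Image generation requires a non-empty text prompt');
      }
      const requestId = 'img-' + Date.now() + '-' + Math.random().toString(36).substr(2, 9);
      return {
        prompt,
        model: options.model || 'flux-1.1-pro',
        size: options.size || '1024x1024',
        quality: options.quality || 'standard',
        response_format: options.responseFormat || 'url',
        n: 1,
        metadata: { requestId, timestamp: new Date().toISOString() },
      };
    }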
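The model-spec map in this chunk records, per model id, a provider plus contextWindow, maxTokens and costPer1kTokens. The hex literals decode to 200,000-token context windows for the Anthropic entries, 128,000 for the OpenAI and Azure chat models, and 16,384 for gpt-3.5-turbo, Phi-4 and the router model. The Anthropic, DeepSeek and Phi entries are keyed through a MODELS constant that is not visible here, and the OpenAI providers go through MODEL_PROVIDERS (shown below by key name); the plain-string entries decode to:

    // Excerpt of the spec map with the hex literals decoded to decimal.
    const MODEL_SPECS = {
      'gpt-4o':                 { provider: 'OPENAI', contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.03 },
      'gpt-4o-mini':            { provider: 'OPENAI', contextWindow: 128000, maxTokens: 16384, costPer1kTokens: 0.0015 },
      'gpt-4-turbo':            { provider: 'OPENAI', contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.03 },
      'gpt-3.5-turbo':          { provider: 'OPENAI', contextWindow: 16384,  maxTokens: 4096,  costPer1kTokens: 0.001 },
      'azure-ai-grok3':         { provider: 'AZURE',  contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.01 },
      'azure-ai-deepseek-r1':   { provider: 'AZURE',  contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.002 },
      'azure-openai-gpt-5':     { provider: 'AZURE',  contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.03 },
      'azure-openai-gpt-4':     { provider: 'AZURE',  contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.03 },
      'azure-openai-gpt-4o':    { provider: 'AZURE',  contextWindow: 128000, maxTokens: 8192,  costPer1kTokens: 0.03 },
      'autopilot-model-router': { provider: 'AZURE',  contextWindow: 16384,  maxTokens: 2048,  costPer1kTokens: 0.001 },
    };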
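Token accounting is heuristic: roughly four characters per token, with hard character caps when truncating a single message or summarising a dropped conversation prefix. Deobfuscated, the three helpers reduce to something like the following; the 20-character reserve and the 500-character summary cap decode directly from the hex literals:

    // ~4 characters per token.
    function estimateTokens(text) {
      return Math.ceil(text.length / 4);
    }

    // Caps a message at maxTokens worth of characters, reserving 20 characters for the marker.
    function truncateMessage(text, maxTokens) {
      const maxChars = maxTokens * 4;
      if (text.length <= maxChars) return text;
      return text.substring(0, maxChars - 20) + '\n... [message truncated]';
    }

    // Flattens dropped messages into a "role: content" transcript capped at 500 characters.
    function summarizeMessages(messages) {
      const joined = messages.map((m) => m.role + ': ' + m.content).join('\n');
      if (joined.length <= 500) return joined;
      return joined.substring(0, 500) + '... [conversation summary truncated]';
    }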
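Both the chat and image calls go through the same transport pattern: a POST to the Azure-hosted backend with a Bearer token and an X-Request-ID header, retried with exponential backoff (2^attempt seconds, decoded from the Math.pow(2, attempt) * 1000 literal), with the body of a non-2xx response folded into the thrown error. A minimal sketch of that loop, with the endpoint and payload left abstract and the retry count (taken from the service config) assumed to be 3; note the bundle also passes a `timeout` option to fetch, which native fetch ignores:

    // POSTs JSON to the backend, retrying failed attempts with exponential backoff.
    async function postWithRetry(endpoint, payload, apiKey, requestId, retryAttempts = 3) {
      let lastError;
      for (let attempt = 1; attempt <= retryAttempts; attempt++) {
        try {
          const response = await fetch(endpoint, {
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
              Authorization: 'Bearer ' + apiKey,
              'X-Request-ID': requestId,
            },
            body: JSON.stringify(payload),
          });
          if (!response.ok) {
            const body = await response.text();
            const error = new Error('HTTP ' + response.status + ': ' + response.statusText + (body ? ' - ' + body : ''));
            error.status = response.status;
            throw error;
          }
          return await response.json();
        } catch (error) {
          lastError = error;
          if (attempt < retryAttempts) {
            const delayMs = Math.pow(2, attempt) * 1000; // 2s, 4s, 8s, ...
            await new Promise((resolve) => setTimeout(resolve, delayMs));
          }
        }
      }
      throw lastError; // the chat path instead returns an error-shaped "choices" object
    }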
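The service also keeps a simple circuit breaker: every failure bumps a counter and records a timestamp, the breaker opens once the counter reaches a threshold, and it closes again (resetting the counter) either after a cool-down period or on the first successful call. The failure threshold and cool-down value live in the service config and are not decodable from this chunk, so the defaults below are placeholders:

    // Sketch of the failure-counting circuit breaker; threshold/cooldownMs defaults are placeholders.
    class CircuitBreaker {
      constructor({ threshold = 5, cooldownMs = 60000 } = {}) {
        this.failures = 0;
        this.lastFailureTime = 0;
        this.isOpen = false;
        this.threshold = threshold;
        this.cooldownMs = cooldownMs;
      }

      // True while the breaker is open and the cool-down has not elapsed.
      isTripped() {
        if (!this.isOpen) return false;
        if (Date.now() - this.lastFailureTime > this.cooldownMs) {
          this.isOpen = false;
          this.failures = 0;
          return false;
        }
        return true;
      }

      recordFailure() {
        this.failures += 1;
        this.lastFailureTime = Date.now();
        if (this.failures >= this.threshold) this.isOpen = true;
      }

      recordSuccess() {
        if (this.failures > 0) {
          this.failures = 0;
          this.isOpen = false;
        }
      }
    }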
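Finally, vendor selection for API-key lookup is a substring match on the model id. The 'anthropic', 'claude', 'openai', 'deepseek' and 'microsoft' literals appear in the bundle; the 'gpt' and 'phi' checks are inferred from the string table, so treat those two as assumptions:

    // Maps a model id to the vendor whose API key should be attached to the request.
    function getVendorFromModel(model) {
      if (!model) return null;
      const name = model.toLowerCase();
      if (name.includes('anthropic') || name.includes('claude')) return 'anthropic';
      if (name.includes('openai') || name.includes('gpt')) return 'openai'; // 'gpt' inferred
      if (name.includes('deepseek')) return 'deepseek';
      if (name.includes('phi')) return 'microsoft'; // 'phi' inferred
      return null;
    }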