@vybestack/llxprt-code-core 0.4.8 → 0.5.0-nightly.251102.f115237d

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (210)
  1. package/dist/prompt-config/defaults/default-prompts.json +4 -17
  2. package/dist/src/auth/precedence.d.ts +69 -9
  3. package/dist/src/auth/precedence.js +467 -69
  4. package/dist/src/auth/precedence.js.map +1 -1
  5. package/dist/src/auth/types.d.ts +2 -2
  6. package/dist/src/config/config.d.ts +15 -1
  7. package/dist/src/config/config.js +118 -6
  8. package/dist/src/config/config.js.map +1 -1
  9. package/dist/src/config/index.d.ts +6 -0
  10. package/dist/src/config/index.js +5 -0
  11. package/dist/src/config/index.js.map +1 -1
  12. package/dist/src/config/profileManager.d.ts +23 -3
  13. package/dist/src/config/profileManager.js +54 -7
  14. package/dist/src/config/profileManager.js.map +1 -1
  15. package/dist/src/config/subagentManager.d.ts +96 -0
  16. package/dist/src/config/subagentManager.js +371 -0
  17. package/dist/src/config/subagentManager.js.map +1 -0
  18. package/dist/src/config/types.d.ts +18 -0
  19. package/dist/src/config/types.js +3 -0
  20. package/dist/src/config/types.js.map +1 -0
  21. package/dist/src/core/client.d.ts +27 -7
  22. package/dist/src/core/client.js +217 -55
  23. package/dist/src/core/client.js.map +1 -1
  24. package/dist/src/core/contentGenerator.d.ts +3 -1
  25. package/dist/src/core/contentGenerator.js +3 -0
  26. package/dist/src/core/contentGenerator.js.map +1 -1
  27. package/dist/src/core/coreToolScheduler.d.ts +1 -5
  28. package/dist/src/core/coreToolScheduler.js +95 -23
  29. package/dist/src/core/coreToolScheduler.js.map +1 -1
  30. package/dist/src/core/geminiChat.d.ts +42 -12
  31. package/dist/src/core/geminiChat.js +405 -205
  32. package/dist/src/core/geminiChat.js.map +1 -1
  33. package/dist/src/core/nonInteractiveToolExecutor.d.ts +3 -2
  34. package/dist/src/core/nonInteractiveToolExecutor.js +94 -10
  35. package/dist/src/core/nonInteractiveToolExecutor.js.map +1 -1
  36. package/dist/src/core/subagent.d.ts +86 -7
  37. package/dist/src/core/subagent.js +809 -79
  38. package/dist/src/core/subagent.js.map +1 -1
  39. package/dist/src/core/subagentOrchestrator.d.ts +73 -0
  40. package/dist/src/core/subagentOrchestrator.js +383 -0
  41. package/dist/src/core/subagentOrchestrator.js.map +1 -0
  42. package/dist/src/core/subagentScheduler.d.ts +16 -0
  43. package/dist/src/core/subagentScheduler.js +7 -0
  44. package/dist/src/core/subagentScheduler.js.map +1 -0
  45. package/dist/src/core/turn.d.ts +5 -1
  46. package/dist/src/core/turn.js +5 -1
  47. package/dist/src/core/turn.js.map +1 -1
  48. package/dist/src/hooks/tool-render-suppression-hook.js +6 -1
  49. package/dist/src/hooks/tool-render-suppression-hook.js.map +1 -1
  50. package/dist/src/ide/ideContext.d.ts +32 -32
  51. package/dist/src/index.d.ts +19 -1
  52. package/dist/src/index.js +15 -2
  53. package/dist/src/index.js.map +1 -1
  54. package/dist/src/interfaces/index.d.ts +1 -0
  55. package/dist/src/interfaces/index.js +4 -0
  56. package/dist/src/interfaces/index.js.map +1 -0
  57. package/dist/src/interfaces/nodejs-error.interface.d.ts +4 -0
  58. package/dist/src/interfaces/nodejs-error.interface.js +2 -0
  59. package/dist/src/interfaces/nodejs-error.interface.js.map +1 -0
  60. package/dist/src/parsers/TextToolCallParser.js +41 -1
  61. package/dist/src/parsers/TextToolCallParser.js.map +1 -1
  62. package/dist/src/prompt-config/defaults/core.md +15 -0
  63. package/dist/src/prompt-config/defaults/providers/gemini/core.md +203 -119
  64. package/dist/src/prompt-config/defaults/tool-defaults.js +2 -0
  65. package/dist/src/prompt-config/defaults/tool-defaults.js.map +1 -1
  66. package/dist/src/prompt-config/defaults/tools/list-subagents.md +7 -0
  67. package/dist/src/prompt-config/defaults/tools/task.md +8 -0
  68. package/dist/src/providers/BaseProvider.d.ts +115 -30
  69. package/dist/src/providers/BaseProvider.js +445 -109
  70. package/dist/src/providers/BaseProvider.js.map +1 -1
  71. package/dist/src/providers/IProvider.d.ts +50 -18
  72. package/dist/src/providers/LoggingProviderWrapper.d.ts +60 -16
  73. package/dist/src/providers/LoggingProviderWrapper.js +213 -60
  74. package/dist/src/providers/LoggingProviderWrapper.js.map +1 -1
  75. package/dist/src/providers/ProviderManager.d.ts +73 -2
  76. package/dist/src/providers/ProviderManager.js +492 -40
  77. package/dist/src/providers/ProviderManager.js.map +1 -1
  78. package/dist/src/providers/anthropic/AnthropicProvider.d.ts +35 -38
  79. package/dist/src/providers/anthropic/AnthropicProvider.js +222 -227
  80. package/dist/src/providers/anthropic/AnthropicProvider.js.map +1 -1
  81. package/dist/src/providers/errors.d.ts +86 -0
  82. package/dist/src/providers/errors.js +89 -0
  83. package/dist/src/providers/errors.js.map +1 -1
  84. package/dist/src/providers/gemini/GeminiProvider.d.ts +101 -41
  85. package/dist/src/providers/gemini/GeminiProvider.js +386 -311
  86. package/dist/src/providers/gemini/GeminiProvider.js.map +1 -1
  87. package/dist/src/providers/openai/ConversationCache.d.ts +5 -3
  88. package/dist/src/providers/openai/ConversationCache.js +93 -32
  89. package/dist/src/providers/openai/ConversationCache.js.map +1 -1
  90. package/dist/src/providers/openai/OpenAIProvider.d.ts +82 -42
  91. package/dist/src/providers/openai/OpenAIProvider.js +373 -532
  92. package/dist/src/providers/openai/OpenAIProvider.js.map +1 -1
  93. package/dist/src/providers/openai/getOpenAIProviderInfo.d.ts +1 -1
  94. package/dist/src/providers/openai/getOpenAIProviderInfo.js +52 -22
  95. package/dist/src/providers/openai/getOpenAIProviderInfo.js.map +1 -1
  96. package/dist/src/providers/openai/openaiRequestParams.d.ts +7 -0
  97. package/dist/src/providers/openai/openaiRequestParams.js +66 -0
  98. package/dist/src/providers/openai/openaiRequestParams.js.map +1 -0
  99. package/dist/src/providers/openai-responses/OpenAIResponsesProvider.d.ts +6 -33
  100. package/dist/src/providers/openai-responses/OpenAIResponsesProvider.js +84 -183
  101. package/dist/src/providers/openai-responses/OpenAIResponsesProvider.js.map +1 -1
  102. package/dist/src/providers/types/providerRuntime.d.ts +17 -0
  103. package/dist/src/providers/types/providerRuntime.js +7 -0
  104. package/dist/src/providers/types/providerRuntime.js.map +1 -0
  105. package/dist/src/providers/utils/authToken.d.ts +12 -0
  106. package/dist/src/providers/utils/authToken.js +17 -0
  107. package/dist/src/providers/utils/authToken.js.map +1 -0
  108. package/dist/src/providers/utils/userMemory.d.ts +8 -0
  109. package/dist/src/providers/utils/userMemory.js +34 -0
  110. package/dist/src/providers/utils/userMemory.js.map +1 -0
  111. package/dist/src/runtime/AgentRuntimeContext.d.ts +213 -0
  112. package/dist/src/runtime/AgentRuntimeContext.js +17 -0
  113. package/dist/src/runtime/AgentRuntimeContext.js.map +1 -0
  114. package/dist/src/runtime/AgentRuntimeLoader.d.ts +47 -0
  115. package/dist/src/runtime/AgentRuntimeLoader.js +122 -0
  116. package/dist/src/runtime/AgentRuntimeLoader.js.map +1 -0
  117. package/dist/src/runtime/AgentRuntimeState.d.ts +232 -0
  118. package/dist/src/runtime/AgentRuntimeState.js +439 -0
  119. package/dist/src/runtime/AgentRuntimeState.js.map +1 -0
  120. package/dist/src/runtime/RuntimeInvocationContext.d.ts +51 -0
  121. package/dist/src/runtime/RuntimeInvocationContext.js +52 -0
  122. package/dist/src/runtime/RuntimeInvocationContext.js.map +1 -0
  123. package/dist/src/runtime/createAgentRuntimeContext.d.ts +7 -0
  124. package/dist/src/runtime/createAgentRuntimeContext.js +58 -0
  125. package/dist/src/runtime/createAgentRuntimeContext.js.map +1 -0
  126. package/dist/src/runtime/index.d.ts +13 -0
  127. package/dist/src/runtime/index.js +14 -0
  128. package/dist/src/runtime/index.js.map +1 -0
  129. package/dist/src/runtime/providerRuntimeContext.d.ts +30 -0
  130. package/dist/src/runtime/providerRuntimeContext.js +70 -0
  131. package/dist/src/runtime/providerRuntimeContext.js.map +1 -0
  132. package/dist/src/runtime/runtimeAdapters.d.ts +22 -0
  133. package/dist/src/runtime/runtimeAdapters.js +81 -0
  134. package/dist/src/runtime/runtimeAdapters.js.map +1 -0
  135. package/dist/src/runtime/runtimeStateFactory.d.ts +21 -0
  136. package/dist/src/runtime/runtimeStateFactory.js +104 -0
  137. package/dist/src/runtime/runtimeStateFactory.js.map +1 -0
  138. package/dist/src/services/todo-context-tracker.d.ts +10 -8
  139. package/dist/src/services/todo-context-tracker.js +26 -10
  140. package/dist/src/services/todo-context-tracker.js.map +1 -1
  141. package/dist/src/services/tool-call-tracker-service.d.ts +11 -7
  142. package/dist/src/services/tool-call-tracker-service.js +89 -29
  143. package/dist/src/services/tool-call-tracker-service.js.map +1 -1
  144. package/dist/src/settings/SettingsService.d.ts +4 -0
  145. package/dist/src/settings/SettingsService.js +58 -2
  146. package/dist/src/settings/SettingsService.js.map +1 -1
  147. package/dist/src/settings/settingsServiceInstance.d.ts +6 -1
  148. package/dist/src/settings/settingsServiceInstance.js +28 -8
  149. package/dist/src/settings/settingsServiceInstance.js.map +1 -1
  150. package/dist/src/telemetry/loggers.d.ts +5 -1
  151. package/dist/src/telemetry/loggers.js.map +1 -1
  152. package/dist/src/telemetry/loggers.test.circular.js +4 -0
  153. package/dist/src/telemetry/loggers.test.circular.js.map +1 -1
  154. package/dist/src/telemetry/metrics.d.ts +3 -1
  155. package/dist/src/telemetry/metrics.js.map +1 -1
  156. package/dist/src/telemetry/types.d.ts +1 -0
  157. package/dist/src/telemetry/types.js +3 -0
  158. package/dist/src/telemetry/types.js.map +1 -1
  159. package/dist/src/test-utils/index.d.ts +2 -0
  160. package/dist/src/test-utils/index.js +2 -0
  161. package/dist/src/test-utils/index.js.map +1 -1
  162. package/dist/src/test-utils/mockWorkspaceContext.d.ts +0 -3
  163. package/dist/src/test-utils/mockWorkspaceContext.js +3 -4
  164. package/dist/src/test-utils/mockWorkspaceContext.js.map +1 -1
  165. package/dist/src/test-utils/providerCallOptions.d.ts +43 -0
  166. package/dist/src/test-utils/providerCallOptions.js +137 -0
  167. package/dist/src/test-utils/providerCallOptions.js.map +1 -0
  168. package/dist/src/test-utils/runtime.d.ts +92 -0
  169. package/dist/src/test-utils/runtime.js +226 -0
  170. package/dist/src/test-utils/runtime.js.map +1 -0
  171. package/dist/src/test-utils/tools.d.ts +4 -4
  172. package/dist/src/test-utils/tools.js +20 -10
  173. package/dist/src/test-utils/tools.js.map +1 -1
  174. package/dist/src/tools/list-subagents.d.ts +31 -0
  175. package/dist/src/tools/list-subagents.js +109 -0
  176. package/dist/src/tools/list-subagents.js.map +1 -0
  177. package/dist/src/tools/task.d.ts +87 -0
  178. package/dist/src/tools/task.js +427 -0
  179. package/dist/src/tools/task.js.map +1 -0
  180. package/dist/src/tools/todo-read.js +1 -1
  181. package/dist/src/tools/todo-read.js.map +1 -1
  182. package/dist/src/tools/todo-store.js +4 -2
  183. package/dist/src/tools/todo-store.js.map +1 -1
  184. package/dist/src/tools/todo-write.js +4 -2
  185. package/dist/src/tools/todo-write.js.map +1 -1
  186. package/dist/src/tools/tool-error.d.ts +1 -0
  187. package/dist/src/tools/tool-error.js +1 -0
  188. package/dist/src/tools/tool-error.js.map +1 -1
  189. package/dist/src/tools/tool-registry.d.ts +2 -0
  190. package/dist/src/tools/tool-registry.js +46 -21
  191. package/dist/src/tools/tool-registry.js.map +1 -1
  192. package/dist/src/types/modelParams.d.ts +4 -0
  193. package/dist/src/utils/gitIgnoreParser.js +15 -3
  194. package/dist/src/utils/gitIgnoreParser.js.map +1 -1
  195. package/package.json +1 -1
  196. package/dist/src/prompt-config/defaults/providers/anthropic/core.md +0 -97
  197. package/dist/src/prompt-config/defaults/providers/anthropic/tools/glob.md +0 -34
  198. package/dist/src/prompt-config/defaults/providers/anthropic/tools/list-directory.md +0 -11
  199. package/dist/src/prompt-config/defaults/providers/anthropic/tools/read-file.md +0 -14
  200. package/dist/src/prompt-config/defaults/providers/anthropic/tools/read-many-files.md +0 -31
  201. package/dist/src/prompt-config/defaults/providers/anthropic/tools/replace.md +0 -41
  202. package/dist/src/prompt-config/defaults/providers/anthropic/tools/run-shell-command.md +0 -32
  203. package/dist/src/prompt-config/defaults/providers/anthropic/tools/save-memory.md +0 -35
  204. package/dist/src/prompt-config/defaults/providers/anthropic/tools/search-file-content.md +0 -44
  205. package/dist/src/prompt-config/defaults/providers/anthropic/tools/todo-write.md +0 -45
  206. package/dist/src/prompt-config/defaults/providers/anthropic/tools/write-file.md +0 -11
  207. package/dist/src/prompt-config/defaults/providers/openai/core.md +0 -97
  208. package/dist/src/prompt-config/defaults/providers/openai/tools/todo-pause.md +0 -28
  209. package/dist/src/prompt-config/defaults/providers/openai/tools/todo-read.md +0 -5
  210. package/dist/src/prompt-config/defaults/providers/openai/tools/todo-write.md +0 -45
package/dist/src/providers/openai/OpenAIProvider.js
@@ -21,20 +21,25 @@ import OpenAI from 'openai';
 import * as http from 'http';
 import * as https from 'https';
 import * as net from 'net';
-import { BaseProvider } from '../BaseProvider.js';
+import { BaseProvider, } from '../BaseProvider.js';
 import { DebugLogger } from '../../debug/index.js';
-import { getSettingsService } from '../../settings/settingsServiceInstance.js';
 import { ToolFormatter } from '../../tools/ToolFormatter.js';
 import { processToolParameters } from '../../tools/doubleEscapeUtils.js';
 import { getCoreSystemPromptAsync } from '../../core/prompts.js';
-import { retryWithBackoff, isNetworkTransientError, } from '../../utils/retry.js';
+import { retryWithBackoff } from '../../utils/retry.js';
+import { resolveUserMemory } from '../utils/userMemory.js';
+import { resolveRuntimeAuthToken } from '../utils/authToken.js';
+import { filterOpenAIRequestParams } from './openaiRequestParams.js';
 export class OpenAIProvider extends BaseProvider {
     name = 'openai';
-    logger;
-    toolFormatter;
-    _cachedClient;
-    _cachedClientKey;
-    modelParams;
+    getLogger() {
+        return new DebugLogger('llxprt:provider:openai');
+    }
+    /**
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+     * @requirement:REQ-SP4-003
+     * Constructor reduced to minimal initialization - no state captured
+     */
     constructor(apiKey, baseURL, config, oauthManager) {
         // Normalize empty string to undefined for proper precedence handling
         const normalizedApiKey = apiKey && apiKey.trim() !== '' ? apiKey : undefined;
@@ -45,310 +50,169 @@ export class OpenAIProvider extends BaseProvider {
             (baseURL.includes('dashscope.aliyuncs.com') ||
                 baseURL.includes('api.qwen.com') ||
                 baseURL.includes('qwen')));
+        const forceQwenOAuth = Boolean(config?.forceQwenOAuth);
         // Initialize base provider with auth configuration
         super({
             name: 'openai',
             apiKey: normalizedApiKey,
             baseURL,
             envKeyNames: ['OPENAI_API_KEY'], // Support environment variable fallback
-            isOAuthEnabled: isQwenEndpoint && !!oauthManager,
-            oauthProvider: isQwenEndpoint ? 'qwen' : undefined,
+            isOAuthEnabled: (isQwenEndpoint || forceQwenOAuth) && !!oauthManager,
+            oauthProvider: isQwenEndpoint || forceQwenOAuth ? 'qwen' : undefined,
             oauthManager,
         }, config);
-        this.toolFormatter = new ToolFormatter();
-        // new DebugLogger('llxprt:core:toolformatter'), // TODO: Fix ToolFormatter constructor
-        // Setup debug logger
-        this.logger = new DebugLogger('llxprt:provider:openai');
-        this.loadModelParamsFromSettings().catch((error) => {
-            this.logger.debug(() => `Failed to initialize model params from SettingsService: ${error}`);
-        });
+        // @plan:PLAN-20251023-STATELESS-HARDENING.P08
+        // @requirement:REQ-SP4-002
+        // No constructor-captured state - all values sourced from normalized options per call
     }
-    getSocketSettings() {
-        const settings = this.providerConfig?.getEphemeralSettings?.() || {};
-        const timeoutSetting = settings['socket-timeout'];
-        const keepAliveSetting = settings['socket-keepalive'];
-        const noDelaySetting = settings['socket-nodelay'];
-        const hasExplicitValue = (setting) => setting !== undefined && setting !== null;
-        if (!hasExplicitValue(timeoutSetting) &&
-            !hasExplicitValue(keepAliveSetting) &&
-            !hasExplicitValue(noDelaySetting)) {
+    /**
+     * Create HTTP/HTTPS agents with socket configuration for local AI servers
+     * Returns undefined if no socket settings are configured
+     *
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+     * @requirement:REQ-SP4-003
+     * Now sources ephemeral settings from call options instead of provider config
+     */
+    createHttpAgents(options) {
+        // Get socket configuration from call options or fallback to provider config
+        const settingsFromInvocation = options?.invocation?.ephemerals;
+        const settings = settingsFromInvocation ??
+            this.providerConfig?.getEphemeralSettings?.() ??
+            {};
+        // Check if any socket settings are explicitly configured
+        const hasSocketSettings = 'socket-timeout' in settings ||
+            'socket-keepalive' in settings ||
+            'socket-nodelay' in settings;
+        // Only create custom agents if socket settings are configured
+        if (!hasSocketSettings) {
             return undefined;
         }
-        const timeout = typeof timeoutSetting === 'number' && Number.isFinite(timeoutSetting)
-            ? timeoutSetting
-            : Number(timeoutSetting) > 0
-                ? Number(timeoutSetting)
-                : 60000;
-        const keepAlive = keepAliveSetting === undefined || keepAliveSetting === null
-            ? true
-            : keepAliveSetting !== false;
-        const noDelay = noDelaySetting === undefined || noDelaySetting === null
-            ? true
-            : noDelaySetting !== false;
-        return {
-            timeout,
-            keepAlive,
-            noDelay,
-        };
-    }
-    createSocketAwareFetch(config) {
-        const { timeout, keepAlive, noDelay } = config;
-        const maxRetries = 2;
-        const retryDelay = 1000;
-        const partialResponseThreshold = 2;
-        const buildHeaders = (init) => {
-            const baseHeaders = {
-                Accept: 'text/event-stream',
-                Connection: keepAlive ? 'keep-alive' : 'close',
-                'Cache-Control': 'no-cache',
-            };
-            if (!init?.headers) {
-                return baseHeaders;
-            }
-            const appendHeader = (key, value) => {
-                baseHeaders[key] = value;
-            };
-            const headers = init.headers;
-            if (headers instanceof Headers) {
-                headers.forEach((value, key) => {
-                    appendHeader(key, value);
-                });
-            }
-            else if (Array.isArray(headers)) {
-                headers.forEach(([key, value]) => {
-                    if (typeof value === 'string') {
-                        appendHeader(key, value);
-                    }
-                });
-            }
-            else if (typeof headers === 'object') {
-                Object.entries(headers).forEach(([key, value]) => {
-                    if (typeof value === 'string') {
-                        appendHeader(key, value);
-                    }
-                    else if (Array.isArray(value)) {
-                        appendHeader(key, value.join(', '));
-                    }
-                    else if (value !== undefined && value !== null) {
-                        appendHeader(key, String(value));
-                    }
-                });
-            }
-            return baseHeaders;
-        };
-        const collectResponseHeaders = (rawHeaders) => {
-            const headers = new Headers();
-            for (const [key, value] of Object.entries(rawHeaders)) {
-                if (!key)
-                    continue;
-                if (Array.isArray(value)) {
-                    headers.append(key, value.join(', '));
-                }
-                else if (value !== undefined) {
-                    headers.append(key, value);
-                }
-            }
-            return headers;
-        };
-        const writeRequestBody = (req, body) => {
-            if (!body) {
-                req.end();
-                return;
-            }
-            if (typeof body === 'string' || body instanceof Buffer) {
-                req.write(body);
-                req.end();
-                return;
-            }
-            if (body instanceof ArrayBuffer) {
-                req.write(Buffer.from(body));
-                req.end();
-                return;
-            }
-            if (ArrayBuffer.isView(body)) {
-                req.write(Buffer.from(body.buffer, body.byteOffset, body.byteLength));
-                req.end();
-                return;
-            }
-            try {
-                req.write(body);
-            }
-            catch {
-                req.write(String(body));
-            }
-            req.end();
-        };
-        const delay = (ms) => new Promise((resolve) => {
-            setTimeout(resolve, ms);
+        // Socket configuration with defaults for when settings ARE configured
+        const socketTimeout = settings['socket-timeout'] || 60000; // 60 seconds default
+        const socketKeepAlive = settings['socket-keepalive'] !== false; // true by default
+        const socketNoDelay = settings['socket-nodelay'] !== false; // true by default
+        // Create HTTP agent with socket options
+        const httpAgent = new http.Agent({
+            keepAlive: socketKeepAlive,
+            keepAliveMsecs: 1000,
+            timeout: socketTimeout,
         });
-        const makeRequest = async (url, init, attempt = 0) => new Promise((resolve, reject) => {
-            let parsedUrl;
-            try {
-                parsedUrl = new URL(url);
-            }
-            catch (error) {
-                reject(new Error(`Invalid URL provided to socket-aware fetch: ${url} (${String(error)})`));
-                return;
-            }
-            const isHttps = parsedUrl.protocol === 'https:';
-            const httpModule = isHttps ? https : http;
-            const options = {
-                hostname: parsedUrl.hostname,
-                port: parsedUrl.port ? Number(parsedUrl.port) : isHttps ? 443 : 80,
-                path: `${parsedUrl.pathname}${parsedUrl.search}`,
-                method: init?.method?.toUpperCase() || 'GET',
-                headers: buildHeaders(init),
-            };
-            const req = httpModule.request(options, (res) => {
-                const chunks = [];
-                let chunkCount = 0;
-                res.on('data', (chunk) => {
-                    chunkCount += 1;
-                    if (typeof chunk === 'string') {
-                        chunks.push(Buffer.from(chunk));
-                    }
-                    else {
-                        chunks.push(chunk);
-                    }
-                });
-                res.on('end', () => {
-                    const bodyBuffer = Buffer.concat(chunks);
-                    resolve(new Response(bodyBuffer, {
-                        status: res.statusCode ?? 0,
-                        statusText: res.statusMessage ?? '',
-                        headers: collectResponseHeaders(res.headers),
-                    }));
-                });
-                res.on('error', async (error) => {
-                    if (chunkCount >= partialResponseThreshold &&
-                        attempt < maxRetries) {
-                        await delay(retryDelay);
-                        try {
-                            const retryResponse = await makeRequest(url, init, attempt + 1);
-                            resolve(retryResponse);
-                            return;
-                        }
-                        catch (retryError) {
-                            reject(retryError);
-                            return;
-                        }
-                    }
-                    reject(new Error(`Response stream error: ${String(error)}`));
-                });
-            });
-            req.on('socket', (socket) => {
+        // Create HTTPS agent with socket options
+        const httpsAgent = new https.Agent({
+            keepAlive: socketKeepAlive,
+            keepAliveMsecs: 1000,
+            timeout: socketTimeout,
+        });
+        // Apply TCP_NODELAY if enabled (reduces latency for local servers)
+        if (socketNoDelay) {
+            const originalCreateConnection = httpAgent.createConnection;
+            httpAgent.createConnection = function (options, callback) {
+                const socket = originalCreateConnection.call(this, options, callback);
                 if (socket instanceof net.Socket) {
-                    socket.setTimeout(timeout);
-                    socket.setKeepAlive(keepAlive, 1000);
-                    socket.setNoDelay(noDelay);
+                    socket.setNoDelay(true);
                 }
-            });
-            req.setTimeout(timeout, () => {
-                req.destroy(new Error(`Request timed out after ${timeout}ms`));
-            });
-            if (init?.signal) {
-                const abortHandler = () => {
-                    const abortError = new Error('Request aborted');
-                    abortError.name = 'AbortError';
-                    req.destroy(abortError);
-                };
-                if (init.signal.aborted) {
-                    abortHandler();
-                    return;
-                }
-                init.signal.addEventListener('abort', abortHandler);
-                req.on('close', () => {
-                    init.signal?.removeEventListener('abort', abortHandler);
-                });
-            }
-            req.on('error', async (error) => {
-                if (attempt < maxRetries) {
-                    await delay(retryDelay);
-                    try {
-                        const retryResponse = await makeRequest(url, init, attempt + 1);
-                        resolve(retryResponse);
-                        return;
-                    }
-                    catch (retryError) {
-                        reject(retryError);
-                        return;
-                    }
+                return socket;
+            };
+            const originalHttpsCreateConnection = httpsAgent.createConnection;
+            httpsAgent.createConnection = function (options, callback) {
+                const socket = originalHttpsCreateConnection.call(this, options, callback);
+                if (socket instanceof net.Socket) {
+                    socket.setNoDelay(true);
                 }
-                reject(new Error(`Request failed: ${String(error)}`));
-            });
-            writeRequestBody(req, init?.body ?? null);
-        });
-        return async (input, init) => {
-            const url = typeof input === 'string'
-                ? input
-                : input instanceof URL
-                    ? input.href
-                    : input.url;
-            if (typeof url !== 'string') {
-                return fetch(input, init);
-            }
-            return makeRequest(url, init);
-        };
-    }
-    async loadModelParamsFromSettings() {
-        const params = await this.getModelParamsFromSettings();
-        this.modelParams = params;
-    }
-    async resolveModelParams() {
-        if (this.modelParams) {
-            return this.modelParams;
+                return socket;
+            };
         }
-        const params = await this.getModelParamsFromSettings();
-        if (params) {
-            this.modelParams = params;
+        return { httpAgent, httpsAgent };
+    }
+    /**
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+     * @requirement:REQ-SP4-002
+     * Extract model parameters from normalized options instead of settings service
+     */
+    extractModelParamsFromOptions(options) {
+        const providerSettings = options.settings?.getProviderSettings(this.name) ?? {};
+        const configEphemerals = options.invocation?.ephemerals ?? {};
+        const filteredProviderParams = filterOpenAIRequestParams(providerSettings);
+        const filteredEphemeralParams = filterOpenAIRequestParams(configEphemerals);
+        if (!filteredProviderParams && !filteredEphemeralParams) {
+            return undefined;
        }
-        return params;
+        return {
+            ...(filteredProviderParams ?? {}),
+            ...(filteredEphemeralParams ?? {}),
+        };
     }
     /**
-     * Get or create OpenAI client instance
-     * Will use the API key from resolved auth
-     * @returns OpenAI client instance
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+     * @requirement:REQ-SP4-003
+     * Resolve runtime key from normalized options for client scoping
      */
-    async getClient() {
-        const resolvedKey = await this.getAuthToken();
-        // Use the unified getBaseURL() method from BaseProvider
-        const baseURL = this.getBaseURL();
-        const socketSettings = this.getSocketSettings();
-        const socketKey = socketSettings
-            ? JSON.stringify(socketSettings)
-            : 'default';
-        const clientKey = `${baseURL}-${resolvedKey}-${socketKey}`;
-        // Clear cache if we have no valid auth (e.g., after logout)
-        if (!resolvedKey && this._cachedClient) {
-            this._cachedClient = undefined;
-            this._cachedClientKey = undefined;
+    resolveRuntimeKey(options) {
+        if (options.runtime?.runtimeId) {
+            return options.runtime.runtimeId;
        }
-        // Return cached client if available and auth hasn't changed
-        if (this._cachedClient && this._cachedClientKey === clientKey) {
-            return this._cachedClient;
+        const metadataRuntimeId = options.metadata?.runtimeId;
+        if (typeof metadataRuntimeId === 'string' && metadataRuntimeId.trim()) {
+            return metadataRuntimeId.trim();
        }
-        const baseOptions = {
-            apiKey: resolvedKey || '',
-            baseURL,
-            // CRITICAL: Disable OpenAI SDK's built-in retries so our retry logic can handle them
-            // This allows us to track throttle wait times properly
+        const callId = options.settings.get('call-id');
+        if (typeof callId === 'string' && callId.trim()) {
+            return `call:${callId.trim()}`;
+        }
+        return 'openai.runtime.unscoped';
+    }
+    /**
+     * Tool formatter instances cannot be shared between stateless calls,
+     * so construct a fresh one for every invocation.
+     *
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+     * @requirement:REQ-SP4-003
+     */
+    createToolFormatter() {
+        return new ToolFormatter();
+    }
+    /**
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P09
+     * @requirement:REQ-SP4-002
+     * Instantiates a fresh OpenAI client per call to preserve stateless behaviour.
+     */
+    instantiateClient(authToken, baseURL, agents) {
+        const clientOptions = {
+            apiKey: authToken || '',
             maxRetries: 0,
         };
-        if (socketSettings) {
-            baseOptions.timeout = socketSettings.timeout;
-            baseOptions.fetch = this.createSocketAwareFetch(socketSettings);
+        if (baseURL && baseURL.trim() !== '') {
+            clientOptions.baseURL = baseURL;
+        }
+        if (agents) {
+            clientOptions.httpAgent = agents.httpAgent;
+            clientOptions.httpsAgent = agents.httpsAgent;
        }
-        // Create new client with current auth and optional socket configuration
-        // Cast to unknown then to the expected type to bypass TypeScript's structural checking
-        this._cachedClient = new OpenAI(baseOptions);
-        this._cachedClientKey = clientKey;
-        return this._cachedClient;
+        return new OpenAI(clientOptions);
+    }
+    /**
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P09
+     * @requirement:REQ-SP4-002
+     * Creates a client scoped to the active runtime metadata without caching.
+     */
+    async getClient(options) {
+        const authToken = (await resolveRuntimeAuthToken(options.resolved.authToken)) ?? '';
+        if (!authToken) {
+            throw new Error(`ProviderCacheError("Auth token unavailable for runtimeId=${options.runtime?.runtimeId} (REQ-SP4-003).")`);
+        }
+        const baseURL = options.resolved.baseURL ?? this.baseProviderConfig.baseURL;
+        const agents = this.createHttpAgents(options);
+        return this.instantiateClient(authToken, baseURL, agents);
     }
     /**
      * Check if OAuth is supported for this provider
      * Qwen endpoints support OAuth, standard OpenAI does not
      */
     supportsOAuth() {
+        const providerConfig = this.providerConfig;
+        if (providerConfig?.forceQwenOAuth) {
+            return true;
+        }
         // CRITICAL FIX: Check provider name first for cases where base URL is changed by profiles
         // This handles the cerebrasqwen3 profile case where base-url is changed to cerebras.ai
         // but the provider name is still 'qwen' due to Object.defineProperty override
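Taken together, this hunk removes every piece of constructor-captured state (`logger`, `toolFormatter`, `_cachedClient`, `_cachedClientKey`, `modelParams`) and replaces the cached-client scheme with per-call construction. A minimal sketch of driving the new path, assuming an options shape inferred only from the fields the diff reads (`resolved`, `runtime`, `invocation`, `settings`); none of this is documented public API:

```js
// Hypothetical caller code; the options shape is an assumption, not package API.
import { OpenAIProvider } from '@vybestack/llxprt-code-core';

const provider = new OpenAIProvider(process.env.OPENAI_API_KEY);
const options = {
  resolved: {
    authToken: process.env.OPENAI_API_KEY, // unwrapped per call by resolveRuntimeAuthToken
    baseURL: 'http://localhost:8080/v1',   // hypothetical local endpoint
    model: 'gpt-4o-mini',
  },
  runtime: { runtimeId: 'session-42' },    // first branch taken in resolveRuntimeKey
  invocation: {
    ephemerals: { 'socket-timeout': 120000 }, // makes createHttpAgents return agents
  },
  settings: { get: () => undefined, getProviderSettings: () => ({}) },
};

// No caching: each call rebuilds the agents and the OpenAI client from `options`,
// and a missing auth token now throws instead of yielding a keyless cached client.
const client = await provider.getClient(options);
```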
@@ -370,7 +234,10 @@ export class OpenAIProvider extends BaseProvider {
         try {
             // Always try to fetch models, regardless of auth status
             // Local endpoints often work without authentication
-            const client = await this.getClient();
+            const authToken = await this.getAuthToken();
+            const baseURL = this.getBaseURL();
+            const agents = this.createHttpAgents();
+            const client = this.instantiateClient(authToken, baseURL, agents);
             const response = await client.models.list();
             const models = [];
             for await (const model of response) {
@@ -387,7 +254,7 @@ export class OpenAIProvider extends BaseProvider {
             return models;
         }
         catch (error) {
-            this.logger.debug(() => `Error fetching models from OpenAI: ${error}`);
+            this.getLogger().debug(() => `Error fetching models from OpenAI: ${error}`);
             // Return a hardcoded list as fallback
             return this.getFallbackModels();
         }
@@ -397,9 +264,6 @@ export class OpenAIProvider extends BaseProvider {
     }
     getDefaultModel() {
         // Return hardcoded default - do NOT call getModel() to avoid circular dependency
-        if (this.providerConfig?.defaultModel) {
-            return this.providerConfig.defaultModel;
-        }
         // Check if this is a Qwen provider instance based on baseURL
         const baseURL = this.getBaseURL();
         if (baseURL &&
@@ -408,15 +272,6 @@ export class OpenAIProvider extends BaseProvider {
         }
         return process.env.LLXPRT_DEFAULT_MODEL || 'gpt-5';
     }
-    /**
-     * Set the model to use for this provider
-     * This updates the model in ephemeral settings so it's immediately available
-     */
-    setModel(modelId) {
-        const settingsService = getSettingsService();
-        settingsService.set('model', modelId);
-        this.logger.debug(() => `Model set to: ${modelId}`);
-    }
     /**
      * Get the currently selected model
      */
@@ -424,32 +279,51 @@ export class OpenAIProvider extends BaseProvider {
         return this.getModel();
     }
     /**
-     * Clear the cached OpenAI client
-     * Should be called when authentication state changes (e.g., after logout)
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P09
+     * @requirement:REQ-SP4-002
+     * No-op retained for compatibility because clients are no longer cached.
      */
     // eslint-disable-next-line @typescript-eslint/explicit-member-accessibility
-    clearClientCache() {
-        this._cachedClient = undefined;
-        this._cachedClientKey = undefined;
+    clearClientCache(runtimeKey) {
+        void runtimeKey;
     }
     /**
      * Override isAuthenticated for qwen provider to check OAuth directly
      */
     async isAuthenticated() {
         const config = this.providerConfig;
+        const directApiKey = this.baseProviderConfig.apiKey;
+        if (typeof directApiKey === 'string' && directApiKey.trim() !== '') {
+            return true;
+        }
+        try {
+            const nonOAuthToken = await this.authResolver.resolveAuthentication({
+                settingsService: this.resolveSettingsService(),
+                includeOAuth: false,
+            });
+            if (typeof nonOAuthToken === 'string' && nonOAuthToken.trim() !== '') {
+                return true;
+            }
+        }
+        catch (error) {
+            if (process.env.DEBUG) {
+                this.getLogger().debug(() => `[openai] non-OAuth authentication resolution failed: ${error instanceof Error ? error.message : String(error)}`);
+            }
+        }
         if (this.name === 'qwen' && config?.forceQwenOAuth) {
-            // For qwen with forceQwenOAuth, check OAuth directly
-            if (this.baseProviderConfig.oauthManager) {
-                try {
-                    const oauthProviderName = this.baseProviderConfig.oauthProvider || 'qwen';
-                    const token = await this.baseProviderConfig.oauthManager.getToken(oauthProviderName);
-                    return token !== null;
-                }
-                catch {
-                    return false;
+            try {
+                const token = await this.authResolver.resolveAuthentication({
+                    settingsService: this.resolveSettingsService(),
+                    includeOAuth: true,
+                });
+                return typeof token === 'string' && token.trim() !== '';
+            }
+            catch (error) {
+                if (process.env.DEBUG) {
+                    this.getLogger().debug(() => `[openai] forced OAuth authentication failed: ${error instanceof Error ? error.message : String(error)}`);
                 }
+                return false;
             }
-            return false;
         }
         // For non-qwen providers, use the normal check
         return super.isAuthenticated();
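The net effect is a fixed credential precedence before any OAuth round trip: a constructor-supplied key, then any non-OAuth source via `authResolver.resolveAuthentication({ includeOAuth: false })`, and only for qwen with `forceQwenOAuth` an OAuth lookup. A sketch of the observable behaviour, assuming the resolver consults the `OPENAI_API_KEY` environment fallback declared in the constructor:

```js
// Illustrative only, not a test from the package.
const keyed = new OpenAIProvider('sk-test');
console.log(await keyed.isAuthenticated()); // true: the direct key short-circuits

// With no direct key, resolveAuthentication({ includeOAuth: false }) runs first,
// so an OPENAI_API_KEY in the environment also answers true; only the qwen
// provider with forceQwenOAuth falls through to the OAuth token lookup.
const bare = new OpenAIProvider(undefined);
console.log(await bare.isAuthenticated());
```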
@@ -462,15 +336,6 @@ export class OpenAIProvider extends BaseProvider {
         // If this is the qwen provider and we have forceQwenOAuth, skip SettingsService checks
         const config = this.providerConfig;
         if (this.name === 'qwen' && config?.forceQwenOAuth) {
-            // Check cache first (short-lived cache to avoid repeated OAuth calls)
-            if (this.cachedAuthToken &&
-                this.authCacheTimestamp &&
-                Date.now() - this.authCacheTimestamp < this.AUTH_CACHE_DURATION) {
-                return this.cachedAuthToken;
-            }
-            // Clear stale cache
-            this.cachedAuthToken = undefined;
-            this.authCacheTimestamp = undefined;
             // For qwen, skip directly to OAuth without checking SettingsService
             // Use 'qwen' as the provider name even if baseProviderConfig.oauthProvider is not set
             const oauthProviderName = this.baseProviderConfig.oauthProvider || 'qwen';
@@ -478,9 +343,6 @@ export class OpenAIProvider extends BaseProvider {
             try {
                 const token = await this.baseProviderConfig.oauthManager.getToken(oauthProviderName);
                 if (token) {
-                    // Cache the token briefly
-                    this.cachedAuthToken = token;
-                    this.authCacheTimestamp = Date.now();
                     return token;
                 }
             }
@@ -558,25 +420,36 @@ export class OpenAIProvider extends BaseProvider {
         return 'hist_tool_' + id;
     }
     /**
-     * Generate chat completion with IContent interface
-     * Internally converts to OpenAI API format, but only yields IContent
-     * @param contents Array of content blocks (text and tool_call)
-     * @param tools Array of available tools
+     * @plan PLAN-20250218-STATELESSPROVIDER.P04
+     * @requirement REQ-SP-001
+     * @pseudocode base-provider.md lines 7-15
+     * @pseudocode provider-invocation.md lines 8-12
+     */
+    /**
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P09
+     * @requirement:REQ-SP4-002
+     * Generate chat completion with per-call client instantiation.
      */
-    async *generateChatCompletion(contents, tools) {
+    async *generateChatCompletionWithOptions(options) {
+        const callFormatter = this.createToolFormatter();
+        const client = await this.getClient(options);
+        const runtimeKey = this.resolveRuntimeKey(options);
+        const { tools } = options;
+        const logger = new DebugLogger('llxprt:provider:openai');
         // Debug log what we receive
-        if (this.logger.enabled) {
-            this.logger.debug(() => `[OpenAIProvider] generateChatCompletion received tools:`, {
+        if (logger.enabled) {
+            logger.debug(() => `[OpenAIProvider] generateChatCompletion received tools:`, {
                 hasTools: !!tools,
                 toolsLength: tools?.length,
                 toolsType: typeof tools,
                 isArray: Array.isArray(tools),
                 firstToolName: tools?.[0]?.functionDeclarations?.[0]?.name,
                 toolsStructure: tools ? 'available' : 'undefined',
+                runtimeKey,
             });
         }
-        // Pass tools directly in Gemini format - they'll be converted in generateChatCompletionImpl
-        const generator = this.generateChatCompletionImpl(contents, tools, undefined, undefined, undefined);
+        // Pass tools directly in Gemini format - they'll be converted per call
+        const generator = this.generateChatCompletionImpl(options, callFormatter, client, logger);
         for await (const item of generator) {
             yield item;
         }
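Callers migrating from `generateChatCompletion(contents, tools)` consume the renamed generator unchanged; the contents and tools simply travel inside the options object. A hedged sketch, reusing the hypothetical `options` from the `getClient()` example above plus the `contents`/`tools` fields this method destructures:

```js
// Hypothetical consumer; the chunk shape follows the replaced doc comment,
// which said only IContent blocks (text and tool_call) are ever yielded.
for await (const chunk of provider.generateChatCompletionWithOptions({
  ...options,
  contents: [{ speaker: 'human', blocks: [{ type: 'text', text: 'hello' }] }], // assumed IContent shape
  tools: undefined,
})) {
  process.stdout.write(JSON.stringify(chunk) + '\n');
}
```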
@@ -646,27 +519,44 @@ export class OpenAIProvider extends BaseProvider {
         return messages;
     }
     /**
-     * Internal implementation for chat completion
+     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+     * @requirement:REQ-SP4-003
+     * Internal implementation for chat completion using normalized options
      */
-    async *generateChatCompletionImpl(contents, tools, maxTokens, abortSignal, modelName) {
-        // Always look up model from SettingsService
-        const model = modelName || this.getModel() || this.getDefaultModel();
+    async *generateChatCompletionImpl(options, toolFormatter, client, logger) {
+        const { contents, tools, metadata } = options;
+        const model = options.resolved.model || this.getDefaultModel();
+        const abortSignal = metadata?.abortSignal;
+        const ephemeralSettings = options.invocation?.ephemerals ?? {};
+        if (logger.enabled) {
+            const resolved = options.resolved;
+            logger.debug(() => `[OpenAIProvider] Resolved request context`, {
+                provider: this.name,
+                model,
+                resolvedModel: resolved.model,
+                resolvedBaseUrl: resolved.baseURL,
+                authTokenPresent: Boolean(resolved.authToken),
+                messageCount: contents.length,
+                toolCount: tools?.length ?? 0,
+                metadataKeys: Object.keys(metadata ?? {}),
+            });
+        }
         // Convert IContent to OpenAI messages format
         const messages = this.convertToOpenAIMessages(contents);
         // Detect the tool format to use (once at the start of the method)
         const detectedFormat = this.detectToolFormat();
         // Log the detected format for debugging
-        this.logger.debug(() => `[OpenAIProvider] Using tool format '${detectedFormat}' for model '${model}'`, {
+        logger.debug(() => `[OpenAIProvider] Using tool format '${detectedFormat}' for model '${model}'`, {
             model,
             detectedFormat,
             provider: this.name,
         });
         // Convert Gemini format tools to the detected format
-        let formattedTools = this.toolFormatter.convertGeminiToFormat(tools, detectedFormat);
+        let formattedTools = toolFormatter.convertGeminiToFormat(tools, detectedFormat);
         // CRITICAL FIX: Ensure we never pass an empty tools array
         // The OpenAI API errors when tools=[] but a tool call is attempted
         if (Array.isArray(formattedTools) && formattedTools.length === 0) {
-            this.logger.warn(() => `[OpenAIProvider] CRITICAL: Formatted tools is empty array! Setting to undefined to prevent API errors.`, {
+            logger.warn(() => `[OpenAIProvider] CRITICAL: Formatted tools is empty array! Setting to undefined to prevent API errors.`, {
                 model,
                 inputTools: tools,
                 inputToolsLength: tools?.length,
@@ -676,8 +566,8 @@ export class OpenAIProvider extends BaseProvider {
             formattedTools = undefined;
         }
         // Debug log the conversion result - enhanced logging for intermittent issues
-        if (this.logger.enabled && formattedTools) {
-            this.logger.debug(() => `[OpenAIProvider] Tool conversion summary:`, {
+        if (logger.enabled && formattedTools) {
+            logger.debug(() => `[OpenAIProvider] Tool conversion summary:`, {
                 detectedFormat,
                 inputHadTools: !!tools,
                 inputToolsLength: tools?.length,
@@ -689,58 +579,61 @@ export class OpenAIProvider extends BaseProvider {
             });
         }
         // Get streaming setting from ephemeral settings (default: enabled)
-        const streamingSetting = this.providerConfig?.getEphemeralSettings?.()?.['streaming'];
+        const streamingSetting = ephemeralSettings['streaming'];
         const streamingEnabled = streamingSetting !== 'disabled';
         // Get the system prompt
         const flattenedToolNames = tools?.flatMap((group) => group.functionDeclarations
             .map((decl) => decl.name)
             .filter((name) => !!name)) ?? [];
         const toolNamesArg = tools === undefined ? undefined : Array.from(new Set(flattenedToolNames));
-        const userMemory = this.globalConfig?.getUserMemory
-            ? this.globalConfig.getUserMemory()
-            : '';
+        /**
+         * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+         * @requirement:REQ-SP4-003
+         * Source user memory from normalized options instead of global config
+         */
+        const userMemory = await resolveUserMemory(options.userMemory, () => options.invocation?.userMemory);
         const systemPrompt = await getCoreSystemPromptAsync(userMemory, model, toolNamesArg);
         // Add system prompt as the first message in the array
         const messagesWithSystem = [
             { role: 'system', content: systemPrompt },
             ...messages,
         ];
+        const maxTokens = metadata?.maxTokens ??
+            ephemeralSettings['max-tokens'];
         // Build request - only include tools if they exist and are not empty
         // IMPORTANT: Create a deep copy of tools to prevent mutation issues
         const requestBody = {
             model,
             messages: messagesWithSystem,
-            ...(formattedTools && formattedTools.length > 0
-                ? {
-                    // Deep clone the tools array to prevent any mutation issues
-                    tools: JSON.parse(JSON.stringify(formattedTools)),
-                    // Add tool_choice for Qwen/Cerebras to ensure proper tool calling
-                    tool_choice: 'auto',
-                }
-                : {}),
-            max_tokens: maxTokens,
             stream: streamingEnabled,
         };
-        // Special handling for Cerebras GLM: need a user message with content in the request body
-        // This is a workaround for a Cerebras bug where they block calls without text
-        // even though it's a tool response that shouldn't require it.
-        if (this.getModel()?.toLowerCase().includes('glm') &&
-            this.getBaseURL()?.includes('cerebras') &&
-            formattedTools &&
-            formattedTools.length > 0) {
-            // Add a dummy user message with content to bypass Cerebras validation
-            requestBody.messages.push({ role: 'user', content: '\n' });
+        if (formattedTools && formattedTools.length > 0) {
+            requestBody.tools = JSON.parse(JSON.stringify(formattedTools));
+            requestBody.tool_choice = 'auto';
+        }
+        /**
+         * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+         * @requirement:REQ-SP4-002
+         * Extract per-call request overrides from normalized options instead of cached state
+         */
+        const requestOverrides = this.extractModelParamsFromOptions(options);
+        if (requestOverrides) {
+            if (logger.enabled) {
+                logger.debug(() => `[OpenAIProvider] Applying request overrides`, {
+                    overrideKeys: Object.keys(requestOverrides),
+                });
+            }
+            Object.assign(requestBody, requestOverrides);
         }
-        const modelParams = await this.resolveModelParams();
-        if (modelParams) {
-            Object.assign(requestBody, modelParams);
+        if (typeof maxTokens === 'number' && Number.isFinite(maxTokens)) {
+            requestBody.max_tokens = maxTokens;
         }
         // Debug log request summary for Cerebras/Qwen
-        if (this.logger.enabled &&
-            (model.toLowerCase().includes('qwen') ||
-                this.getBaseURL()?.includes('cerebras'))) {
-            this.logger.debug(() => `Request to ${this.getBaseURL()} for model ${model}:`, {
-                baseURL: this.getBaseURL(),
+        const baseURL = options.resolved.baseURL ?? this.getBaseURL();
+        if (logger.enabled &&
+            (model.toLowerCase().includes('qwen') || baseURL?.includes('cerebras'))) {
+            logger.debug(() => `Request to ${baseURL} for model ${model}:`, {
+                baseURL,
                 model,
                 streamingEnabled,
                 hasTools: 'tools' in requestBody,
@@ -749,10 +642,7 @@ export class OpenAIProvider extends BaseProvider {
                 toolsInRequest: 'tools' in requestBody ? requestBody.tools?.length : 'not included',
             });
         }
-        // Get OpenAI client
-        const client = await this.getClient();
         // Get retry settings from ephemeral settings
-        const ephemeralSettings = this.providerConfig?.getEphemeralSettings?.() || {};
         const maxRetries = ephemeralSettings['retries'] ?? 6; // Default for OpenAI
         const initialDelayMs = ephemeralSettings['retrywait'] ?? 4000; // Default for OpenAI
         // Get stream options from ephemeral settings (default: include usage for token tracking)
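Merge order matters in the rebuilt request: `extractModelParamsFromOptions` spreads provider settings first and ephemerals second, so per-call ephemerals win, while `max-tokens` bypasses that merge and only lands as `requestBody.max_tokens` when it is a finite number. A sketch with illustrative inputs, assuming `filterOpenAIRequestParams` passes through OpenAI parameter names such as `temperature` and drops the rest:

```js
// Not package fixtures; values chosen to show the override order.
const options = {
  settings: { getProviderSettings: () => ({ temperature: 0.2, top_p: 0.9 }) },
  invocation: { ephemerals: { temperature: 0.7, 'max-tokens': 2048 } },
  metadata: {},
};
// extractModelParamsFromOptions(options) -> { temperature: 0.7, top_p: 0.9 }
// (the ephemeral temperature overrides the provider setting), and
// requestBody.max_tokens becomes 2048 via the separate Number.isFinite guard.
```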
@@ -762,41 +652,43 @@ export class OpenAIProvider extends BaseProvider {
             Object.assign(requestBody, { stream_options: streamOptions });
         }
         // Log the exact tools being sent for debugging
-        if (this.logger.enabled && 'tools' in requestBody) {
-            this.logger.debug(() => `[OpenAIProvider] Exact tools being sent to API:`, {
+        if (logger.enabled && 'tools' in requestBody) {
+            logger.debug(() => `[OpenAIProvider] Exact tools being sent to API:`, {
                 toolCount: requestBody.tools?.length,
                 toolNames: requestBody.tools?.map((t) => 'function' in t ? t.function?.name : undefined),
                 firstTool: requestBody.tools?.[0],
             });
         }
         // Wrap the API call with retry logic using centralized retry utility
+        if (logger.enabled) {
+            logger.debug(() => `[OpenAIProvider] Sending chat request`, {
+                model,
+                baseURL: baseURL ?? this.getBaseURL(),
+                streamingEnabled,
+                toolCount: formattedTools?.length ?? 0,
+                hasAuthToken: Boolean(options.resolved.authToken),
+                requestHasSystemPrompt: Boolean(systemPrompt?.length),
+                messageCount: messagesWithSystem.length,
+            });
+        }
         let response;
         // Debug log throttle tracker status
-        this.logger.debug(() => `Retry configuration:`, {
+        logger.debug(() => `Retry configuration:`, {
             hasThrottleTracker: !!this.throttleTracker,
             throttleTrackerType: typeof this.throttleTracker,
             maxRetries,
             initialDelayMs,
         });
         const customHeaders = this.getCustomHeaders();
-        // Log the request body before making the API call
-        this.logger.debug(() => `[OpenAIProvider] Request body:`, {
-            model: requestBody.model,
-            messageCount: requestBody.messages.length,
-            hasTools: 'tools' in requestBody &&
-                requestBody.tools &&
-                requestBody.tools.length > 0,
-            toolCount: 'tools' in requestBody ? requestBody.tools?.length : 0,
-            streaming: requestBody.stream,
-            lastThreeMessages: requestBody.messages.slice(-3),
-            messagesWithToolCalls: requestBody.messages.filter((m) => 'tool_calls' in m && m.tool_calls),
-            messagesWithToolRole: requestBody.messages.filter((m) => m.role === 'tool'),
-            fullRequestBody: requestBody,
-        });
-        // Check if dumponerror is enabled from either CLI flag or ephemeral setting
-        const ephemeralSettingsForDump = this.providerConfig?.getEphemeralSettings?.() || {};
-        const dumpOnError = this.globalConfig?.getDumpOnError?.() ||
-            ephemeralSettingsForDump['dumponerror'] === 'enabled';
+        if (logger.enabled) {
+            logger.debug(() => `[OpenAIProvider] Request body preview`, {
+                model: requestBody.model,
+                hasStop: 'stop' in requestBody,
+                hasMaxTokens: 'max_tokens' in requestBody,
+                hasResponseFormat: 'response_format' in requestBody,
+                overrideKeys: requestOverrides ? Object.keys(requestOverrides) : [],
+            });
+        }
         try {
             response = await retryWithBackoff(() => client.chat.completions.create(requestBody, {
                 ...(abortSignal ? { signal: abortSignal } : {}),
@@ -809,59 +701,12 @@ export class OpenAIProvider extends BaseProvider {
             });
         }
         catch (error) {
-            // Log the error details
-            this.logger.error(() => `[OpenAIProvider] API call failed:`, {
-                error,
-                errorType: error?.constructor?.name,
-                errorMessage: error instanceof Error ? error.message : String(error),
-                errorStatus: error?.status,
-                errorHeaders: error?.headers,
-                errorBody: error?.error,
-                model,
-                baseURL: this.getBaseURL(),
-            });
-            // Dump request body on error if enabled
-            if (dumpOnError) {
-                try {
-                    const fs = await import('fs');
-                    const path = await import('path');
-                    const os = await import('os');
-                    const homeDir = os.homedir();
-                    const dumpDir = path.join(homeDir, '.llxprt', 'dumps');
-                    // Ensure dumps directory exists
-                    if (!fs.existsSync(dumpDir)) {
-                        fs.mkdirSync(dumpDir, { recursive: true });
-                    }
-                    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
-                    const dumpFilePath = path.join(dumpDir, `request-dump-${timestamp}.json`);
-                    const dumpContent = JSON.stringify({
-                        timestamp: new Date().toISOString(),
-                        error: {
-                            message: error instanceof Error ? error.message : String(error),
-                            status: error?.status,
-                            type: error?.type,
-                            code: error?.code,
-                            param: error?.param,
-                            fullError: error,
-                        },
-                        request: requestBody,
-                        baseURL: this.getBaseURL(),
-                        model,
-                        notes: 'This dump contains the ACTUAL request sent to the API in OpenAI format. Messages with role:tool have tool_call_id set.',
-                    }, null, 2);
-                    fs.writeFileSync(dumpFilePath, dumpContent, 'utf-8');
-                    this.logger.debug(() => `Request body dumped to ${dumpFilePath} (error occurred)`);
-                }
-                catch (dumpError) {
-                    this.logger.debug(() => `Failed to dump request body: ${dumpError}`);
-                }
-            }
             // Special handling for Cerebras/Qwen "Tool not present" errors
             const errorMessage = String(error);
             if (errorMessage.includes('Tool is not present in the tools list') &&
                 (model.toLowerCase().includes('qwen') ||
                     this.getBaseURL()?.includes('cerebras'))) {
-                this.logger.error('Cerebras/Qwen API error: Tool not found despite being in request. This is a known API issue.', {
+                logger.error('Cerebras/Qwen API error: Tool not found despite being in request. This is a known API issue.', {
                     error,
                     model,
                     toolsProvided: formattedTools?.length || 0,
@@ -875,13 +720,23 @@ export class OpenAIProvider extends BaseProvider {
                 throw enhancedError;
             }
             // Re-throw other errors as-is
+            const capturedErrorMessage = error instanceof Error ? error.message : String(error);
+            const status = typeof error === 'object' &&
+                error !== null &&
+                'status' in error &&
+                typeof error.status === 'number'
+                ? error.status
+                : undefined;
+            logger.error(() => `[OpenAIProvider] Chat completion failed for model '${model}' at '${baseURL ?? this.getBaseURL() ?? 'default'}': ${capturedErrorMessage}`, {
+                model,
+                baseURL: baseURL ?? this.getBaseURL(),
+                streamingEnabled,
+                hasTools: formattedTools?.length ?? 0,
+                requestHasSystemPrompt: !!systemPrompt,
+                status,
+            });
             throw error;
         }
-        // Log successful response start
-        this.logger.debug(() => `[OpenAIProvider] API call succeeded, processing response...`, {
-            streaming: streamingEnabled,
-            model,
-        });
         // Check if response is streaming or not
         if (streamingEnabled) {
             // Process streaming response
@@ -909,7 +764,7 @@ export class OpenAIProvider extends BaseProvider {
                         continue;
                     // Check for finish_reason to detect proper stream ending
                     if (choice.finish_reason) {
-                        this.logger.debug(() => `[Streaming] Stream finished with reason: ${choice.finish_reason}`, {
+                        logger.debug(() => `[Streaming] Stream finished with reason: ${choice.finish_reason}`, {
                             model,
                             finishReason: choice.finish_reason,
                             hasAccumulatedText: _accumulatedText.length > 0,
@@ -918,7 +773,7 @@ export class OpenAIProvider extends BaseProvider {
                         });
                         // If finish_reason is 'length', the response was cut off
                         if (choice.finish_reason === 'length') {
-                            this.logger.debug(() => `Response truncated due to length limit for model ${model}`);
+                            logger.debug(() => `Response truncated due to length limit for model ${model}`);
                         }
                         // Flush any buffered text when stream finishes
                         if (textBuffer.length > 0) {
@@ -940,7 +795,7 @@ export class OpenAIProvider extends BaseProvider {
                         _accumulatedText += deltaContent;
                         // Debug log for providers that need buffering
                         if (shouldBufferText) {
-                            this.logger.debug(() => `[Streaming] Chunk content for ${detectedFormat} format:`, {
+                            logger.debug(() => `[Streaming] Chunk content for ${detectedFormat} format:`, {
                                 deltaContent,
                                 length: deltaContent.length,
                                 hasNewline: deltaContent.includes('\n'),
@@ -951,7 +806,8 @@ export class OpenAIProvider extends BaseProvider {
                             textBuffer += deltaContent;
                             // Emit buffered text when we have a complete sentence or paragraph
                             // Look for natural break points
-                            if (textBuffer.endsWith('. ') ||
+                            if (textBuffer.includes('\n') ||
+                                textBuffer.endsWith('. ') ||
                                 textBuffer.endsWith('! ') ||
                                 textBuffer.endsWith('? ') ||
                                 textBuffer.length > 100) {
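The only behavioural change in this hunk is the extra newline trigger. Isolated as a sketch, the flush predicate for buffered streaming text now reads:

```js
// Same conditions as the diff above, extracted for readability.
function shouldFlushBuffer(textBuffer) {
  return (
    textBuffer.includes('\n') ||  // new: any newline flushes immediately
    textBuffer.endsWith('. ') ||
    textBuffer.endsWith('! ') ||
    textBuffer.endsWith('? ') ||
    textBuffer.length > 100
  );
}
```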
@@ -1021,7 +877,7 @@ export class OpenAIProvider extends BaseProvider {
                 if (errorMessage.includes('Tool is not present in the tools list') &&
                     (model.toLowerCase().includes('qwen') ||
                         this.getBaseURL()?.includes('cerebras'))) {
-                    this.logger.error('Cerebras/Qwen API error: Tool not found despite being in request. This is a known API issue.', {
+                    logger.error('Cerebras/Qwen API error: Tool not found despite being in request. This is a known API issue.', {
                         error,
                         model,
                         toolsProvided: formattedTools?.length || 0,
@@ -1032,7 +888,7 @@ export class OpenAIProvider extends BaseProvider {
1032
888
  enhancedError.originalError = error;
1033
889
  throw enhancedError;
1034
890
  }
1035
- this.logger.error('Error processing streaming response:', error);
891
+ logger.error('Error processing streaming response:', error);
1036
892
  throw error;
1037
893
  }
1038
894
  }
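The Cerebras/Qwen branch rethrows a descriptive error that keeps a reference to the underlying failure via `originalError`, as the hunk shows. A generic wrap-and-rethrow sketch; the type name and message here are illustrative:

    // Sketch: wrap a low-level provider error with context, preserving the original.
    interface EnhancedError extends Error {
      originalError?: unknown;
    }

    function wrapProviderError(error: unknown, context: string): EnhancedError {
      const enhanced = new Error(context) as EnhancedError;
      enhanced.originalError = error; // keep the raw error for diagnostics
      return enhanced;
    }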
@@ -1111,7 +967,7 @@ export class OpenAIProvider extends BaseProvider {
  }
  // Log finish reason for debugging Qwen issues
  if (choice.finish_reason) {
- this.logger.debug(() => `[Non-streaming] Response finish_reason: ${choice.finish_reason}`, {
+ logger.debug(() => `[Non-streaming] Response finish_reason: ${choice.finish_reason}`, {
  model,
  finishReason: choice.finish_reason,
  hasContent: !!choice.message?.content,
@@ -1122,7 +978,7 @@ export class OpenAIProvider extends BaseProvider {
  });
  // Warn if the response was truncated
  if (choice.finish_reason === 'length') {
- this.logger.warn(() => `Response truncated due to max_tokens limit for model ${model}. Consider increasing max_tokens.`);
+ logger.warn(() => `Response truncated due to max_tokens limit for model ${model}. Consider increasing max_tokens.`);
  }
  }
  const blocks = [];
@@ -1188,103 +1044,89 @@ export class OpenAIProvider extends BaseProvider {
  }
  }
  /**
- * Update model parameters and persist them in the SettingsService.
+ * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+ * @requirement:REQ-SP4-002
+ * Memoization of model parameters disabled for stateless provider
  */
- setModelParams(params) {
- if (params === undefined) {
- this.modelParams = undefined;
- this.setModelParamsInSettings(undefined).catch((error) => {
- this.logger.debug(() => `Failed to clear model params in SettingsService: ${error}`);
- });
- return;
- }
- const updated = { ...(this.modelParams ?? {}) };
- for (const [key, value] of Object.entries(params)) {
- if (value === undefined || value === null) {
- delete updated[key];
- }
- else {
- updated[key] = value;
- }
- }
- this.modelParams = Object.keys(updated).length > 0 ? updated : undefined;
- this.setModelParamsInSettings(this.modelParams).catch((error) => {
- this.logger.debug(() => `Failed to persist model params to SettingsService: ${error}`);
- });
+ setModelParams(_params) {
+ throw new Error('ProviderCacheError("Attempted to memoize model parameters for openai")');
  }
+ /**
+ * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+ * @requirement:REQ-SP4-003
+ * Gets model parameters from SettingsService per call (stateless)
+ */
  getModelParams() {
- return this.modelParams;
+ try {
+ const settingsService = this.resolveSettingsService();
+ const providerSettings = settingsService.getProviderSettings(this.name);
+ const reservedKeys = new Set([
+ 'enabled',
+ 'apiKey',
+ 'api-key',
+ 'apiKeyfile',
+ 'api-keyfile',
+ 'baseUrl',
+ 'base-url',
+ 'model',
+ 'toolFormat',
+ 'tool-format',
+ 'toolFormatOverride',
+ 'tool-format-override',
+ 'defaultModel',
+ ]);
+ const params = {};
+ if (providerSettings) {
+ for (const [key, value] of Object.entries(providerSettings)) {
+ if (reservedKeys.has(key) || value === undefined || value === null) {
+ continue;
+ }
+ params[key] = value;
+ }
+ }
+ return Object.keys(params).length > 0 ? params : undefined;
+ }
+ catch (error) {
+ this.getLogger().debug(() => `Failed to get OpenAI provider settings from SettingsService: ${error}`);
+ return undefined;
+ }
  }
  /**
- * Get the tool format for this provider
+ * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+ * @requirement:REQ-SP4-003
+ * Get the tool format for this provider using normalized options
  * @returns The tool format to use
  */
  getToolFormat() {
  const format = this.detectToolFormat();
- this.logger.debug(() => `getToolFormat() called, returning: ${format}`, {
+ const logger = new DebugLogger('llxprt:provider:openai');
+ logger.debug(() => `getToolFormat() called, returning: ${format}`, {
  provider: this.name,
  model: this.getModel(),
  format,
  });
  return format;
  }
- /**
- * Set tool format override for this provider
- * @param format The format to use, or null to clear override
- */
- setToolFormatOverride(format) {
- const settingsService = getSettingsService();
- if (format === null) {
- settingsService.setProviderSetting(this.name, 'toolFormat', 'auto');
- this.logger.debug(() => `Tool format override cleared for ${this.name}`);
- }
- else {
- settingsService.setProviderSetting(this.name, 'toolFormat', format);
- this.logger.debug(() => `Tool format override set to '${format}' for ${this.name}`);
- }
- // Clear cached client to ensure new format takes effect
- this._cachedClient = undefined;
- this._cachedClientKey = undefined;
- }
  /**
  * Detects the tool call format based on the model being used
  * @returns The detected tool format ('openai' or 'qwen')
  */
  detectToolFormat() {
- try {
- // Check for toolFormat override in provider settings
- const settingsService = getSettingsService();
- const currentSettings = settingsService['settings'];
- const providerSettings = currentSettings?.providers?.[this.name];
- const toolFormatOverride = providerSettings?.toolFormat;
- // If explicitly set to a specific format (not 'auto'), use it
- if (toolFormatOverride && toolFormatOverride !== 'auto') {
- this.logger.debug(() => `Using tool format override '${toolFormatOverride}' for ${this.name}`);
- return toolFormatOverride;
- }
- }
- catch (error) {
- this.logger.debug(() => `Failed to detect tool format from SettingsService: ${error}`);
- }
  // Auto-detect based on model name if set to 'auto' or not set
  const modelName = (this.getModel() || this.getDefaultModel()).toLowerCase();
- // Check for GLM models (glm-4.5, glm-4-6, etc.) which require Qwen handling
- if (modelName.includes('glm-')) {
- this.logger.debug(() => `Auto-detected 'qwen' format for GLM model: ${modelName}`);
- return 'qwen';
- }
- // Check for MiniMax models (minimax, mini-max, etc.) which require Qwen handling
- if (modelName.includes('minimax') || modelName.includes('mini-max')) {
- this.logger.debug(() => `Auto-detected 'qwen' format for MiniMax model: ${modelName}`);
+ const logger = new DebugLogger('llxprt:provider:openai');
+ // Check for GLM-4 models (glm-4, glm-4.5, glm-4.6, glm-4-5, etc.)
+ if (modelName.includes('glm-4')) {
+ logger.debug(() => `Auto-detected 'qwen' format for GLM-4.x model: ${modelName}`);
  return 'qwen';
  }
  // Check for qwen models
  if (modelName.includes('qwen')) {
- this.logger.debug(() => `Auto-detected 'qwen' format for Qwen model: ${modelName}`);
+ logger.debug(() => `Auto-detected 'qwen' format for Qwen model: ${modelName}`);
  return 'qwen';
  }
  // Default to 'openai' format
- this.logger.debug(() => `Using default 'openai' format for model: ${modelName}`);
+ logger.debug(() => `Using default 'openai' format for model: ${modelName}`);
  return 'openai';
  }
  /**
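Per the stateless-hardening plan tags in the hunk above, `setModelParams()` now throws instead of memoizing, and `getModelParams()` rebuilds its result from provider settings on every call, skipping reserved transport keys. A reduced sketch of that per-call derivation; the `ProviderSettings` shape is assumed for illustration, not the package's actual type:

    // Sketch: derive model params from raw provider settings on each call (stateless).
    type ProviderSettings = Record<string, unknown>;

    const RESERVED_KEYS = new Set([
      'enabled', 'apiKey', 'api-key', 'apiKeyfile', 'api-keyfile',
      'baseUrl', 'base-url', 'model', 'toolFormat', 'tool-format',
      'toolFormatOverride', 'tool-format-override', 'defaultModel',
    ]);

    function extractModelParams(
      settings: ProviderSettings | undefined,
    ): Record<string, unknown> | undefined {
      if (!settings) return undefined;
      const params: Record<string, unknown> = {};
      for (const [key, value] of Object.entries(settings)) {
        // Reserved keys configure transport/identity, not model sampling params.
        if (RESERVED_KEYS.has(key) || value === undefined || value === null) continue;
        params[key] = value;
      }
      return Object.keys(params).length > 0 ? params : undefined;
    }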
@@ -1298,11 +1140,14 @@ export class OpenAIProvider extends BaseProvider {
  return response;
  }
  /**
+ * @plan:PLAN-20251023-STATELESS-HARDENING.P08
+ * @requirement:REQ-SP4-003
  * Determines whether a response should be retried based on error codes
  * @param error The error object from the API response
  * @returns true if the request should be retried, false otherwise
  */
  shouldRetryResponse(error) {
+ const logger = new DebugLogger('llxprt:provider:openai');
  // Don't retry if we're streaming chunks - just continue processing
  if (error &&
  typeof error === 'object' &&
@@ -1330,7 +1175,7 @@ export class OpenAIProvider extends BaseProvider {
  }
  }
  // Log what we're seeing
- this.logger.debug(() => `shouldRetryResponse checking error:`, {
+ logger.debug(() => `shouldRetryResponse checking error:`, {
  hasError: !!error,
  errorType: error?.constructor?.name,
  status,
@@ -1338,15 +1183,11 @@ export class OpenAIProvider extends BaseProvider {
  errorKeys: error && typeof error === 'object' ? Object.keys(error) : [],
  });
  // Retry on 429 rate limit errors or 5xx server errors
- if (status === 429 || (status && status >= 500 && status < 600)) {
- this.logger.debug(() => `Will retry request due to status ${status}`);
- return true;
+ const shouldRetry = Boolean(status === 429 || (status && status >= 500 && status < 600));
+ if (shouldRetry) {
+ logger.debug(() => `Will retry request due to status ${status}`);
  }
- if (isNetworkTransientError(error)) {
- this.logger.debug(() => 'Will retry request due to transient network error signature (connection-level failure).');
- return true;
- }
- return false;
+ return shouldRetry;
  }
  }
  //# sourceMappingURL=OpenAIProvider.js.map
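After this final hunk, retry decisions rest solely on HTTP status (429 or any 5xx); the transient-network-error branch is gone. A minimal sketch of the resulting predicate, with the function name chosen for illustration:

    // Sketch: retry only on rate limiting (429) or server errors (500-599).
    function shouldRetryStatus(status: number | undefined): boolean {
      return Boolean(status === 429 || (status !== undefined && status >= 500 && status < 600));
    }

    // shouldRetryStatus(429) === true; shouldRetryStatus(503) === true;
    // shouldRetryStatus(400) === false; shouldRetryStatus(undefined) === false.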