@aituber-onair/chat 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/README.ja.md +68 -1
  2. package/README.md +68 -1
  3. package/dist/cjs/adapters/gasFetch.d.ts +6 -0
  4. package/dist/cjs/adapters/gasFetch.d.ts.map +1 -0
  5. package/dist/cjs/adapters/gasFetch.js +53 -0
  6. package/dist/cjs/adapters/gasFetch.js.map +1 -0
  7. package/dist/cjs/index.d.ts +1 -0
  8. package/dist/cjs/index.d.ts.map +1 -1
  9. package/dist/cjs/index.js +4 -1
  10. package/dist/cjs/index.js.map +1 -1
  11. package/dist/cjs/utils/chatServiceHttpClient.d.ts +8 -0
  12. package/dist/cjs/utils/chatServiceHttpClient.d.ts.map +1 -1
  13. package/dist/cjs/utils/chatServiceHttpClient.js +29 -10
  14. package/dist/cjs/utils/chatServiceHttpClient.js.map +1 -1
  15. package/dist/cjs/utils/index.d.ts +1 -0
  16. package/dist/cjs/utils/index.d.ts.map +1 -1
  17. package/dist/cjs/utils/index.js +1 -0
  18. package/dist/cjs/utils/index.js.map +1 -1
  19. package/dist/cjs/utils/runOnce.d.ts +8 -0
  20. package/dist/cjs/utils/runOnce.d.ts.map +1 -0
  21. package/dist/cjs/utils/runOnce.js +13 -0
  22. package/dist/cjs/utils/runOnce.js.map +1 -0
  23. package/dist/esm/adapters/gasFetch.d.ts +6 -0
  24. package/dist/esm/adapters/gasFetch.d.ts.map +1 -0
  25. package/dist/esm/adapters/gasFetch.js +50 -0
  26. package/dist/esm/adapters/gasFetch.js.map +1 -0
  27. package/dist/esm/index.d.ts +1 -0
  28. package/dist/esm/index.d.ts.map +1 -1
  29. package/dist/esm/index.js +2 -0
  30. package/dist/esm/index.js.map +1 -1
  31. package/dist/esm/utils/chatServiceHttpClient.d.ts +8 -0
  32. package/dist/esm/utils/chatServiceHttpClient.d.ts.map +1 -1
  33. package/dist/esm/utils/chatServiceHttpClient.js +29 -10
  34. package/dist/esm/utils/chatServiceHttpClient.js.map +1 -1
  35. package/dist/esm/utils/index.d.ts +1 -0
  36. package/dist/esm/utils/index.d.ts.map +1 -1
  37. package/dist/esm/utils/index.js +1 -0
  38. package/dist/esm/utils/index.js.map +1 -1
  39. package/dist/esm/utils/runOnce.d.ts +8 -0
  40. package/dist/esm/utils/runOnce.d.ts.map +1 -0
  41. package/dist/esm/utils/runOnce.js +10 -0
  42. package/dist/esm/utils/runOnce.js.map +1 -0
  43. package/dist/umd/aituber-onair-chat.js +2997 -0
  44. package/dist/umd/aituber-onair-chat.min.js +19 -0
  45. package/package.json +12 -6
@@ -0,0 +1,2997 @@
1
+ "use strict";
2
+ var AITuberOnAirChat = (() => {
3
// Bundler glue: capture the Object reflection helpers once under short,
// minifier-friendly names; the generated module wrappers below rely on them.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
7
// Registers every entry of `all` on `target` as an enumerable getter, so
// exported values resolve lazily and stay live (later re-assignments are
// visible to importers).
var __export = (target, all) => {
  for (var name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
11
// Copies all own properties of `from` onto `to` as live getters, skipping
// `except` and anything `to` already owns. Enumerability of each copied
// property mirrors the source descriptor (defaulting to enumerable when no
// descriptor is found). Returns `to` for chaining.
var __copyProps = (to, from, except, desc) => {
  if (from && (typeof from === "object" || typeof from === "function")) {
    for (const key of __getOwnPropNames(from)) {
      if (!__hasOwnProp.call(to, key) && key !== except) {
        desc = __getOwnPropDesc(from, key);
        __defProp(to, key, {
          get: () => from[key],
          enumerable: !desc || desc.enumerable
        });
      }
    }
  }
  return to;
};
19
// Wraps a module-exports object into a CommonJS-style namespace: tags it
// with `__esModule` and mirrors every export onto it via live getters.
var __toCommonJS = (mod) => {
  const ns = __defProp({}, "__esModule", { value: true });
  return __copyProps(ns, mod);
};
20
+
21
// src/index.ts
// Public export surface of the bundle. Each entry is registered through
// __export as a lazy getter; entry order is preserved (it determines the
// property order on the resulting namespace object).
var index_exports = {};
__export(index_exports, {
  CHAT_RESPONSE_LENGTH: () => CHAT_RESPONSE_LENGTH,
  CLAUDE_VISION_SUPPORTED_MODELS: () => CLAUDE_VISION_SUPPORTED_MODELS,
  ChatServiceFactory: () => ChatServiceFactory,
  ChatServiceHttpClient: () => ChatServiceHttpClient,
  ClaudeChatService: () => ClaudeChatService,
  ClaudeChatServiceProvider: () => ClaudeChatServiceProvider,
  DEFAULT_MAX_TOKENS: () => DEFAULT_MAX_TOKENS,
  DEFAULT_SUMMARY_PROMPT_TEMPLATE: () => DEFAULT_SUMMARY_PROMPT_TEMPLATE,
  DEFAULT_VISION_PROMPT: () => DEFAULT_VISION_PROMPT,
  EMOTION_TAG_CLEANUP_REGEX: () => EMOTION_TAG_CLEANUP_REGEX,
  EMOTION_TAG_REGEX: () => EMOTION_TAG_REGEX,
  ENDPOINT_CLAUDE_API: () => ENDPOINT_CLAUDE_API,
  ENDPOINT_GEMINI_API: () => ENDPOINT_GEMINI_API,
  ENDPOINT_OPENAI_CHAT_COMPLETIONS_API: () => ENDPOINT_OPENAI_CHAT_COMPLETIONS_API,
  ENDPOINT_OPENAI_RESPONSES_API: () => ENDPOINT_OPENAI_RESPONSES_API,
  ENDPOINT_OPENROUTER_API: () => ENDPOINT_OPENROUTER_API,
  EmotionParser: () => EmotionParser,
  GEMINI_VISION_SUPPORTED_MODELS: () => GEMINI_VISION_SUPPORTED_MODELS,
  GPT5_PRESETS: () => GPT5_PRESETS,
  GPT_5_MODELS: () => GPT_5_MODELS,
  GeminiChatService: () => GeminiChatService,
  GeminiChatServiceProvider: () => GeminiChatServiceProvider,
  HttpError: () => HttpError,
  MAX_TOKENS_BY_LENGTH: () => MAX_TOKENS_BY_LENGTH,
  MODEL_CLAUDE_3_5_HAIKU: () => MODEL_CLAUDE_3_5_HAIKU,
  MODEL_CLAUDE_3_5_SONNET: () => MODEL_CLAUDE_3_5_SONNET,
  MODEL_CLAUDE_3_7_SONNET: () => MODEL_CLAUDE_3_7_SONNET,
  MODEL_CLAUDE_3_HAIKU: () => MODEL_CLAUDE_3_HAIKU,
  MODEL_CLAUDE_4_OPUS: () => MODEL_CLAUDE_4_OPUS,
  MODEL_CLAUDE_4_SONNET: () => MODEL_CLAUDE_4_SONNET,
  MODEL_GEMINI_1_5_FLASH: () => MODEL_GEMINI_1_5_FLASH,
  MODEL_GEMINI_1_5_PRO: () => MODEL_GEMINI_1_5_PRO,
  MODEL_GEMINI_2_0_FLASH: () => MODEL_GEMINI_2_0_FLASH,
  MODEL_GEMINI_2_0_FLASH_LITE: () => MODEL_GEMINI_2_0_FLASH_LITE,
  MODEL_GEMINI_2_5_FLASH: () => MODEL_GEMINI_2_5_FLASH,
  MODEL_GEMINI_2_5_FLASH_LITE: () => MODEL_GEMINI_2_5_FLASH_LITE,
  MODEL_GEMINI_2_5_FLASH_LITE_PREVIEW_06_17: () => MODEL_GEMINI_2_5_FLASH_LITE_PREVIEW_06_17,
  MODEL_GEMINI_2_5_PRO: () => MODEL_GEMINI_2_5_PRO,
  MODEL_GPT_4O: () => MODEL_GPT_4O,
  MODEL_GPT_4O_MINI: () => MODEL_GPT_4O_MINI,
  MODEL_GPT_4_1: () => MODEL_GPT_4_1,
  MODEL_GPT_4_1_MINI: () => MODEL_GPT_4_1_MINI,
  MODEL_GPT_4_1_NANO: () => MODEL_GPT_4_1_NANO,
  MODEL_GPT_4_5_PREVIEW: () => MODEL_GPT_4_5_PREVIEW,
  MODEL_GPT_5: () => MODEL_GPT_5,
  MODEL_GPT_5_CHAT_LATEST: () => MODEL_GPT_5_CHAT_LATEST,
  MODEL_GPT_5_MINI: () => MODEL_GPT_5_MINI,
  MODEL_GPT_5_NANO: () => MODEL_GPT_5_NANO,
  MODEL_GPT_OSS_20B_FREE: () => MODEL_GPT_OSS_20B_FREE,
  MODEL_O1: () => MODEL_O1,
  MODEL_O1_MINI: () => MODEL_O1_MINI,
  MODEL_O3_MINI: () => MODEL_O3_MINI,
  OPENROUTER_CREDITS_THRESHOLD: () => OPENROUTER_CREDITS_THRESHOLD,
  OPENROUTER_FREE_DAILY_LIMIT_HIGH_CREDITS: () => OPENROUTER_FREE_DAILY_LIMIT_HIGH_CREDITS,
  OPENROUTER_FREE_DAILY_LIMIT_LOW_CREDITS: () => OPENROUTER_FREE_DAILY_LIMIT_LOW_CREDITS,
  OPENROUTER_FREE_MODELS: () => OPENROUTER_FREE_MODELS,
  OPENROUTER_FREE_RATE_LIMIT_PER_MINUTE: () => OPENROUTER_FREE_RATE_LIMIT_PER_MINUTE,
  OPENROUTER_VISION_SUPPORTED_MODELS: () => OPENROUTER_VISION_SUPPORTED_MODELS,
  OpenAIChatService: () => OpenAIChatService,
  OpenAIChatServiceProvider: () => OpenAIChatServiceProvider,
  OpenRouterChatService: () => OpenRouterChatService,
  OpenRouterChatServiceProvider: () => OpenRouterChatServiceProvider,
  StreamTextAccumulator: () => StreamTextAccumulator,
  VISION_SUPPORTED_MODELS: () => VISION_SUPPORTED_MODELS,
  getMaxTokensForResponseLength: () => getMaxTokensForResponseLength,
  installGASFetch: () => installGASFetch,
  isGPT5Model: () => isGPT5Model,
  isOpenRouterFreeModel: () => isOpenRouterFreeModel,
  isOpenRouterVisionModel: () => isOpenRouterVisionModel,
  runOnceText: () => runOnceText,
  screenplayToText: () => screenplayToText,
  textToScreenplay: () => textToScreenplay,
  textsToScreenplay: () => textsToScreenplay
});
98
+
99
// src/constants/openai.ts
// OpenAI API endpoints and the model identifiers this package ships with.
const ENDPOINT_OPENAI_CHAT_COMPLETIONS_API = "https://api.openai.com/v1/chat/completions";
const ENDPOINT_OPENAI_RESPONSES_API = "https://api.openai.com/v1/responses";
const MODEL_GPT_5_NANO = "gpt-5-nano";
const MODEL_GPT_5_MINI = "gpt-5-mini";
const MODEL_GPT_5 = "gpt-5";
const MODEL_GPT_5_CHAT_LATEST = "gpt-5-chat-latest";
const MODEL_GPT_4_1 = "gpt-4.1";
const MODEL_GPT_4_1_MINI = "gpt-4.1-mini";
const MODEL_GPT_4_1_NANO = "gpt-4.1-nano";
const MODEL_GPT_4O_MINI = "gpt-4o-mini";
const MODEL_GPT_4O = "gpt-4o";
const MODEL_O3_MINI = "o3-mini";
const MODEL_O1_MINI = "o1-mini";
const MODEL_O1 = "o1";
const MODEL_GPT_4_5_PREVIEW = "gpt-4.5-preview";

// The GPT-5 family (models that accept the reasoning/verbosity tuning knobs).
const GPT_5_MODELS = [
  MODEL_GPT_5_NANO,
  MODEL_GPT_5_MINI,
  MODEL_GPT_5,
  MODEL_GPT_5_CHAT_LATEST
];

// Models that accept image input. MODEL_O3_MINI and MODEL_O1_MINI are
// excluded because they don't support vision.
const VISION_SUPPORTED_MODELS = [
  ...GPT_5_MODELS,
  MODEL_GPT_4_1,
  MODEL_GPT_4_1_MINI,
  MODEL_GPT_4_1_NANO,
  MODEL_GPT_4O_MINI,
  MODEL_GPT_4O,
  MODEL_GPT_4_5_PREVIEW,
  MODEL_O1
];

/**
 * Check whether a model id belongs to the GPT-5 family.
 * @param {string} model - Model identifier.
 * @returns {boolean} true when the id is an exact GPT-5 family member.
 */
function isGPT5Model(model) {
  return GPT_5_MODELS.includes(model);
}
138
+
139
// src/constants/gemini.ts
// Google Generative Language API base endpoint and Gemini model identifiers.
const ENDPOINT_GEMINI_API = "https://generativelanguage.googleapis.com";
const MODEL_GEMINI_2_5_PRO = "gemini-2.5-pro";
const MODEL_GEMINI_2_5_FLASH = "gemini-2.5-flash";
const MODEL_GEMINI_2_5_FLASH_LITE = "gemini-2.5-flash-lite";
const MODEL_GEMINI_2_5_FLASH_LITE_PREVIEW_06_17 = "gemini-2.5-flash-lite-preview-06-17";
const MODEL_GEMINI_2_0_FLASH = "gemini-2.0-flash";
const MODEL_GEMINI_2_0_FLASH_LITE = "gemini-2.0-flash-lite";
const MODEL_GEMINI_1_5_FLASH = "gemini-1.5-flash";
const MODEL_GEMINI_1_5_PRO = "gemini-1.5-pro";

// Every Gemini model listed here accepts image input.
const GEMINI_VISION_SUPPORTED_MODELS = [
  MODEL_GEMINI_2_5_PRO,
  MODEL_GEMINI_2_5_FLASH,
  MODEL_GEMINI_2_5_FLASH_LITE,
  MODEL_GEMINI_2_5_FLASH_LITE_PREVIEW_06_17,
  MODEL_GEMINI_2_0_FLASH,
  MODEL_GEMINI_2_0_FLASH_LITE,
  MODEL_GEMINI_1_5_FLASH,
  MODEL_GEMINI_1_5_PRO
];
159
+
160
// src/constants/claude.ts
// Anthropic Messages API endpoint and Claude model identifiers
// (pinned snapshot versions).
const ENDPOINT_CLAUDE_API = "https://api.anthropic.com/v1/messages";
const MODEL_CLAUDE_3_HAIKU = "claude-3-haiku-20240307";
const MODEL_CLAUDE_3_5_HAIKU = "claude-3-5-haiku-20241022";
const MODEL_CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20241022";
const MODEL_CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219";
const MODEL_CLAUDE_4_SONNET = "claude-4-sonnet-20250514";
const MODEL_CLAUDE_4_OPUS = "claude-4-opus-20250514";

// Every Claude model listed here accepts image input.
const CLAUDE_VISION_SUPPORTED_MODELS = [
  MODEL_CLAUDE_3_HAIKU,
  MODEL_CLAUDE_3_5_HAIKU,
  MODEL_CLAUDE_3_5_SONNET,
  MODEL_CLAUDE_3_7_SONNET,
  MODEL_CLAUDE_4_SONNET,
  MODEL_CLAUDE_4_OPUS
];
176
+
177
// src/constants/openrouter.ts
// OpenRouter endpoint, known free-tier models, and free-tier rate limits.
const ENDPOINT_OPENROUTER_API = "https://openrouter.ai/api/v1/chat/completions";
const MODEL_GPT_OSS_20B_FREE = "openai/gpt-oss-20b:free";
const OPENROUTER_FREE_MODELS = [MODEL_GPT_OSS_20B_FREE];
// No OpenRouter model is currently whitelisted for vision input.
const OPENROUTER_VISION_SUPPORTED_MODELS = [];
const OPENROUTER_FREE_RATE_LIMIT_PER_MINUTE = 20;
const OPENROUTER_FREE_DAILY_LIMIT_LOW_CREDITS = 50;
const OPENROUTER_FREE_DAILY_LIMIT_HIGH_CREDITS = 1000;
// Purchased-credit amount that switches an account to the higher daily limit.
const OPENROUTER_CREDITS_THRESHOLD = 10;

/**
 * Check whether a model id refers to a free-tier OpenRouter model
 * (substring match, so provider prefixes/suffixes are tolerated).
 */
function isOpenRouterFreeModel(model) {
  for (const freeModel of OPENROUTER_FREE_MODELS) {
    if (model.includes(freeModel)) return true;
  }
  return false;
}

/**
 * Check whether a model id refers to a vision-capable OpenRouter model.
 * The whitelist is currently empty, so this always returns false.
 */
function isOpenRouterVisionModel(model) {
  for (const visionModel of OPENROUTER_VISION_SUPPORTED_MODELS) {
    if (model.includes(visionModel)) return true;
  }
  return false;
}
194
+
195
// src/constants/chat.ts
// Response-length presets shared by every chat provider.
const CHAT_RESPONSE_LENGTH = {
  VERY_SHORT: "veryShort",
  SHORT: "short",
  MEDIUM: "medium",
  LONG: "long",
  VERY_LONG: "veryLong",
  // Extended response length for longer outputs
  DEEP: "deep"
};

// Token budget associated with each preset above.
const MAX_TOKENS_BY_LENGTH = {
  [CHAT_RESPONSE_LENGTH.VERY_SHORT]: 40,
  [CHAT_RESPONSE_LENGTH.SHORT]: 100,
  [CHAT_RESPONSE_LENGTH.MEDIUM]: 200,
  [CHAT_RESPONSE_LENGTH.LONG]: 300,
  [CHAT_RESPONSE_LENGTH.VERY_LONG]: 1000,
  // Extended response length for longer outputs
  [CHAT_RESPONSE_LENGTH.DEEP]: 5000
};

// Fallback budget used when no (or an unknown) preset is supplied.
const DEFAULT_MAX_TOKENS = 5000;

// Tuning presets for the GPT-5 reasoning-effort / verbosity knobs.
const GPT5_PRESETS = {
  casual: {
    reasoning_effort: "minimal",
    verbosity: "low",
    description: "Fast responses for casual chat, quick questions (GPT-4 like experience)"
  },
  balanced: {
    reasoning_effort: "medium",
    verbosity: "medium",
    description: "Balanced reasoning for business tasks, learning, general problem solving"
  },
  expert: {
    reasoning_effort: "high",
    verbosity: "high",
    description: "Deep reasoning for research, complex analysis, expert-level tasks"
  }
};

/**
 * Resolve a response-length preset to its max-token budget.
 * @param {string|undefined} responseLength - One of CHAT_RESPONSE_LENGTH's values.
 * @returns {number} The mapped budget, or DEFAULT_MAX_TOKENS for a falsy or
 *   unknown preset.
 */
function getMaxTokensForResponseLength(responseLength) {
  const limit = responseLength ? MAX_TOKENS_BY_LENGTH[responseLength] : undefined;
  return limit ?? DEFAULT_MAX_TOKENS;
}
238
+
239
// src/constants/prompts.ts
// Default system prompt used when commenting on a captured broadcast frame.
const DEFAULT_VISION_PROMPT = "You are a friendly AI avatar. Comment on the situation based on the broadcast screen.";

// Default conversation-summary prompt. `{maxLength}` is substituted by the
// caller before the prompt is sent.
const DEFAULT_SUMMARY_PROMPT_TEMPLATE = `You are a skilled summarizing assistant.
Analyze the following conversation and produce a summary in the **same language** as the majority of the conversation:
- Summaries should highlight key points
- Stay concise (around {maxLength} characters if possible)
- No redundant expressions

If the conversation is in Japanese, summarize in Japanese.
If it's in English, summarize in English.
If it's in another language, summarize in that language.
`;
251
+
252
// src/utils/streamTextAccumulator.ts
/**
 * Static helpers for accumulating streamed text into an array of chat
 * blocks ({ type: "text", text } entries mixed with other block kinds).
 */
var StreamTextAccumulator = class {
  /**
   * Append text, merging into the trailing block when it is a text block;
   * otherwise a new text block is pushed. Empty/falsy text is ignored.
   * @param blocks Array of chat blocks (mutated in place)
   * @param text Text to append
   */
  static append(blocks, text) {
    if (!text) return;
    const tail = blocks[blocks.length - 1];
    if (tail?.type === "text") {
      tail.text += text;
      return;
    }
    blocks.push({ type: "text", text });
  }
  /**
   * Concatenate the text of every text block, in order.
   * @param blocks Array of chat blocks
   * @returns The combined text
   */
  static getFullText(blocks) {
    let full = "";
    for (const block of blocks) {
      if (block.type === "text") {
        full += block.text;
      }
    }
    return full;
  }
  /**
   * Push text as its own block, never merging with the previous one.
   * Empty/falsy text is ignored.
   * @param blocks Array of chat blocks (mutated in place)
   * @param text Text to add as a new block
   */
  static addTextBlock(blocks, text) {
    if (!text) return;
    blocks.push({ type: "text", text });
  }
};
288
+
289
// src/utils/chatServiceHttpClient.ts
/**
 * Error raised for non-2xx HTTP responses. Carries the numeric status,
 * the status text, and the raw response body for caller inspection.
 */
var HttpError = class extends Error {
  constructor(status, statusText, body) {
    super(`HTTP ${status}: ${statusText}`);
    this.name = "HttpError";
    this.status = status;
    this.statusText = statusText;
    this.body = body;
  }
};
299
+ var _ChatServiceHttpClient = class _ChatServiceHttpClient {
300
+ /**
301
+ * Set custom fetch implementation
302
+ */
303
+ static setFetch(fn) {
304
+ this.fetchImpl = fn;
305
+ }
306
+ /**
307
+ * Make a POST request with common error handling
308
+ * @param url Request URL
309
+ * @param body Request body
310
+ * @param headers Request headers
311
+ * @param options Additional options
312
+ * @returns Response object
313
+ */
314
+ static async post(url, body, headers = {}, options = {}) {
315
+ const { timeout = 3e4, retries = 0, retryDelay = 1e3 } = options;
316
+ const defaultHeaders = {
317
+ "Content-Type": "application/json"
318
+ };
319
+ const finalHeaders = { ...defaultHeaders, ...headers };
320
+ let lastError = null;
321
+ for (let attempt = 0; attempt <= retries; attempt++) {
322
+ try {
323
+ const hasAbort = typeof AbortController !== "undefined";
324
+ const controller = hasAbort ? new AbortController() : void 0;
325
+ const timeoutId = hasAbort ? setTimeout(() => controller.abort(), timeout) : void 0;
326
+ const response = await _ChatServiceHttpClient.fetchImpl(url, {
327
+ method: "POST",
328
+ headers: finalHeaders,
329
+ body: typeof body === "string" ? body : JSON.stringify(body),
330
+ // Attach signal only when controller exists
331
+ ...controller ? { signal: controller.signal } : {}
332
+ });
333
+ if (timeoutId) clearTimeout(timeoutId);
334
+ if (!response.ok) {
335
+ const errorBody = await response.text();
336
+ throw new HttpError(response.status, response.statusText, errorBody);
337
+ }
338
+ return response;
339
+ } catch (error) {
340
+ lastError = error;
341
+ if (error instanceof HttpError && error.status >= 400 && error.status < 500) {
342
+ throw error;
343
+ }
344
+ if (error instanceof Error && error.name === "AbortError") {
345
+ throw new Error(`Request timeout after ${timeout}ms`);
346
+ }
347
+ if (attempt < retries) {
348
+ await new Promise(
349
+ (resolve) => setTimeout(resolve, retryDelay * (attempt + 1))
350
+ );
351
+ }
352
+ }
353
+ }
354
+ throw lastError || new Error("Request failed");
355
+ }
356
+ /**
357
+ * Handle error response and throw appropriate error
358
+ * @param res Response object
359
+ * @returns Never (always throws)
360
+ */
361
+ static async handleErrorResponse(res) {
362
+ const errorBody = await res.text();
363
+ throw new HttpError(res.status, res.statusText, errorBody);
364
+ }
365
+ /**
366
+ * Make a GET request (for fetching images, etc.)
367
+ * @param url Request URL
368
+ * @param headers Request headers
369
+ * @param options Additional options
370
+ * @returns Response object
371
+ */
372
+ static async get(url, headers = {}, options = {}) {
373
+ const { timeout = 3e4, retries = 0, retryDelay = 1e3 } = options;
374
+ let lastError = null;
375
+ for (let attempt = 0; attempt <= retries; attempt++) {
376
+ try {
377
+ const hasAbort = typeof AbortController !== "undefined";
378
+ const controller = hasAbort ? new AbortController() : void 0;
379
+ const timeoutId = hasAbort ? setTimeout(() => controller.abort(), timeout) : void 0;
380
+ const response = await _ChatServiceHttpClient.fetchImpl(url, {
381
+ method: "GET",
382
+ headers,
383
+ ...controller ? { signal: controller.signal } : {}
384
+ });
385
+ if (timeoutId) clearTimeout(timeoutId);
386
+ if (!response.ok) {
387
+ const errorBody = await response.text();
388
+ throw new HttpError(response.status, response.statusText, errorBody);
389
+ }
390
+ return response;
391
+ } catch (error) {
392
+ lastError = error;
393
+ if (error instanceof HttpError && error.status >= 400 && error.status < 500) {
394
+ throw error;
395
+ }
396
+ if (error instanceof Error && error.name === "AbortError") {
397
+ throw new Error(`Request timeout after ${timeout}ms`);
398
+ }
399
+ if (attempt < retries) {
400
+ await new Promise(
401
+ (resolve) => setTimeout(resolve, retryDelay * (attempt + 1))
402
+ );
403
+ }
404
+ }
405
+ }
406
+ throw lastError || new Error("Request failed");
407
+ }
408
+ };
409
+ /**
410
+ * Injectable fetch implementation (browser/node: native fetch, GAS: UrlFetchApp wrapper)
411
+ */
412
+ _ChatServiceHttpClient.fetchImpl = (u, i) => fetch(u, i);
413
+ var ChatServiceHttpClient = _ChatServiceHttpClient;
414
+
415
+ // src/services/providers/openai/OpenAIChatService.ts
416
+ var OpenAIChatService = class {
417
+ /**
418
+ * Constructor
419
+ * @param apiKey OpenAI API key
420
+ * @param model Name of the model to use
421
+ * @param visionModel Name of the vision model
422
+ */
423
+ constructor(apiKey, model = MODEL_GPT_4O_MINI, visionModel = MODEL_GPT_4O_MINI, tools, endpoint = ENDPOINT_OPENAI_CHAT_COMPLETIONS_API, mcpServers = [], responseLength, verbosity, reasoning_effort, enableReasoningSummary = false) {
424
+ /** Provider name */
425
+ this.provider = "openai";
426
+ this.apiKey = apiKey;
427
+ this.model = model;
428
+ this.tools = tools || [];
429
+ this.endpoint = endpoint;
430
+ this.mcpServers = mcpServers;
431
+ this.responseLength = responseLength;
432
+ this.verbosity = verbosity;
433
+ this.reasoning_effort = reasoning_effort;
434
+ this.enableReasoningSummary = enableReasoningSummary;
435
+ if (!VISION_SUPPORTED_MODELS.includes(visionModel)) {
436
+ throw new Error(
437
+ `Model ${visionModel} does not support vision capabilities.`
438
+ );
439
+ }
440
+ this.visionModel = visionModel;
441
+ }
442
+ /**
443
+ * Get the current model name
444
+ * @returns Model name
445
+ */
446
+ getModel() {
447
+ return this.model;
448
+ }
449
+ /**
450
+ * Get the current vision model name
451
+ * @returns Vision model name
452
+ */
453
+ getVisionModel() {
454
+ return this.visionModel;
455
+ }
456
+ /**
457
+ * Process chat messages
458
+ * @param messages Array of messages to send
459
+ * @param onPartialResponse Callback to receive each part of streaming response
460
+ * @param onCompleteResponse Callback to execute when response is complete
461
+ */
462
+ async processChat(messages, onPartialResponse, onCompleteResponse) {
463
+ if (this.tools.length === 0) {
464
+ const res = await this.callOpenAI(messages, this.model, true);
465
+ const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
466
+ try {
467
+ if (isResponsesAPI) {
468
+ const result = await this.parseResponsesStream(
469
+ res,
470
+ onPartialResponse
471
+ );
472
+ const full = result.blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
473
+ await onCompleteResponse(full);
474
+ } else {
475
+ const full = await this.handleStream(res, onPartialResponse);
476
+ await onCompleteResponse(full);
477
+ }
478
+ } catch (error) {
479
+ console.error("[processChat] Error in streaming/completion:", error);
480
+ throw error;
481
+ }
482
+ return;
483
+ }
484
+ const { blocks, stop_reason } = await this.chatOnce(messages);
485
+ if (stop_reason === "end") {
486
+ const full = blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
487
+ await onCompleteResponse(full);
488
+ return;
489
+ }
490
+ throw new Error(
491
+ "processChat received tool_calls. ChatProcessor must use chatOnce() loop when tools are enabled."
492
+ );
493
+ }
494
+ /**
495
+ * Process chat messages with images
496
+ * @param messages Array of messages to send (including images)
497
+ * @param onPartialResponse Callback to receive each part of streaming response
498
+ * @param onCompleteResponse Callback to execute when response is complete
499
+ * @throws Error if the selected model doesn't support vision
500
+ */
501
+ async processVisionChat(messages, onPartialResponse, onCompleteResponse) {
502
+ try {
503
+ if (this.tools.length === 0) {
504
+ const res = await this.callOpenAI(messages, this.visionModel, true);
505
+ const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
506
+ try {
507
+ if (isResponsesAPI) {
508
+ const result = await this.parseResponsesStream(
509
+ res,
510
+ onPartialResponse
511
+ );
512
+ const full = result.blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
513
+ await onCompleteResponse(full);
514
+ } else {
515
+ const full = await this.handleStream(res, onPartialResponse);
516
+ await onCompleteResponse(full);
517
+ }
518
+ } catch (streamError) {
519
+ console.error(
520
+ "[processVisionChat] Error in streaming/completion:",
521
+ streamError
522
+ );
523
+ throw streamError;
524
+ }
525
+ return;
526
+ }
527
+ const { blocks, stop_reason } = await this.visionChatOnce(
528
+ messages,
529
+ true,
530
+ onPartialResponse
531
+ );
532
+ if (stop_reason === "end") {
533
+ const full = blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
534
+ await onCompleteResponse(full);
535
+ return;
536
+ }
537
+ throw new Error(
538
+ "processVisionChat received tool_calls. ChatProcessor must use visionChatOnce() loop when tools are enabled."
539
+ );
540
+ } catch (error) {
541
+ console.error("Error in processVisionChat:", error);
542
+ throw error;
543
+ }
544
+ }
545
+ /**
546
+ * Process chat messages with tools (text only)
547
+ * @param messages Array of messages to send
548
+ * @param stream Whether to use streaming
549
+ * @param onPartialResponse Callback for partial responses
550
+ * @param maxTokens Maximum tokens for response (optional)
551
+ * @returns Tool chat completion
552
+ */
553
+ async chatOnce(messages, stream = true, onPartialResponse = () => {
554
+ }, maxTokens) {
555
+ const res = await this.callOpenAI(messages, this.model, stream, maxTokens);
556
+ return this.parseResponse(res, stream, onPartialResponse);
557
+ }
558
+ /**
559
+ * Process vision chat messages with tools
560
+ * @param messages Array of messages to send (including images)
561
+ * @param stream Whether to use streaming
562
+ * @param onPartialResponse Callback for partial responses
563
+ * @param maxTokens Maximum tokens for response (optional)
564
+ * @returns Tool chat completion
565
+ */
566
+ async visionChatOnce(messages, stream = false, onPartialResponse = () => {
567
+ }, maxTokens) {
568
+ const res = await this.callOpenAI(
569
+ messages,
570
+ this.visionModel,
571
+ stream,
572
+ maxTokens
573
+ );
574
+ return this.parseResponse(res, stream, onPartialResponse);
575
+ }
576
+ /**
577
+ * Parse response based on endpoint type
578
+ */
579
+ async parseResponse(res, stream, onPartialResponse) {
580
+ const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
581
+ if (isResponsesAPI) {
582
+ return stream ? this.parseResponsesStream(res, onPartialResponse) : this.parseResponsesOneShot(await res.json());
583
+ }
584
+ return stream ? this.parseStream(res, onPartialResponse) : this.parseOneShot(await res.json());
585
+ }
586
+ async callOpenAI(messages, model, stream = false, maxTokens) {
587
+ const body = this.buildRequestBody(messages, model, stream, maxTokens);
588
+ const res = await ChatServiceHttpClient.post(this.endpoint, body, {
589
+ Authorization: `Bearer ${this.apiKey}`
590
+ });
591
+ return res;
592
+ }
593
+ /**
594
+ * Build request body based on the endpoint type
595
+ */
596
+ buildRequestBody(messages, model, stream, maxTokens) {
597
+ const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
598
+ this.validateMCPCompatibility();
599
+ const body = {
600
+ model,
601
+ stream
602
+ };
603
+ const tokenLimit = maxTokens !== void 0 ? maxTokens : getMaxTokensForResponseLength(this.responseLength);
604
+ if (isResponsesAPI) {
605
+ body.max_output_tokens = tokenLimit;
606
+ } else {
607
+ body.max_completion_tokens = tokenLimit;
608
+ }
609
+ if (isResponsesAPI) {
610
+ body.input = this.cleanMessagesForResponsesAPI(messages);
611
+ } else {
612
+ body.messages = messages;
613
+ }
614
+ if (isGPT5Model(model)) {
615
+ if (isResponsesAPI) {
616
+ if (this.reasoning_effort) {
617
+ body.reasoning = {
618
+ ...body.reasoning,
619
+ effort: this.reasoning_effort
620
+ };
621
+ if (this.enableReasoningSummary) {
622
+ body.reasoning.summary = "auto";
623
+ }
624
+ }
625
+ if (this.verbosity) {
626
+ body.text = {
627
+ ...body.text,
628
+ format: { type: "text" },
629
+ verbosity: this.verbosity
630
+ };
631
+ }
632
+ } else {
633
+ if (this.reasoning_effort) {
634
+ body.reasoning_effort = this.reasoning_effort;
635
+ }
636
+ if (this.verbosity) {
637
+ body.verbosity = this.verbosity;
638
+ }
639
+ }
640
+ }
641
+ const tools = this.buildToolsDefinition();
642
+ if (tools.length > 0) {
643
+ body.tools = tools;
644
+ if (!isResponsesAPI) {
645
+ body.tool_choice = "auto";
646
+ }
647
+ }
648
+ return body;
649
+ }
650
+ /**
651
+ * Validate MCP servers compatibility with the current endpoint
652
+ */
653
+ validateMCPCompatibility() {
654
+ if (this.mcpServers.length > 0 && this.endpoint === ENDPOINT_OPENAI_CHAT_COMPLETIONS_API) {
655
+ throw new Error(
656
+ `MCP servers are not supported with Chat Completions API. Current endpoint: ${this.endpoint}. Please use OpenAI Responses API endpoint: ${ENDPOINT_OPENAI_RESPONSES_API}. MCP tools are only available in the Responses API endpoint.`
657
+ );
658
+ }
659
+ }
660
+ /**
661
+ * Clean messages for Responses API (remove timestamp and other extra properties)
662
+ */
663
+ cleanMessagesForResponsesAPI(messages) {
664
+ return messages.map((msg) => {
665
+ const role = msg.role === "tool" ? "user" : msg.role;
666
+ const cleanMsg = {
667
+ role
668
+ };
669
+ if (typeof msg.content === "string") {
670
+ cleanMsg.content = msg.content;
671
+ } else if (Array.isArray(msg.content)) {
672
+ cleanMsg.content = msg.content.map((block) => {
673
+ if (block.type === "text") {
674
+ return {
675
+ type: "input_text",
676
+ text: block.text
677
+ };
678
+ } else if (block.type === "image_url") {
679
+ return {
680
+ type: "input_image",
681
+ image_url: block.image_url.url
682
+ // Extract the URL string directly
683
+ };
684
+ }
685
+ return block;
686
+ });
687
+ } else {
688
+ cleanMsg.content = msg.content;
689
+ }
690
+ return cleanMsg;
691
+ });
692
+ }
693
+ /**
694
+ * Build tools definition based on the endpoint type
695
+ */
696
+ buildToolsDefinition() {
697
+ const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
698
+ const toolDefs = [];
699
+ if (this.tools.length > 0) {
700
+ if (isResponsesAPI) {
701
+ toolDefs.push(
702
+ ...this.tools.map((t) => ({
703
+ type: "function",
704
+ name: t.name,
705
+ description: t.description,
706
+ parameters: t.parameters
707
+ }))
708
+ );
709
+ } else {
710
+ toolDefs.push(
711
+ ...this.tools.map((t) => ({
712
+ type: "function",
713
+ function: {
714
+ name: t.name,
715
+ description: t.description,
716
+ parameters: t.parameters
717
+ }
718
+ }))
719
+ );
720
+ }
721
+ }
722
+ if (this.mcpServers.length > 0 && isResponsesAPI) {
723
+ toolDefs.push(...this.buildMCPToolsDefinition());
724
+ }
725
+ return toolDefs;
726
+ }
727
+ /**
728
+ * Build MCP tools definition for Responses API
729
+ */
730
+ buildMCPToolsDefinition() {
731
+ return this.mcpServers.map((server) => {
732
+ const mcpDef = {
733
+ type: "mcp",
734
+ // Using 'mcp' as indicated by the error message
735
+ server_label: server.name,
736
+ // Use server_label as required by API
737
+ server_url: server.url
738
+ // Use server_url instead of url
739
+ };
740
+ if (server.tool_configuration?.allowed_tools) {
741
+ mcpDef.allowed_tools = server.tool_configuration.allowed_tools;
742
+ }
743
+ if (server.authorization_token) {
744
+ mcpDef.headers = {
745
+ Authorization: `Bearer ${server.authorization_token}`
746
+ };
747
+ }
748
+ return mcpDef;
749
+ });
750
+ }
751
+ async handleStream(res, onPartial) {
752
+ const reader = res.body.getReader();
753
+ const dec = new TextDecoder();
754
+ let buffer = "";
755
+ let full = "";
756
+ while (true) {
757
+ const { done, value } = await reader.read();
758
+ if (done) break;
759
+ buffer += dec.decode(value, { stream: true });
760
+ let idx;
761
+ while ((idx = buffer.indexOf("\n\n")) !== -1) {
762
+ const raw = buffer.slice(0, idx).trim();
763
+ buffer = buffer.slice(idx + 2);
764
+ if (!raw.startsWith("data:")) continue;
765
+ const jsonStr = raw.slice(5).trim();
766
+ if (jsonStr === "[DONE]") {
767
+ buffer = "";
768
+ break;
769
+ }
770
+ const json = JSON.parse(jsonStr);
771
+ const content = json.choices[0]?.delta?.content || "";
772
+ if (content) {
773
+ onPartial(content);
774
+ full += content;
775
+ }
776
+ }
777
+ }
778
+ return full;
779
+ }
780
+ async parseStream(res, onPartial) {
781
+ const reader = res.body.getReader();
782
+ const dec = new TextDecoder();
783
+ const textBlocks = [];
784
+ const toolCallsMap = /* @__PURE__ */ new Map();
785
+ let buf = "";
786
+ while (true) {
787
+ const { done, value } = await reader.read();
788
+ if (done) break;
789
+ buf += dec.decode(value, { stream: true });
790
+ let sep;
791
+ while ((sep = buf.indexOf("\n\n")) !== -1) {
792
+ const raw = buf.slice(0, sep).trim();
793
+ buf = buf.slice(sep + 2);
794
+ if (!raw.startsWith("data:")) continue;
795
+ const payload = raw.slice(5).trim();
796
+ if (payload === "[DONE]") {
797
+ buf = "";
798
+ break;
799
+ }
800
+ const json = JSON.parse(payload);
801
+ const delta = json.choices[0].delta;
802
+ if (delta.content) {
803
+ onPartial(delta.content);
804
+ textBlocks.push({ type: "text", text: delta.content });
805
+ }
806
+ if (delta.tool_calls) {
807
+ delta.tool_calls.forEach((c) => {
808
+ const entry = toolCallsMap.get(c.index) ?? {
809
+ id: c.id,
810
+ name: c.function.name,
811
+ args: ""
812
+ };
813
+ entry.args += c.function.arguments || "";
814
+ toolCallsMap.set(c.index, entry);
815
+ });
816
+ }
817
+ }
818
+ }
819
+ const toolBlocks = Array.from(toolCallsMap.entries()).sort((a, b) => a[0] - b[0]).map(([_, e]) => ({
820
+ type: "tool_use",
821
+ id: e.id,
822
+ name: e.name,
823
+ input: JSON.parse(e.args || "{}")
824
+ }));
825
+ const blocks = [...textBlocks, ...toolBlocks];
826
+ return {
827
+ blocks,
828
+ stop_reason: toolBlocks.length ? "tool_use" : "end"
829
+ };
830
+ }
831
+ parseOneShot(data) {
832
+ const choice = data.choices[0];
833
+ const blocks = [];
834
+ if (choice.finish_reason === "tool_calls") {
835
+ choice.message.tool_calls.forEach(
836
+ (c) => blocks.push({
837
+ type: "tool_use",
838
+ id: c.id,
839
+ name: c.function.name,
840
+ input: JSON.parse(c.function.arguments || "{}")
841
+ })
842
+ );
843
+ } else {
844
+ blocks.push({ type: "text", text: choice.message.content });
845
+ }
846
+ return {
847
+ blocks,
848
+ stop_reason: choice.finish_reason === "tool_calls" ? "tool_use" : "end"
849
+ };
850
+ }
851
+ /**
852
+ * Parse streaming response from Responses API (SSE format)
853
+ */
854
+ async parseResponsesStream(res, onPartial) {
855
+ const reader = res.body.getReader();
856
+ const dec = new TextDecoder();
857
+ const textBlocks = [];
858
+ const toolCallsMap = /* @__PURE__ */ new Map();
859
+ let buf = "";
860
+ while (true) {
861
+ const { done, value } = await reader.read();
862
+ if (done) break;
863
+ buf += dec.decode(value, { stream: true });
864
+ let eventType = "";
865
+ let eventData = "";
866
+ const lines = buf.split("\n");
867
+ buf = lines.pop() || "";
868
+ for (let i = 0; i < lines.length; i++) {
869
+ const line = lines[i].trim();
870
+ if (line.startsWith("event:")) {
871
+ eventType = line.slice(6).trim();
872
+ } else if (line.startsWith("data:")) {
873
+ eventData = line.slice(5).trim();
874
+ } else if (line === "" && eventType && eventData) {
875
+ try {
876
+ const json = JSON.parse(eventData);
877
+ const completionResult = this.handleResponsesSSEEvent(
878
+ eventType,
879
+ json,
880
+ onPartial,
881
+ textBlocks,
882
+ toolCallsMap
883
+ );
884
+ if (completionResult === "completed") {
885
+ }
886
+ } catch (e) {
887
+ console.warn("Failed to parse SSE data:", eventData);
888
+ }
889
+ eventType = "";
890
+ eventData = "";
891
+ }
892
+ }
893
+ }
894
+ const toolBlocks = Array.from(toolCallsMap.values()).map(
895
+ (tool) => ({
896
+ type: "tool_use",
897
+ id: tool.id,
898
+ name: tool.name,
899
+ input: tool.input || {}
900
+ })
901
+ );
902
+ const blocks = [...textBlocks, ...toolBlocks];
903
+ return {
904
+ blocks,
905
+ stop_reason: toolBlocks.length ? "tool_use" : "end"
906
+ };
907
+ }
908
/**
 * Handle specific SSE events from Responses API.
 * Text is forwarded to `onPartial` and accumulated into `textBlocks`;
 * function_call items are collected into `toolCallsMap` keyed by item id.
 * @param eventType SSE "event:" name
 * @param data Parsed "data:" JSON payload
 * @param onPartial Callback receiving each text fragment
 * @param textBlocks Accumulator for text content blocks (mutated)
 * @param toolCallsMap Accumulator for tool calls, keyed by item id (mutated)
 * @returns 'completed' if the response is completed, undefined otherwise
 */
handleResponsesSSEEvent(eventType, data, onPartial, textBlocks, toolCallsMap) {
  switch (eventType) {
    // Item addition events
    case "response.output_item.added":
      if (data.item?.type === "message" && Array.isArray(data.item.content)) {
        data.item.content.forEach((c) => {
          if (c.type === "output_text" && c.text) {
            onPartial(c.text);
            StreamTextAccumulator.append(textBlocks, c.text);
          }
        });
      } else if (data.item?.type === "function_call") {
        // Arguments may arrive fully-formed here; parse eagerly
        // (a JSON.parse failure is caught by the caller's try/catch)
        toolCallsMap.set(data.item.id, {
          id: data.item.id,
          name: data.item.name,
          input: data.item.arguments ? JSON.parse(data.item.arguments) : {}
        });
      }
      break;
    // Initial content part events
    case "response.content_part.added":
      if (data.part?.type === "output_text" && typeof data.part.text === "string") {
        onPartial(data.part.text);
        StreamTextAccumulator.append(textBlocks, data.part.text);
      }
      break;
    // Text delta events — delta can be a plain string or { text }
    case "response.output_text.delta":
    case "response.content_part.delta":
      {
        const deltaText = typeof data.delta === "string" ? data.delta : data.delta?.text ?? "";
        if (deltaText) {
          onPartial(deltaText);
          StreamTextAccumulator.append(textBlocks, deltaText);
        }
      }
      break;
    // Text completion events - do not add text here as it's already accumulated via delta events
    case "response.output_text.done":
    case "response.content_part.done":
      break;
    // Response completion events
    case "response.completed":
      return "completed";
    // GPT-5 reasoning token events (not visible but counted for billing)
    case "response.reasoning.started":
    case "response.reasoning.delta":
    case "response.reasoning.done":
      break;
    default:
      break;
  }
  return void 0;
}
966
+ /**
967
+ * Parse non-streaming response from Responses API
968
+ */
969
+ parseResponsesOneShot(data) {
970
+ const blocks = [];
971
+ if (data.output && Array.isArray(data.output)) {
972
+ data.output.forEach((outputItem) => {
973
+ if (outputItem.type === "message" && outputItem.content) {
974
+ outputItem.content.forEach((content) => {
975
+ if (content.type === "output_text" && content.text) {
976
+ blocks.push({ type: "text", text: content.text });
977
+ }
978
+ });
979
+ }
980
+ if (outputItem.type === "function_call") {
981
+ blocks.push({
982
+ type: "tool_use",
983
+ id: outputItem.id,
984
+ name: outputItem.name,
985
+ input: outputItem.arguments ? JSON.parse(outputItem.arguments) : {}
986
+ });
987
+ }
988
+ });
989
+ }
990
+ return {
991
+ blocks,
992
+ stop_reason: blocks.some((b) => b.type === "tool_use") ? "tool_use" : "end"
993
+ };
994
+ }
995
+ };
996
+
997
+ // src/services/providers/openai/OpenAIChatServiceProvider.ts
998
var OpenAIChatServiceProvider = class {
  /**
   * Create a chat service instance.
   * Endpoint selection: MCP servers force the Responses API; otherwise
   * GPT-5 models follow `gpt5EndpointPreference` (default "chat"); an
   * explicit `options.endpoint` always wins.
   * @param options Service options
   * @returns OpenAIChatService instance
   */
  createChatService(options) {
    const optimizedOptions = this.optimizeGPT5Options(options);
    // Reuse the chat model for vision if capable, else the default model
    const visionModel = optimizedOptions.visionModel || (this.supportsVisionForModel(
      optimizedOptions.model || this.getDefaultModel()
    ) ? optimizedOptions.model : this.getDefaultModel());
    const tools = optimizedOptions.tools;
    const mcpServers = optimizedOptions.mcpServers ?? [];
    const modelName = optimizedOptions.model || this.getDefaultModel();
    let shouldUseResponsesAPI = false;
    if (mcpServers.length > 0) {
      shouldUseResponsesAPI = true;
    } else if (isGPT5Model(modelName)) {
      const preference = optimizedOptions.gpt5EndpointPreference || "chat";
      shouldUseResponsesAPI = preference === "responses";
    }
    const endpoint = optimizedOptions.endpoint || (shouldUseResponsesAPI ? ENDPOINT_OPENAI_RESPONSES_API : ENDPOINT_OPENAI_CHAT_COMPLETIONS_API);
    return new OpenAIChatService(
      optimizedOptions.apiKey,
      modelName,
      visionModel,
      tools,
      endpoint,
      mcpServers,
      optimizedOptions.responseLength,
      optimizedOptions.verbosity,
      optimizedOptions.reasoning_effort,
      optimizedOptions.enableReasoningSummary
    );
  }
  /**
   * Get the provider name
   * @returns Provider name ('openai')
   */
  getProviderName() {
    return "openai";
  }
  /**
   * Get the list of supported models
   * @returns Array of supported model names
   */
  getSupportedModels() {
    return [
      MODEL_GPT_5_NANO,
      MODEL_GPT_5_MINI,
      MODEL_GPT_5,
      MODEL_GPT_5_CHAT_LATEST,
      MODEL_GPT_4_1,
      MODEL_GPT_4_1_MINI,
      MODEL_GPT_4_1_NANO,
      MODEL_GPT_4O_MINI,
      MODEL_GPT_4O,
      MODEL_O3_MINI,
      MODEL_O1_MINI,
      MODEL_O1,
      MODEL_GPT_4_5_PREVIEW
    ];
  }
  /**
   * Get the default model
   * @returns Default model name
   */
  getDefaultModel() {
    return MODEL_GPT_5_NANO;
  }
  /**
   * Check if this provider supports vision (image processing)
   * @returns Vision support status (true)
   */
  supportsVision() {
    return true;
  }
  /**
   * Check if a specific model supports vision capabilities
   * @param model The model name to check
   * @returns True if the model supports vision, false otherwise
   */
  supportsVisionForModel(model) {
    return VISION_SUPPORTED_MODELS.includes(model);
  }
  /**
   * Apply GPT-5 specific optimizations to options.
   * Fix: an unrecognized `gpt5Preset` no longer crashes on an undefined
   * preset lookup — it falls through to the default reasoning_effort.
   * @param options Original chat service options
   * @returns Optimized options for GPT-5 usage (original object for non-GPT-5)
   */
  optimizeGPT5Options(options) {
    const modelName = options.model || this.getDefaultModel();
    if (!isGPT5Model(modelName)) {
      return options;
    }
    const optimized = { ...options };
    const preset = options.gpt5Preset ? GPT5_PRESETS[options.gpt5Preset] : void 0;
    if (preset) {
      optimized.reasoning_effort = preset.reasoning_effort;
      optimized.verbosity = preset.verbosity;
    } else {
      if (!options.reasoning_effort) {
        optimized.reasoning_effort = "medium";
      }
    }
    return optimized;
  }
};
1106
+
1107
+ // src/utils/mcpSchemaFetcher.ts
1108
var MCPSchemaFetcher = class {
  /**
   * Build the generic single-"search"-tool fallback schema for a server.
   * Internal helper: used when the server's /tools listing is missing,
   * malformed, or the fetch fails (previously duplicated inline twice).
   * @param serverConfig MCP server configuration
   * @param note Optional suffix appended to the tool description
   * @returns Array with one fallback tool definition
   */
  static fallbackSchemas(serverConfig, note = "") {
    return [
      {
        name: `mcp_${serverConfig.name}_search`,
        description: `Search using ${serverConfig.name} MCP server${note}`,
        parameters: {
          type: "object",
          properties: {
            query: {
              type: "string",
              description: "Search query"
            }
          },
          required: ["query"]
        }
      }
    ];
  }
  /**
   * Fetch tool schemas from MCP server.
   * POSTs to `${url}/tools` (with bearer auth when configured) and maps
   * each listed tool to a namespaced definition; falls back to a generic
   * "search" tool on any failure.
   * @param serverConfig MCP server configuration
   * @returns Array of tool definitions
   */
  static async fetchToolSchemas(serverConfig) {
    try {
      const headers = {
        "Content-Type": "application/json"
      };
      if (serverConfig.authorization_token) {
        headers["Authorization"] = `Bearer ${serverConfig.authorization_token}`;
      }
      const response = await ChatServiceHttpClient.post(
        `${serverConfig.url}/tools`,
        {},
        headers
      );
      const toolsData = await response.json();
      if (Array.isArray(toolsData.tools)) {
        return toolsData.tools.map((tool) => ({
          // Namespace tool names per server to avoid collisions
          name: `mcp_${serverConfig.name}_${tool.name}`,
          description: tool.description || `Tool from ${serverConfig.name} MCP server`,
          parameters: tool.inputSchema || {
            type: "object",
            properties: {},
            required: []
          }
        }));
      }
      return MCPSchemaFetcher.fallbackSchemas(serverConfig);
    } catch (error) {
      console.warn(
        `Failed to fetch MCP schemas from ${serverConfig.name}:`,
        error
      );
      return MCPSchemaFetcher.fallbackSchemas(serverConfig, " (schema fetch failed)");
    }
  }
  /**
   * Fetch all tool schemas from multiple MCP servers.
   * Servers are queried sequentially; a failing server is logged and
   * skipped rather than failing the whole batch.
   * @param mcpServers Array of MCP server configurations
   * @returns Array of all tool definitions
   */
  static async fetchAllToolSchemas(mcpServers) {
    const allSchemas = [];
    for (const server of mcpServers) {
      try {
        const schemas = await this.fetchToolSchemas(server);
        allSchemas.push(...schemas);
      } catch (error) {
        console.error(`Failed to fetch schemas from ${server.name}:`, error);
      }
    }
    return allSchemas;
  }
};
1196
+
1197
// src/services/providers/gemini/GeminiChatService.ts
var GeminiChatService = class {
  /**
   * Constructor
   * @param apiKey Google API key
   * @param model Name of the model to use
   * @param visionModel Name of the vision model (must be vision-capable)
   * @param tools Array of tool definitions
   * @param mcpServers Array of MCP server configurations
   * @param responseLength Optional setting used to derive maxOutputTokens
   * @throws Error if visionModel is not in GEMINI_VISION_SUPPORTED_MODELS
   */
  constructor(apiKey, model = MODEL_GEMINI_2_0_FLASH_LITE, visionModel = MODEL_GEMINI_2_0_FLASH_LITE, tools = [], mcpServers = [], responseLength) {
    /** Provider name */
    this.provider = "gemini";
    // Tool schemas fetched from MCP servers (lazily initialized)
    this.mcpToolSchemas = [];
    this.mcpSchemasInitialized = false;
    /** id(OpenAI) → name(Gemini) mapping */
    this.callIdMap = /* @__PURE__ */ new Map();
    this.apiKey = apiKey;
    this.model = model;
    this.responseLength = responseLength;
    // Fail fast: the vision model must be capable of image input
    if (!GEMINI_VISION_SUPPORTED_MODELS.includes(visionModel)) {
      throw new Error(
        `Model ${visionModel} does not support vision capabilities.`
      );
    }
    this.visionModel = visionModel;
    this.tools = tools;
    this.mcpServers = mcpServers;
  }
1226
+ /* ────────────────────────────────── */
1227
+ /* Utilities */
1228
+ /* ────────────────────────────────── */
1229
+ safeJsonParse(str) {
1230
+ try {
1231
+ return JSON.parse(str);
1232
+ } catch {
1233
+ return str;
1234
+ }
1235
+ }
1236
+ normalizeToolResult(val) {
1237
+ if (val === null) return { content: null };
1238
+ if (typeof val === "object") return val;
1239
+ return { content: val };
1240
+ }
1241
+ /**
1242
+ * camelCase → snake_case conversion (v1beta)
1243
+ */
1244
+ adaptKeysForApi(obj) {
1245
+ const map = {
1246
+ toolConfig: "tool_config",
1247
+ functionCallingConfig: "function_calling_config",
1248
+ functionDeclarations: "function_declarations",
1249
+ functionCall: "function_call",
1250
+ functionResponse: "function_response"
1251
+ };
1252
+ if (Array.isArray(obj)) return obj.map((v) => this.adaptKeysForApi(v));
1253
+ if (obj && typeof obj === "object") {
1254
+ return Object.fromEntries(
1255
+ Object.entries(obj).map(([k, v]) => [
1256
+ map[k] ?? k,
1257
+ this.adaptKeysForApi(v)
1258
+ ])
1259
+ );
1260
+ }
1261
+ return obj;
1262
+ }
1263
/**
 * Get the current model name
 * @returns Model name
 */
getModel() {
  return this.model;
}
/**
 * Get the current vision model name
 * @returns Vision model name
 */
getVisionModel() {
  return this.visionModel;
}
/**
 * Get configured MCP servers
 * @returns Array of MCP server configurations (live reference, not a copy)
 */
getMCPServers() {
  return this.mcpServers;
}
/**
 * Add MCP server configuration.
 * Invalidates the cached MCP tool schemas so they are re-fetched.
 * @param serverConfig MCP server configuration
 */
addMCPServer(serverConfig) {
  this.mcpServers.push(serverConfig);
  this.mcpSchemasInitialized = false;
}
/**
 * Remove MCP server by name.
 * Invalidates the cached MCP tool schemas so they are re-fetched.
 * @param serverName Name of the server to remove
 */
removeMCPServer(serverName) {
  this.mcpServers = this.mcpServers.filter(
    (server) => server.name !== serverName
  );
  this.mcpSchemasInitialized = false;
}
/**
 * Check if MCP servers are configured
 * @returns True if MCP servers are configured
 */
hasMCPServers() {
  return this.mcpServers.length > 0;
}
1309
+ /**
1310
+ * Initialize MCP tool schemas by fetching from servers
1311
+ * @private
1312
+ */
1313
+ async initializeMCPSchemas() {
1314
+ if (this.mcpSchemasInitialized || this.mcpServers.length === 0) {
1315
+ return;
1316
+ }
1317
+ try {
1318
+ const timeoutPromise = new Promise(
1319
+ (_, reject) => setTimeout(() => reject(new Error("MCP schema fetch timeout")), 5e3)
1320
+ );
1321
+ const schemasPromise = MCPSchemaFetcher.fetchAllToolSchemas(
1322
+ this.mcpServers
1323
+ );
1324
+ this.mcpToolSchemas = await Promise.race([
1325
+ schemasPromise,
1326
+ timeoutPromise
1327
+ ]);
1328
+ this.mcpSchemasInitialized = true;
1329
+ } catch (error) {
1330
+ console.warn("Failed to initialize MCP schemas, using fallback:", error);
1331
+ this.mcpToolSchemas = this.mcpServers.map((server) => ({
1332
+ name: `mcp_${server.name}_search`,
1333
+ description: `Search using ${server.name} MCP server (fallback)`,
1334
+ parameters: {
1335
+ type: "object",
1336
+ properties: {
1337
+ query: {
1338
+ type: "string",
1339
+ description: "Search query"
1340
+ }
1341
+ },
1342
+ required: ["query"]
1343
+ }
1344
+ }));
1345
+ this.mcpSchemasInitialized = true;
1346
+ }
1347
+ }
1348
/**
 * Process chat messages
 * @param messages Array of messages to send
 * @param onPartialResponse Callback to receive each part of streaming response
 * @param onCompleteResponse Callback to execute when response is complete
 * @throws Error if the model answers with a functionCall while tools are
 *         enabled — callers must drive the chatOnce() tool loop instead.
 */
async processChat(messages, onPartialResponse, onCompleteResponse) {
  try {
    // Fast path: no tools/MCP configured — just stream the completion
    if (this.tools.length === 0 && this.mcpServers.length === 0) {
      const res = await this.callGemini(messages, this.model, true);
      const { blocks: blocks2 } = await this.parseStream(res, onPartialResponse);
      const full = blocks2.filter((b) => b.type === "text").map((b) => b.text).join("");
      await onCompleteResponse(full);
      return;
    }
    const { blocks, stop_reason } = await this.chatOnce(
      messages,
      true,
      onPartialResponse
    );
    if (stop_reason === "end") {
      const full = blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
      await onCompleteResponse(full);
      return;
    }
    // stop_reason === "tool_use": this API does not run the tool loop
    throw new Error(
      "Received functionCall. Use chatOnce() loop when tools are enabled."
    );
  } catch (err) {
    console.error("Error in processChat:", err);
    throw err;
  }
}
/**
 * Process vision chat messages (text + images).
 * Same contract as processChat, but uses the vision model; note the
 * tool-enabled path is non-streaming — text blocks are replayed through
 * onPartialResponse after the full response arrives.
 * @param messages Array of vision messages to send
 * @param onPartialResponse Callback to receive each part of the response
 * @param onCompleteResponse Callback to execute when response is complete
 */
async processVisionChat(messages, onPartialResponse, onCompleteResponse) {
  try {
    if (this.tools.length === 0 && this.mcpServers.length === 0) {
      const res = await this.callGemini(messages, this.visionModel, true);
      const { blocks: blocks2 } = await this.parseStream(res, onPartialResponse);
      const full = blocks2.filter((b) => b.type === "text").map((b) => b.text).join("");
      await onCompleteResponse(full);
      return;
    }
    const { blocks, stop_reason } = await this.visionChatOnce(messages);
    blocks.filter((b) => b.type === "text").forEach((b) => onPartialResponse(b.text));
    if (stop_reason === "end") {
      const full = blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
      await onCompleteResponse(full);
      return;
    }
    throw new Error(
      "Received functionCall. Use visionChatOnce() loop when tools are enabled."
    );
  } catch (err) {
    console.error("Error in processVisionChat:", err);
    throw err;
  }
}
1405
/* ────────────────────────────────── */
/* OpenAI → Gemini conversion */
/* ────────────────────────────────── */
/**
 * Convert OpenAI-style chat messages into Gemini `contents`.
 * - Consecutive messages that map to the same Gemini role are merged
 *   into one entry with multiple text parts.
 * - `tool_calls` become standalone `model` entries with `functionCall`
 *   parts, and their id→name mapping is recorded in callIdMap.
 * - role "tool" messages become `user` entries with `functionResponse`.
 */
convertMessagesToGeminiFormat(messages) {
  const gemini = [];
  let currentRole = null;
  let currentParts = [];
  // Flush the pending same-role run into the output list
  const pushCurrent = () => {
    if (currentRole && currentParts.length) {
      gemini.push({ role: currentRole, parts: [...currentParts] });
      currentParts = [];
    }
  };
  for (const msg of messages) {
    const role = this.mapRoleToGemini(msg.role);
    if (msg.tool_calls) {
      pushCurrent();
      for (const call of msg.tool_calls) {
        // Remember id→name so a later tool-result message can be
        // attributed to the right function
        this.callIdMap.set(call.id, call.function.name);
        gemini.push({
          role: "model",
          parts: [
            {
              functionCall: {
                name: call.function.name,
                args: JSON.parse(call.function.arguments || "{}")
              }
            }
          ]
        });
      }
      continue;
    }
    if (msg.role === "tool") {
      pushCurrent();
      // Fall back to "result" when the call id was never registered
      const funcName = msg.name ?? this.callIdMap.get(msg.tool_call_id) ?? "result";
      gemini.push({
        role: "user",
        parts: [
          {
            functionResponse: {
              name: funcName,
              response: this.normalizeToolResult(
                this.safeJsonParse(msg.content)
              )
            }
          }
        ]
      });
      continue;
    }
    if (role !== currentRole) pushCurrent();
    currentRole = role;
    currentParts.push({ text: msg.content });
  }
  pushCurrent();
  return gemini;
}
1463
/* ────────────────────────────────── */
/* HTTP call */
/* ────────────────────────────────── */
/**
 * Issue the Gemini HTTP request.
 * Chooses text vs. vision message conversion based on content blocks,
 * attaches local + MCP tool declarations, selects the API version
 * (flash-lite and 2.5 models go straight to v1beta) and retries a v1
 * failure on v1beta when the error text suggests a schema mismatch.
 * @param messages Messages to send
 * @param model Target model name
 * @param stream Request an SSE stream when true
 * @param maxTokens Optional explicit maxOutputTokens override
 * @returns The raw fetch Response
 */
async callGemini(messages, model, stream = false, maxTokens) {
  // Vision path is triggered by any image_url / inlineData content block
  const hasVision = messages.some(
    (m) => Array.isArray(m.content) && m.content.some(
      (b) => b?.type === "image_url" || b?.inlineData
    )
  );
  const contents = hasVision ? await this.convertVisionMessagesToGeminiFormat(
    messages
  ) : this.convertMessagesToGeminiFormat(messages);
  const body = {
    contents,
    generationConfig: {
      maxOutputTokens: maxTokens !== void 0 ? maxTokens : getMaxTokensForResponseLength(this.responseLength)
    }
  };
  const allToolDeclarations = [];
  if (this.tools.length > 0) {
    allToolDeclarations.push(
      ...this.tools.map((t) => ({
        name: t.name,
        description: t.description,
        parameters: t.parameters
      }))
    );
  }
  if (this.mcpServers.length > 0) {
    try {
      await this.initializeMCPSchemas();
      allToolDeclarations.push(
        ...this.mcpToolSchemas.map((t) => ({
          name: t.name,
          description: t.description,
          parameters: t.parameters
        }))
      );
    } catch (error) {
      // MCP tools are best-effort; the request proceeds without them
      console.warn("MCP initialization failed, skipping MCP tools:", error);
    }
  }
  if (allToolDeclarations.length > 0) {
    body.tools = [
      {
        functionDeclarations: allToolDeclarations
      }
    ];
    body.toolConfig = { functionCallingConfig: { mode: "AUTO" } };
  }
  // Build and send one request for a given API version
  const fetchOnce = async (ver, payload) => {
    const fn = stream ? "streamGenerateContent" : "generateContent";
    const alt = stream ? "?alt=sse" : "";
    const url = `${ENDPOINT_GEMINI_API}/${ver}/models/${model}:${fn}${alt}${alt ? "&" : "?"}key=${this.apiKey}`;
    return ChatServiceHttpClient.post(url, payload);
  };
  const isLite = /flash[-_]lite/.test(model);
  const isGemini25 = /gemini-2\.5/.test(model);
  const firstVer = isLite || isGemini25 ? "v1beta" : "v1";
  const tryApi = async () => {
    try {
      // v1beta requires snake_case field names
      const payload = firstVer === "v1" ? body : this.adaptKeysForApi(body);
      return await fetchOnce(firstVer, payload);
    } catch (e) {
      // Retry on v1beta only when v1 rejected the request shape
      if (!(isLite || isGemini25) && /Unknown name|Cannot find field|404/.test(e.message)) {
        return await fetchOnce("v1beta", this.adaptKeysForApi(body));
      }
      throw e;
    }
  };
  try {
    const res = await tryApi();
    return res;
  } catch (error) {
    if (error.body) {
      console.error("Gemini API Error Details:", error.body);
      console.error("Request Body:", JSON.stringify(body, null, 2));
    }
    throw error;
  }
}
1544
+ /**
1545
+ * Convert AITuber OnAir vision messages to Gemini format
1546
+ * @param messages Array of vision messages
1547
+ * @returns Gemini formatted vision messages
1548
+ */
1549
+ async convertVisionMessagesToGeminiFormat(messages) {
1550
+ const geminiMessages = [];
1551
+ let currentRole = null;
1552
+ let currentParts = [];
1553
+ for (const msg of messages) {
1554
+ const role = this.mapRoleToGemini(msg.role);
1555
+ if (msg.tool_calls) {
1556
+ for (const call of msg.tool_calls) {
1557
+ geminiMessages.push({
1558
+ role: "model",
1559
+ parts: [
1560
+ {
1561
+ functionCall: {
1562
+ name: call.function.name,
1563
+ args: JSON.parse(call.function.arguments || "{}")
1564
+ }
1565
+ }
1566
+ ]
1567
+ });
1568
+ }
1569
+ continue;
1570
+ }
1571
+ if (msg.role === "tool") {
1572
+ const funcName = msg.name ?? this.callIdMap.get(msg.tool_call_id) ?? "result";
1573
+ geminiMessages.push({
1574
+ role: "user",
1575
+ parts: [
1576
+ {
1577
+ functionResponse: {
1578
+ name: funcName,
1579
+ response: this.normalizeToolResult(
1580
+ this.safeJsonParse(msg.content)
1581
+ )
1582
+ }
1583
+ }
1584
+ ]
1585
+ });
1586
+ continue;
1587
+ }
1588
+ if (role !== currentRole && currentParts.length > 0) {
1589
+ geminiMessages.push({
1590
+ role: currentRole,
1591
+ parts: [...currentParts]
1592
+ });
1593
+ currentParts = [];
1594
+ }
1595
+ currentRole = role;
1596
+ if (typeof msg.content === "string") {
1597
+ currentParts.push({ text: msg.content });
1598
+ } else if (Array.isArray(msg.content)) {
1599
+ for (const block of msg.content) {
1600
+ if (block.type === "text") {
1601
+ currentParts.push({ text: block.text });
1602
+ } else if (block.type === "image_url") {
1603
+ try {
1604
+ const imageResponse = await ChatServiceHttpClient.get(
1605
+ block.image_url.url
1606
+ );
1607
+ const imageBlob = await imageResponse.blob();
1608
+ const base64Data = await this.blobToBase64(imageBlob);
1609
+ currentParts.push({
1610
+ inlineData: {
1611
+ mimeType: imageBlob.type || "image/jpeg",
1612
+ data: base64Data.split(",")[1]
1613
+ // Remove the "data:image/jpeg;base64," prefix
1614
+ }
1615
+ });
1616
+ } catch (error) {
1617
+ console.error("Error processing image:", error);
1618
+ throw new Error(`Failed to process image: ${error.message}`);
1619
+ }
1620
+ }
1621
+ }
1622
+ }
1623
+ }
1624
+ if (currentRole && currentParts.length > 0) {
1625
+ geminiMessages.push({
1626
+ role: currentRole,
1627
+ parts: [...currentParts]
1628
+ });
1629
+ }
1630
+ return geminiMessages;
1631
+ }
1632
+ /**
1633
+ * Convert Blob to Base64 string
1634
+ * @param blob Image blob
1635
+ * @returns Promise with base64 encoded string
1636
+ */
1637
+ blobToBase64(blob) {
1638
+ return new Promise((resolve, reject) => {
1639
+ const reader = new FileReader();
1640
+ reader.onloadend = () => resolve(reader.result);
1641
+ reader.onerror = reject;
1642
+ reader.readAsDataURL(blob);
1643
+ });
1644
+ }
1645
+ /**
1646
+ * Map AITuber OnAir roles to Gemini roles
1647
+ * @param role AITuber OnAir role
1648
+ * @returns Gemini role
1649
+ */
1650
+ mapRoleToGemini(role) {
1651
+ switch (role) {
1652
+ case "system":
1653
+ return "model";
1654
+ // Gemini uses 'model' for system messages
1655
+ case "user":
1656
+ return "user";
1657
+ case "assistant":
1658
+ return "model";
1659
+ default:
1660
+ return "user";
1661
+ }
1662
+ }
1663
/* ────────────────────────────────────────────────────────── */
/* Convert NDJSON stream to common format */
/* ────────────────────────────────────────────────────────── */
/**
 * Parse a Gemini streaming response (SSE or NDJSON lines) into blocks.
 * Each complete line (optionally prefixed with "data:") is JSON-parsed;
 * candidate parts become text / tool_use / tool_result blocks.
 * @param res Fetch Response whose body is the stream
 * @param onPartial Callback invoked with each text fragment
 * @returns { blocks, stop_reason } — "tool_use" when any tool_use block exists
 */
async parseStream(res, onPartial) {
  const reader = res.body.getReader();
  const dec = new TextDecoder();
  const textBlocks = [];
  const toolBlocks = [];
  let buf = "";
  // Parse one payload line; silently ignore non-JSON lines
  const flush = (payload) => {
    if (!payload || payload === "[DONE]") return;
    let obj;
    try {
      obj = JSON.parse(payload);
    } catch {
      return;
    }
    for (const cand of obj.candidates ?? []) {
      for (const part of cand.content?.parts ?? []) {
        if (part.text) {
          onPartial(part.text);
          StreamTextAccumulator.addTextBlock(textBlocks, part.text);
        }
        if (part.functionCall) {
          toolBlocks.push({
            type: "tool_use",
            // Gemini does not supply call ids; generate one
            id: this.genUUID(),
            name: part.functionCall.name,
            input: part.functionCall.args ?? {}
          });
        }
        if (part.functionResponse) {
          toolBlocks.push({
            type: "tool_result",
            tool_use_id: part.functionResponse.name,
            content: JSON.stringify(part.functionResponse.response)
          });
        }
      }
    }
  };
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buf += dec.decode(value, { stream: true });
    let nl;
    while ((nl = buf.indexOf("\n")) !== -1) {
      let line = buf.slice(0, nl);
      buf = buf.slice(nl + 1);
      if (line.endsWith("\r")) line = line.slice(0, -1);
      if (!line.trim()) {
        flush("");
        continue;
      }
      if (line.startsWith("data:")) line = line.slice(5).trim();
      if (!line) continue;
      flush(line);
    }
  }
  // Trailing payload without a final newline still gets parsed
  if (buf) flush(buf);
  const blocks = [...textBlocks, ...toolBlocks];
  return {
    blocks,
    stop_reason: toolBlocks.some((b) => b.type === "tool_use") ? "tool_use" : "end"
  };
}
/* ────────────────────────────────────────────────────────── */
/* Convert JSON of non-stream (= generateContent) */
/* ────────────────────────────────────────────────────────── */
/**
 * Convert a non-streaming generateContent JSON payload into blocks.
 * @param data Parsed generateContent response body
 * @returns { blocks, stop_reason } — "tool_use" when any tool_use block exists
 */
parseOneShot(data) {
  const textBlocks = [];
  const toolBlocks = [];
  for (const cand of data.candidates ?? []) {
    for (const part of cand.content?.parts ?? []) {
      if (part.text) {
        textBlocks.push({ type: "text", text: part.text });
      }
      if (part.functionCall) {
        toolBlocks.push({
          type: "tool_use",
          id: this.genUUID(),
          name: part.functionCall.name,
          input: part.functionCall.args ?? {}
        });
      }
      if (part.functionResponse) {
        toolBlocks.push({
          type: "tool_result",
          tool_use_id: part.functionResponse.name,
          content: JSON.stringify(part.functionResponse.response)
        });
      }
    }
  }
  const blocks = [...textBlocks, ...toolBlocks];
  return {
    blocks,
    stop_reason: toolBlocks.some((b) => b.type === "tool_use") ? "tool_use" : "end"
  };
}
1763
+ /* ────────────────────────────────────────────────────────── */
1764
+ /* chatOnce (text) */
1765
+ /* ────────────────────────────────────────────────────────── */
1766
+ async chatOnce(messages, stream = true, onPartialResponse = () => {
1767
+ }, maxTokens) {
1768
+ const res = await this.callGemini(messages, this.model, stream, maxTokens);
1769
+ return stream ? this.parseStream(res, onPartialResponse) : this.parseOneShot(await res.json());
1770
+ }
1771
+ /* ────────────────────────────────────────────────────────── */
1772
+ /* visionChatOnce (images) */
1773
+ /* ────────────────────────────────────────────────────────── */
1774
+ async visionChatOnce(messages, stream = false, onPartialResponse = () => {
1775
+ }, maxTokens) {
1776
+ const res = await this.callGemini(
1777
+ messages,
1778
+ this.visionModel,
1779
+ stream,
1780
+ maxTokens
1781
+ );
1782
+ return stream ? this.parseStream(res, onPartialResponse) : this.parseOneShot(await res.json());
1783
+ }
1784
+ /* ────────────────────────────────────────────────────────── */
1785
+ /* UUID helper */
1786
+ /* ────────────────────────────────────────────────────────── */
1787
+ genUUID() {
1788
+ return typeof crypto !== "undefined" && crypto.randomUUID ? crypto.randomUUID() : "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, (c) => {
1789
+ const r = Math.random() * 16 | 0;
1790
+ const v = c === "x" ? r : r & 3 | 8;
1791
+ return v.toString(16);
1792
+ });
1793
+ }
1794
+ };
1795
+
1796
+ // src/services/providers/gemini/GeminiChatServiceProvider.ts
1797
var GeminiChatServiceProvider = class {
  /**
   * Create a chat service instance
   * @param options Service options
   * @returns GeminiChatService instance
   */
  createChatService(options) {
    const model = options.model || this.getDefaultModel();
    let visionModel = options.visionModel;
    if (!visionModel) {
      // Reuse the requested chat model for vision when it is capable,
      // otherwise fall back to the provider default.
      visionModel = this.supportsVisionForModel(model) ? options.model : this.getDefaultModel();
    }
    return new GeminiChatService(
      options.apiKey,
      model,
      visionModel,
      options.tools || [],
      options.mcpServers || [],
      options.responseLength
    );
  }
  /**
   * Get the provider name
   * @returns Provider name ('gemini')
   */
  getProviderName() {
    return "gemini";
  }
  /**
   * Get the list of supported models
   * @returns Array of supported model names
   */
  getSupportedModels() {
    return [
      MODEL_GEMINI_2_5_PRO,
      MODEL_GEMINI_2_5_FLASH,
      MODEL_GEMINI_2_5_FLASH_LITE,
      MODEL_GEMINI_2_5_FLASH_LITE_PREVIEW_06_17,
      MODEL_GEMINI_2_0_FLASH,
      MODEL_GEMINI_2_0_FLASH_LITE,
      MODEL_GEMINI_1_5_FLASH,
      MODEL_GEMINI_1_5_PRO
    ];
  }
  /**
   * Get the default model
   * @returns Default model name
   */
  getDefaultModel() {
    return MODEL_GEMINI_2_0_FLASH_LITE;
  }
  /**
   * Check if this provider supports vision (image processing)
   * @returns Vision support status (true)
   */
  supportsVision() {
    return true;
  }
  /**
   * Check if a specific model supports vision capabilities
   * @param model The model name to check
   * @returns True if the model supports vision, false otherwise
   */
  supportsVisionForModel(model) {
    return GEMINI_VISION_SUPPORTED_MODELS.includes(model);
  }
};
1860
+
1861
+ // src/services/providers/claude/ClaudeChatService.ts
1862
+ var ClaudeChatService = class {
1863
/**
 * Constructor
 * @param apiKey Anthropic API key
 * @param model Name of the model to use (falls back to Claude 3 Haiku when falsy)
 * @param visionModel Name of the vision model (falls back to Claude 3 Haiku when falsy)
 * @param tools Array of tool definitions
 * @param mcpServers Array of MCP server configurations (optional)
 * @param responseLength Response length preset used to derive max_tokens (optional)
 * @throws Error if the vision model doesn't support vision capabilities
 */
constructor(apiKey, model = MODEL_CLAUDE_3_HAIKU, visionModel = MODEL_CLAUDE_3_HAIKU, tools = [], mcpServers = [], responseLength) {
  /** Provider name */
  this.provider = "claude";
  this.apiKey = apiKey;
  // `||` guards against explicit null / empty-string arguments, not only
  // undefined (which the parameter defaults already cover).
  this.model = model || MODEL_CLAUDE_3_HAIKU;
  this.visionModel = visionModel || MODEL_CLAUDE_3_HAIKU;
  this.tools = tools;
  this.mcpServers = mcpServers;
  this.responseLength = responseLength;
  // Fail fast: vision requests are routed to visionModel, so an unsupported
  // model would only surface as a confusing API error later.
  if (!CLAUDE_VISION_SUPPORTED_MODELS.includes(this.visionModel)) {
    throw new Error(
      `Model ${this.visionModel} does not support vision capabilities.`
    );
  }
}
1887
/**
 * Get the current model name
 * @returns Model name
 */
getModel() {
  return this.model;
}
/**
 * Get the current vision model name
 * @returns Vision model name
 */
getVisionModel() {
  return this.visionModel;
}
/**
 * Get configured MCP servers
 * @returns Array of MCP server configurations (live reference, not a copy)
 */
getMCPServers() {
  return this.mcpServers;
}
/**
 * Add MCP server configuration
 * @param serverConfig MCP server configuration
 */
addMCPServer(serverConfig) {
  this.mcpServers.push(serverConfig);
}
/**
 * Remove MCP server by name
 * @param serverName Name of the server to remove
 */
removeMCPServer(serverName) {
  // Rebinds this.mcpServers to a filtered copy rather than mutating in place.
  this.mcpServers = this.mcpServers.filter(
    (server) => server.name !== serverName
  );
}
/**
 * Check if MCP servers are configured
 * @returns True if MCP servers are configured
 */
hasMCPServers() {
  return this.mcpServers.length > 0;
}
1931
+ /**
1932
+ * Process chat messages
1933
+ * @param messages Array of messages to send
1934
+ * @param onPartialResponse Callback to receive each part of streaming response
1935
+ * @param onCompleteResponse Callback to execute when response is complete
1936
+ */
1937
+ async processChat(messages, onPartialResponse, onCompleteResponse) {
1938
+ if (this.tools.length === 0 && this.mcpServers.length === 0) {
1939
+ const res = await this.callClaude(messages, this.model, true);
1940
+ const full = await this.parsePureStream(res, onPartialResponse);
1941
+ await onCompleteResponse(full);
1942
+ return;
1943
+ }
1944
+ const result = await this.chatOnce(messages, true, onPartialResponse);
1945
+ if (result.stop_reason === "end") {
1946
+ const full = result.blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
1947
+ await onCompleteResponse(full);
1948
+ return;
1949
+ }
1950
+ throw new Error(
1951
+ "processChat received tool_calls. ChatProcessor must use chatOnce() loop when tools are enabled."
1952
+ );
1953
+ }
1954
+ /**
1955
+ * Process chat messages with images
1956
+ * @param messages Array of messages to send (including images)
1957
+ * @param onPartialResponse Callback to receive each part of streaming response
1958
+ * @param onCompleteResponse Callback to execute when response is complete
1959
+ */
1960
+ async processVisionChat(messages, onPartialResponse, onCompleteResponse) {
1961
+ if (this.tools.length === 0 && this.mcpServers.length === 0) {
1962
+ const res = await this.callClaude(messages, this.visionModel, true);
1963
+ const full = await this.parsePureStream(res, onPartialResponse);
1964
+ await onCompleteResponse(full);
1965
+ return;
1966
+ }
1967
+ const result = await this.visionChatOnce(messages);
1968
+ if (result.stop_reason === "end") {
1969
+ const full = result.blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
1970
+ await onCompleteResponse(full);
1971
+ return;
1972
+ }
1973
+ throw new Error(
1974
+ "processVisionChat received tool_calls. ChatProcessor must use chatOnce() loop when tools are enabled."
1975
+ );
1976
+ }
1977
+ /**
1978
+ * Convert AITuber OnAir messages to Claude format
1979
+ * @param messages Array of messages
1980
+ * @returns Claude formatted messages
1981
+ */
1982
+ convertMessagesToClaudeFormat(messages) {
1983
+ return messages.map((msg) => {
1984
+ return {
1985
+ role: this.mapRoleToClaude(msg.role),
1986
+ content: msg.content
1987
+ };
1988
+ });
1989
+ }
1990
/**
 * Convert AITuber OnAir vision messages to Claude format.
 * String content is wrapped in a single text block; array content has each
 * OpenAI-style `image_url` block rewritten to Claude's `image` block
 * (base64 data URLs are decoded, plain URLs get a guessed media type);
 * anything else (e.g. text blocks) passes through unchanged. Unparseable
 * data URLs are dropped. Non-string, non-array content becomes an empty
 * content array.
 * @param messages Array of vision messages
 * @returns Claude formatted vision messages
 */
convertVisionMessagesToClaudeFormat(messages) {
  return messages.map((msg) => {
    if (typeof msg.content === "string") {
      // Plain text message: Claude vision requests still use block arrays.
      return {
        role: this.mapRoleToClaude(msg.role),
        content: [
          {
            type: "text",
            text: msg.content
          }
        ]
      };
    }
    if (Array.isArray(msg.content)) {
      const content = msg.content.map((block) => {
        if (block.type === "image_url") {
          if (block.image_url.url.startsWith("data:")) {
            // data URL → Claude base64 image source.
            const m = block.image_url.url.match(
              /^data:([^;]+);base64,(.+)$/
            );
            if (m) {
              return {
                type: "image",
                source: { type: "base64", media_type: m[1], data: m[2] }
              };
            }
            // Malformed data URL: drop the block (filtered out below).
            return null;
          }
          // Remote URL → Claude URL image source; media type inferred from
          // the file extension.
          return {
            type: "image",
            source: {
              type: "url",
              url: block.image_url.url,
              media_type: this.getMimeTypeFromUrl(block.image_url.url)
            }
          };
        }
        // Non-image blocks (e.g. text) are forwarded as-is.
        return block;
      }).filter((b) => b);
      return {
        role: this.mapRoleToClaude(msg.role),
        content
      };
    }
    // Unexpected content shape: send an empty block array rather than failing.
    return {
      role: this.mapRoleToClaude(msg.role),
      content: []
    };
  });
}
2045
+ /**
2046
+ * Map AITuber OnAir roles to Claude roles
2047
+ * @param role AITuber OnAir role
2048
+ * @returns Claude role
2049
+ */
2050
+ mapRoleToClaude(role) {
2051
+ switch (role) {
2052
+ case "system":
2053
+ return "system";
2054
+ case "user":
2055
+ return "user";
2056
+ case "assistant":
2057
+ return "assistant";
2058
+ default:
2059
+ return "user";
2060
+ }
2061
+ }
2062
+ /**
2063
+ * Get MIME type from URL
2064
+ * @param url Image URL
2065
+ * @returns MIME type
2066
+ */
2067
+ getMimeTypeFromUrl(url) {
2068
+ const extension = url.split(".").pop()?.toLowerCase();
2069
+ switch (extension) {
2070
+ case "jpg":
2071
+ case "jpeg":
2072
+ return "image/jpeg";
2073
+ case "png":
2074
+ return "image/png";
2075
+ case "gif":
2076
+ return "image/gif";
2077
+ case "webp":
2078
+ return "image/webp";
2079
+ default:
2080
+ return "image/jpeg";
2081
+ }
2082
+ }
2083
/**
 * Call Claude API
 * @param messages Array of messages to send
 * @param model Model name
 * @param stream Whether to stream the response
 * @param maxTokens Maximum tokens for response (optional; otherwise derived
 *                  from this.responseLength)
 * @returns Response
 */
async callClaude(messages, model, stream, maxTokens) {
  // Claude takes the system prompt as a top-level field, not as a message,
  // so it is extracted here and stripped from the message list.
  const system = messages.find((m) => m.role === "system")?.content ?? "";
  const content = messages.filter((m) => m.role !== "system");
  // Pick the vision converter when any message carries an image block.
  const hasVision = content.some(
    (m) => Array.isArray(m.content) && m.content.some(
      (b) => b.type === "image_url" || b.type === "image"
    )
  );
  const body = {
    model,
    system,
    messages: hasVision ? this.convertVisionMessagesToClaudeFormat(
      content
    ) : this.convertMessagesToClaudeFormat(content),
    stream,
    // Explicit maxTokens wins; otherwise derive from the configured preset.
    max_tokens: maxTokens !== void 0 ? maxTokens : getMaxTokensForResponseLength(this.responseLength)
  };
  if (this.tools.length) {
    // Claude's tool schema uses `input_schema` where ours uses `parameters`.
    body.tools = this.tools.map((t) => ({
      name: t.name,
      description: t.description,
      input_schema: t.parameters
    }));
    body.tool_choice = { type: "auto" };
  }
  if (this.mcpServers.length > 0) {
    body.mcp_servers = this.mcpServers;
  }
  const headers = {
    "Content-Type": "application/json",
    "x-api-key": this.apiKey,
    "anthropic-version": "2023-06-01",
    // Required for calling the Anthropic API directly from a browser context.
    "anthropic-dangerous-direct-browser-access": "true"
  };
  if (this.mcpServers.length > 0) {
    // MCP connector is gated behind a beta header.
    headers["anthropic-beta"] = "mcp-client-2025-04-04";
  }
  const res = await ChatServiceHttpClient.post(
    ENDPOINT_CLAUDE_API,
    body,
    headers
  );
  return res;
}
2135
/**
 * Parse stream response (Anthropic SSE event stream).
 * Accumulates text deltas (forwarded to onPartial as they arrive) and
 * assembles tool_use / mcp_tool_use calls from content_block_start +
 * input_json_delta + content_block_stop event triples.
 * @param res Response whose body is an SSE stream
 * @param onPartial Callback to receive each part of streaming response
 * @returns ClaudeInternalCompletion
 */
async parseStream(res, onPartial) {
  const reader = res.body.getReader();
  const dec = new TextDecoder();
  // Holds finished blocks in arrival order (text deltas, tool results,
  // completed tool calls).
  const textBlocks = [];
  // In-flight tool calls keyed by content-block index while their JSON
  // arguments stream in.
  const toolCalls = /* @__PURE__ */ new Map();
  let buf = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buf += dec.decode(value, { stream: true });
    let nl;
    // Consume complete lines; a partial line stays in buf for the next chunk.
    while ((nl = buf.indexOf("\n")) !== -1) {
      const line = buf.slice(0, nl).trim();
      buf = buf.slice(nl + 1);
      if (!line.startsWith("data:")) continue;
      const payload = line.slice(5).trim();
      // NOTE(review): this `break` only exits the inner line loop; the outer
      // read loop continues until the stream actually ends.
      if (payload === "[DONE]") break;
      // NOTE(review): JSON.parse is unguarded here (unlike the OpenRouter
      // parser) — a malformed SSE line would throw out of this method.
      const ev = JSON.parse(payload);
      if (ev.type === "content_block_delta" && ev.delta?.text) {
        // Text delta: forward immediately and record as a text block.
        onPartial(ev.delta.text);
        textBlocks.push({ type: "text", text: ev.delta.text });
      }
      if (ev.type === "content_block_start" && ev.content_block?.type === "tool_use") {
        // Begin accumulating a client-side tool call's JSON arguments.
        toolCalls.set(ev.index, {
          id: ev.content_block.id,
          name: ev.content_block.name,
          args: ""
        });
      } else if (ev.type === "content_block_start" && ev.content_block?.type === "mcp_tool_use") {
        // MCP tool calls additionally carry the originating server name.
        toolCalls.set(ev.index, {
          id: ev.content_block.id,
          name: ev.content_block.name,
          args: "",
          server_name: ev.content_block.server_name
        });
      } else if (ev.type === "content_block_start" && // case of non-stream
      ev.content_block?.type === "tool_result") {
        textBlocks.push({
          type: "tool_result",
          tool_use_id: ev.content_block.tool_use_id,
          content: ev.content_block.content ?? ""
        });
      } else if (ev.type === "content_block_start" && ev.content_block?.type === "mcp_tool_result") {
        textBlocks.push({
          type: "mcp_tool_result",
          tool_use_id: ev.content_block.tool_use_id,
          is_error: ev.content_block.is_error ?? false,
          content: ev.content_block.content ?? []
        });
      }
      if (ev.type === "content_block_delta" && ev.delta?.type === "input_json_delta") {
        // Append streamed JSON-argument fragments to the in-flight call.
        const entry = toolCalls.get(ev.index);
        if (entry) entry.args += ev.delta.partial_json || "";
      }
      if (ev.type === "content_block_stop" && toolCalls.has(ev.index)) {
        // Block finished: materialize the accumulated call as a block.
        const { id, name, args, server_name } = toolCalls.get(ev.index);
        if (server_name) {
          textBlocks.push({
            type: "mcp_tool_use",
            id,
            name,
            server_name,
            input: JSON.parse(args || "{}")
          });
        } else {
          textBlocks.push({
            type: "tool_use",
            id,
            name,
            input: JSON.parse(args || "{}")
          });
        }
        toolCalls.delete(ev.index);
      }
    }
  }
  return {
    blocks: textBlocks,
    // Any completed tool call flips the stop reason to "tool_use".
    stop_reason: textBlocks.some(
      (b) => b.type === "tool_use" || b.type === "mcp_tool_use"
    ) ? "tool_use" : "end"
  };
}
2224
+ async parsePureStream(res, onPartial) {
2225
+ const { blocks } = await this.parseStream(res, onPartial);
2226
+ return blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
2227
+ }
2228
+ parseOneShot(data) {
2229
+ const blocks = [];
2230
+ (data.content ?? []).forEach((c) => {
2231
+ if (c.type === "text") {
2232
+ blocks.push({ type: "text", text: c.text });
2233
+ } else if (c.type === "tool_use") {
2234
+ blocks.push({
2235
+ type: "tool_use",
2236
+ id: c.id,
2237
+ name: c.name,
2238
+ input: c.input ?? {}
2239
+ });
2240
+ } else if (c.type === "mcp_tool_use") {
2241
+ blocks.push({
2242
+ type: "mcp_tool_use",
2243
+ id: c.id,
2244
+ name: c.name,
2245
+ server_name: c.server_name,
2246
+ input: c.input ?? {}
2247
+ });
2248
+ } else if (c.type === "tool_result") {
2249
+ blocks.push({
2250
+ type: "tool_result",
2251
+ tool_use_id: c.tool_use_id,
2252
+ content: c.content ?? ""
2253
+ });
2254
+ } else if (c.type === "mcp_tool_result") {
2255
+ blocks.push({
2256
+ type: "mcp_tool_result",
2257
+ tool_use_id: c.tool_use_id,
2258
+ is_error: c.is_error ?? false,
2259
+ content: c.content ?? []
2260
+ });
2261
+ }
2262
+ });
2263
+ return {
2264
+ blocks,
2265
+ stop_reason: blocks.some(
2266
+ (b) => b.type === "tool_use" || b.type === "mcp_tool_use"
2267
+ ) ? "tool_use" : "end"
2268
+ };
2269
+ }
2270
+ /**
2271
+ * Process chat messages
2272
+ * @param messages Array of messages to send
2273
+ * @param stream Whether to stream the response
2274
+ * @param onPartial Callback to receive each part of streaming response
2275
+ * @param maxTokens Maximum tokens for response (optional)
2276
+ * @returns ToolChatCompletion
2277
+ */
2278
+ async chatOnce(messages, stream = true, onPartial = () => {
2279
+ }, maxTokens) {
2280
+ const res = await this.callClaude(messages, this.model, stream, maxTokens);
2281
+ const internalResult = stream ? await this.parseStream(res, onPartial) : this.parseOneShot(await res.json());
2282
+ return this.convertToStandardCompletion(internalResult);
2283
+ }
2284
+ /**
2285
+ * Process vision chat messages
2286
+ * @param messages Array of messages to send
2287
+ * @param stream Whether to stream the response
2288
+ * @param onPartial Callback to receive each part of streaming response
2289
+ * @param maxTokens Maximum tokens for response (optional)
2290
+ * @returns ToolChatCompletion
2291
+ */
2292
+ async visionChatOnce(messages, stream = false, onPartial = () => {
2293
+ }, maxTokens) {
2294
+ const res = await this.callClaude(
2295
+ messages,
2296
+ this.visionModel,
2297
+ stream,
2298
+ maxTokens
2299
+ );
2300
+ const internalResult = stream ? await this.parseStream(res, onPartial) : this.parseOneShot(await res.json());
2301
+ return this.convertToStandardCompletion(internalResult);
2302
+ }
2303
+ /**
2304
+ * Convert internal completion to standard ToolChatCompletion
2305
+ * @param completion Internal completion result
2306
+ * @returns Standard ToolChatCompletion
2307
+ */
2308
+ convertToStandardCompletion(completion) {
2309
+ const standardBlocks = completion.blocks.filter(
2310
+ (block) => {
2311
+ return block.type === "text" || block.type === "tool_use" || block.type === "tool_result";
2312
+ }
2313
+ );
2314
+ return {
2315
+ blocks: standardBlocks,
2316
+ stop_reason: completion.stop_reason
2317
+ };
2318
+ }
2319
+ };
2320
+
2321
+ // src/services/providers/claude/ClaudeChatServiceProvider.ts
2322
var ClaudeChatServiceProvider = class {
  /**
   * Build a ClaudeChatService from the given options (can include mcpServers).
   * Picks a vision-capable model when the caller did not specify one.
   * @param options Service options
   * @returns ClaudeChatService instance
   */
  createChatService(options) {
    const model = options.model || this.getDefaultModel();
    let visionModel = options.visionModel;
    if (!visionModel) {
      // Reuse the caller's chat model for vision when it is vision-capable,
      // otherwise fall back to the provider default.
      visionModel = this.supportsVisionForModel(model) ? options.model : this.getDefaultModel();
    }
    return new ClaudeChatService(
      options.apiKey,
      model,
      visionModel,
      options.tools ?? [],
      options.mcpServers ?? [],
      options.responseLength
    );
  }
  /**
   * Get the provider name
   * @returns Provider name ('claude')
   */
  getProviderName() {
    return "claude";
  }
  /**
   * Get the list of supported models
   * @returns Array of supported model names
   */
  getSupportedModels() {
    return [
      MODEL_CLAUDE_3_HAIKU,
      MODEL_CLAUDE_3_5_HAIKU,
      MODEL_CLAUDE_3_5_SONNET,
      MODEL_CLAUDE_3_7_SONNET,
      MODEL_CLAUDE_4_SONNET,
      MODEL_CLAUDE_4_OPUS
    ];
  }
  /**
   * Get the default model
   * @returns Default model name
   */
  getDefaultModel() {
    return MODEL_CLAUDE_3_HAIKU;
  }
  /**
   * Check if this provider supports vision (image processing)
   * @returns Vision support status (true)
   */
  supportsVision() {
    return true;
  }
  /**
   * Check if a specific model supports vision capabilities
   * @param model The model name to check
   * @returns True if the model supports vision, false otherwise
   */
  supportsVisionForModel(model) {
    return CLAUDE_VISION_SUPPORTED_MODELS.includes(model);
  }
};
2383
+
2384
+ // src/services/providers/openrouter/OpenRouterChatService.ts
2385
+ var OpenRouterChatService = class {
2386
/**
 * Constructor
 * @param apiKey OpenRouter API key
 * @param model Name of the model to use
 * @param visionModel Name of the vision model
 * @param tools Tool definitions (optional; defaults to none)
 * @param endpoint API endpoint (optional)
 * @param responseLength Response length configuration (optional)
 * @param appName Application name for OpenRouter analytics (optional; sent as X-Title)
 * @param appUrl Application URL for OpenRouter analytics (optional; sent as HTTP-Referer)
 * @param reasoning_effort Reasoning effort level (optional)
 * @param includeReasoning Whether to include reasoning in response (optional)
 * @param reasoningMaxTokens Maximum tokens for reasoning (optional)
 */
constructor(apiKey, model = MODEL_GPT_OSS_20B_FREE, visionModel = MODEL_GPT_OSS_20B_FREE, tools, endpoint = ENDPOINT_OPENROUTER_API, responseLength, appName, appUrl, reasoning_effort, includeReasoning, reasoningMaxTokens) {
  /** Provider name */
  this.provider = "openrouter";
  // Free-tier rate limiter state (see applyRateLimiting).
  this.lastRequestTime = 0;
  this.requestCount = 0;
  this.apiKey = apiKey;
  this.model = model;
  this.tools = tools || [];
  this.endpoint = endpoint;
  this.responseLength = responseLength;
  this.appName = appName;
  this.appUrl = appUrl;
  this.reasoning_effort = reasoning_effort;
  this.includeReasoning = includeReasoning;
  this.reasoningMaxTokens = reasoningMaxTokens;
  // NOTE(review): unlike other providers, the vision model is not validated
  // here — processVisionChat/visionChatOnce check it at call time instead.
  this.visionModel = visionModel;
}
2417
/**
 * Get the current model name
 * @returns Model name
 */
getModel() {
  return this.model;
}
/**
 * Get the current vision model name
 * @returns Vision model name
 */
getVisionModel() {
  return this.visionModel;
}
2431
/**
 * Apply rate limiting for free tier models.
 * Best-effort, in-memory throttle: counts requests and sleeps when the
 * per-minute free-tier limit is reached. Paid models pass through untouched.
 * NOTE(review): the 60s window is measured from the *previous* request (and
 * lastRequestTime is taken before any sleep), so this approximates rather
 * than exactly enforces a per-minute quota — confirm against OpenRouter's
 * actual limits if precision matters.
 */
async applyRateLimiting() {
  if (!isOpenRouterFreeModel(this.model)) {
    return;
  }
  const now = Date.now();
  const timeSinceLastRequest = now - this.lastRequestTime;
  // More than a minute of idle time: start a fresh counting window.
  if (timeSinceLastRequest > 6e4) {
    this.requestCount = 0;
  }
  if (this.requestCount >= OPENROUTER_FREE_RATE_LIMIT_PER_MINUTE) {
    // Sleep out the remainder of the window, then reset the counter.
    const waitTime = 6e4 - timeSinceLastRequest;
    if (waitTime > 0) {
      console.log(
        `Rate limit reached for free tier. Waiting ${waitTime}ms...`
      );
      await new Promise((resolve) => setTimeout(resolve, waitTime));
      this.requestCount = 0;
    }
  }
  this.lastRequestTime = now;
  this.requestCount++;
}
2456
+ /**
2457
+ * Process chat messages
2458
+ * @param messages Array of messages to send
2459
+ * @param onPartialResponse Callback to receive each part of streaming response
2460
+ * @param onCompleteResponse Callback to execute when response is complete
2461
+ */
2462
+ async processChat(messages, onPartialResponse, onCompleteResponse) {
2463
+ await this.applyRateLimiting();
2464
+ if (this.tools.length === 0) {
2465
+ const res = await this.callOpenRouter(messages, this.model, true);
2466
+ const full = await this.handleStream(res, onPartialResponse);
2467
+ await onCompleteResponse(full);
2468
+ return;
2469
+ }
2470
+ const { blocks, stop_reason } = await this.chatOnce(messages);
2471
+ if (stop_reason === "end") {
2472
+ const full = blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
2473
+ await onCompleteResponse(full);
2474
+ return;
2475
+ }
2476
+ throw new Error(
2477
+ "processChat received tool_calls. ChatProcessor must use chatOnce() loop when tools are enabled."
2478
+ );
2479
+ }
2480
+ /**
2481
+ * Process chat messages with images
2482
+ * @param messages Array of messages to send (including images)
2483
+ * @param onPartialResponse Callback to receive each part of streaming response
2484
+ * @param onCompleteResponse Callback to execute when response is complete
2485
+ */
2486
+ async processVisionChat(messages, onPartialResponse, onCompleteResponse) {
2487
+ if (!isOpenRouterVisionModel(this.visionModel)) {
2488
+ throw new Error(
2489
+ `Model ${this.visionModel} does not support vision capabilities.`
2490
+ );
2491
+ }
2492
+ await this.applyRateLimiting();
2493
+ try {
2494
+ if (this.tools.length === 0) {
2495
+ const res = await this.callOpenRouter(messages, this.visionModel, true);
2496
+ const full = await this.handleStream(res, onPartialResponse);
2497
+ await onCompleteResponse(full);
2498
+ return;
2499
+ }
2500
+ const { blocks, stop_reason } = await this.visionChatOnce(
2501
+ messages,
2502
+ true,
2503
+ onPartialResponse
2504
+ );
2505
+ if (stop_reason === "end") {
2506
+ const full = blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
2507
+ await onCompleteResponse(full);
2508
+ return;
2509
+ }
2510
+ throw new Error(
2511
+ "processVisionChat received tool_calls. ChatProcessor must use visionChatOnce() loop when tools are enabled."
2512
+ );
2513
+ } catch (error) {
2514
+ console.error("Error in processVisionChat:", error);
2515
+ throw error;
2516
+ }
2517
+ }
2518
+ /**
2519
+ * Process chat messages with tools (text only)
2520
+ * @param messages Array of messages to send
2521
+ * @param stream Whether to use streaming
2522
+ * @param onPartialResponse Callback for partial responses
2523
+ * @param maxTokens Maximum tokens for response (optional)
2524
+ * @returns Tool chat completion
2525
+ */
2526
+ async chatOnce(messages, stream = true, onPartialResponse = () => {
2527
+ }, maxTokens) {
2528
+ await this.applyRateLimiting();
2529
+ const res = await this.callOpenRouter(
2530
+ messages,
2531
+ this.model,
2532
+ stream,
2533
+ maxTokens
2534
+ );
2535
+ return stream ? this.parseStream(res, onPartialResponse) : this.parseOneShot(await res.json());
2536
+ }
2537
+ /**
2538
+ * Process vision chat messages with tools
2539
+ * @param messages Array of messages to send (including images)
2540
+ * @param stream Whether to use streaming
2541
+ * @param onPartialResponse Callback for partial responses
2542
+ * @param maxTokens Maximum tokens for response (optional)
2543
+ * @returns Tool chat completion
2544
+ */
2545
+ async visionChatOnce(messages, stream = false, onPartialResponse = () => {
2546
+ }, maxTokens) {
2547
+ if (!isOpenRouterVisionModel(this.visionModel)) {
2548
+ throw new Error(
2549
+ `Model ${this.visionModel} does not support vision capabilities.`
2550
+ );
2551
+ }
2552
+ await this.applyRateLimiting();
2553
+ const res = await this.callOpenRouter(
2554
+ messages,
2555
+ this.visionModel,
2556
+ stream,
2557
+ maxTokens
2558
+ );
2559
+ return stream ? this.parseStream(res, onPartialResponse) : this.parseOneShot(await res.json());
2560
+ }
2561
+ /**
2562
+ * Call OpenRouter API
2563
+ */
2564
+ async callOpenRouter(messages, model, stream = false, maxTokens) {
2565
+ const body = this.buildRequestBody(messages, model, stream, maxTokens);
2566
+ const headers = {
2567
+ Authorization: `Bearer ${this.apiKey}`
2568
+ };
2569
+ if (this.appUrl) {
2570
+ headers["HTTP-Referer"] = this.appUrl;
2571
+ }
2572
+ if (this.appName) {
2573
+ headers["X-Title"] = this.appName;
2574
+ }
2575
+ const res = await ChatServiceHttpClient.post(this.endpoint, body, headers);
2576
+ return res;
2577
+ }
2578
/**
 * Build request body for OpenRouter API (OpenAI-compatible format).
 * Configures reasoning controls and tool definitions; reasoning output is
 * excluded by default unless includeReasoning === true.
 * @param messages Array of messages to send
 * @param model Model name
 * @param stream Whether to stream the response
 * @param maxTokens Maximum tokens for response (optional)
 * @returns Request body object
 */
buildRequestBody(messages, model, stream, maxTokens) {
  const body = {
    model,
    messages,
    stream
  };
  // NOTE(review): tokenLimit is computed only to emit this warning — no
  // max_tokens field is ever added to the body, and the message mentions
  // gpt-oss-20b regardless of the model actually in use. Presumably a
  // deliberate workaround for a known model issue; confirm before changing.
  const tokenLimit = maxTokens !== void 0 ? maxTokens : getMaxTokensForResponseLength(this.responseLength);
  if (tokenLimit) {
    console.warn(
      `OpenRouter: Token limits are not supported for gpt-oss-20b model due to known issues. Using unlimited tokens instead.`
    );
  }
  if (this.reasoning_effort || this.includeReasoning !== void 0 || this.reasoningMaxTokens) {
    body.reasoning = {};
    if (this.reasoning_effort) {
      // OpenRouter has no "minimal" level; map it down to "low".
      const effort = this.reasoning_effort === "minimal" ? "low" : this.reasoning_effort;
      body.reasoning.effort = effort;
    }
    // Reasoning text is excluded unless explicitly opted in with `true`.
    if (this.includeReasoning !== true) {
      body.reasoning.exclude = true;
    }
    if (this.reasoningMaxTokens) {
      body.reasoning.max_tokens = this.reasoningMaxTokens;
    }
  } else {
    // No reasoning options configured: suppress reasoning output entirely.
    body.reasoning = { exclude: true };
  }
  if (this.tools.length > 0) {
    // Wrap tool definitions in the OpenAI-style `function` envelope.
    body.tools = this.tools.map((t) => ({
      type: "function",
      function: {
        name: t.name,
        description: t.description,
        parameters: t.parameters
      }
    }));
    body.tool_choice = "auto";
  }
  return body;
}
2621
/**
 * Handle streaming response from OpenRouter (no tool support).
 * OpenRouter uses SSE format with potential comment lines (": ..." keepalives)
 * which are skipped. Text deltas are forwarded to onPartial and concatenated.
 * @param res Response whose body is an SSE stream
 * @param onPartial Callback for each text fragment
 * @returns Full concatenated response text
 */
async handleStream(res, onPartial) {
  const reader = res.body.getReader();
  const dec = new TextDecoder();
  let buffer = "";
  let full = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += dec.decode(value, { stream: true });
    // Keep the trailing partial line in the buffer for the next chunk.
    const lines = buffer.split("\n");
    buffer = lines.pop() || "";
    for (const line of lines) {
      const trimmedLine = line.trim();
      // Skip blanks and SSE comment/keepalive lines.
      if (!trimmedLine || trimmedLine.startsWith(":")) continue;
      if (!trimmedLine.startsWith("data:")) continue;
      const jsonStr = trimmedLine.slice(5).trim();
      if (jsonStr === "[DONE]") {
        return full;
      }
      try {
        const json = JSON.parse(jsonStr);
        const content = json.choices?.[0]?.delta?.content || "";
        if (content) {
          onPartial(content);
          full += content;
        }
      } catch (e) {
        // Malformed SSE payloads are logged and skipped, not fatal.
        console.debug("Failed to parse SSE data:", jsonStr);
      }
    }
  }
  return full;
}
2658
/**
 * Parse streaming response with tool support.
 * Text deltas are forwarded to onPartial and accumulated; tool-call argument
 * fragments are stitched together per tool-call index and emitted as
 * tool_use blocks after the stream ends.
 * @param res Response whose body is an SSE stream
 * @param onPartial Callback for each text fragment
 * @returns ToolChatCompletion
 */
async parseStream(res, onPartial) {
  const reader = res.body.getReader();
  const dec = new TextDecoder();
  const textBlocks = [];
  // In-flight tool calls keyed by their OpenAI-style tool_call index.
  const toolCallsMap = /* @__PURE__ */ new Map();
  let buf = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buf += dec.decode(value, { stream: true });
    // Keep the trailing partial line in the buffer for the next chunk.
    const lines = buf.split("\n");
    buf = lines.pop() || "";
    for (const line of lines) {
      const trimmedLine = line.trim();
      // Skip blanks and SSE comment/keepalive lines.
      if (!trimmedLine || trimmedLine.startsWith(":")) continue;
      if (!trimmedLine.startsWith("data:")) continue;
      const payload = trimmedLine.slice(5).trim();
      // NOTE(review): this `break` only exits the current line batch; the
      // outer read loop continues until the stream actually closes.
      if (payload === "[DONE]") {
        break;
      }
      try {
        const json = JSON.parse(payload);
        const delta = json.choices?.[0]?.delta;
        if (delta?.content) {
          onPartial(delta.content);
          StreamTextAccumulator.append(textBlocks, delta.content);
        }
        if (delta?.tool_calls) {
          // id/name arrive on the first fragment; later fragments only
          // append to the JSON `arguments` string.
          delta.tool_calls.forEach((c) => {
            const entry = toolCallsMap.get(c.index) ?? {
              id: c.id,
              name: c.function?.name,
              args: ""
            };
            entry.args += c.function?.arguments || "";
            toolCallsMap.set(c.index, entry);
          });
        }
      } catch (e) {
        // Malformed SSE payloads are logged and skipped, not fatal.
        console.debug("Failed to parse SSE data:", payload);
      }
    }
  }
  // Materialize tool calls in index order after all fragments arrived.
  const toolBlocks = Array.from(toolCallsMap.entries()).sort((a, b) => a[0] - b[0]).map(([_, e]) => ({
    type: "tool_use",
    id: e.id,
    name: e.name,
    input: JSON.parse(e.args || "{}")
  }));
  const blocks = [...textBlocks, ...toolBlocks];
  return {
    blocks,
    stop_reason: toolBlocks.length ? "tool_use" : "end"
  };
}
2716
+ /**
2717
+ * Parse non-streaming response
2718
+ */
2719
+ parseOneShot(data) {
2720
+ const choice = data.choices?.[0];
2721
+ const blocks = [];
2722
+ if (choice?.finish_reason === "tool_calls" && choice?.message?.tool_calls) {
2723
+ choice.message.tool_calls.forEach(
2724
+ (c) => blocks.push({
2725
+ type: "tool_use",
2726
+ id: c.id,
2727
+ name: c.function?.name,
2728
+ input: JSON.parse(c.function?.arguments || "{}")
2729
+ })
2730
+ );
2731
+ } else if (choice?.message?.content) {
2732
+ blocks.push({ type: "text", text: choice.message.content });
2733
+ }
2734
+ return {
2735
+ blocks,
2736
+ stop_reason: choice?.finish_reason === "tool_calls" ? "tool_use" : "end"
2737
+ };
2738
+ }
2739
+ };
2740
+
2741
+ // src/services/providers/openrouter/OpenRouterChatServiceProvider.ts
2742
// src/services/providers/openrouter/OpenRouterChatServiceProvider.ts
var OpenRouterChatServiceProvider = class {
  /**
   * Build an OpenRouterChatService from the given options.
   * Falls back to the default model when none is given; rejects an
   * explicitly requested vision model that lacks vision support.
   * @param options Service options
   * @returns OpenRouterChatService instance
   */
  createChatService(options) {
    const chatModel = options.model || this.getDefaultModel();
    const visionModel = options.visionModel || chatModel;
    if (options.visionModel && !this.supportsVisionForModel(options.visionModel)) {
      throw new Error(
        `Model ${options.visionModel} does not support vision capabilities.`
      );
    }
    return new OpenRouterChatService(
      options.apiKey,
      chatModel,
      visionModel,
      options.tools,
      options.endpoint,
      options.responseLength,
      options.appName,
      options.appUrl,
      options.reasoning_effort,
      options.includeReasoning,
      options.reasoningMaxTokens
    );
  }
  /**
   * Provider identifier used for factory registration.
   * @returns Provider name ('openrouter')
   */
  getProviderName() {
    return "openrouter";
  }
  /**
   * Models this provider currently supports (only gpt-oss-20b:free).
   * @returns Array of supported model names
   */
  getSupportedModels() {
    return [
      // Free models
      MODEL_GPT_OSS_20B_FREE
    ];
  }
  /**
   * Model used when the caller does not specify one.
   * @returns Default model name (gpt-oss-20b:free)
   */
  getDefaultModel() {
    return MODEL_GPT_OSS_20B_FREE;
  }
  /**
   * Whether this provider supports vision (image processing).
   * @returns false — gpt-oss-20b does not support vision
   */
  supportsVision() {
    return false;
  }
  /**
   * Whether a specific model supports vision capabilities.
   * @param model The model name to check
   * @returns True if the model supports vision
   */
  supportsVisionForModel(model) {
    return isOpenRouterVisionModel(model);
  }
  /**
   * List of free-tier models.
   * @returns Array of free model names
   */
  getFreeModels() {
    return OPENROUTER_FREE_MODELS;
  }
  /**
   * Whether a model is free tier, either by being in the known free list
   * or by carrying OpenRouter's ":free" suffix.
   * @param model Model name to check
   * @returns True if the model is free
   */
  isModelFree(model) {
    if (OPENROUTER_FREE_MODELS.includes(model)) {
      return true;
    }
    return model.endsWith(":free");
  }
};
2828
+
2829
+ // src/services/ChatServiceFactory.ts
2830
// src/services/ChatServiceFactory.ts
var ChatServiceFactory = class {
  /**
   * Register a provider under the name it reports via getProviderName().
   * @param provider Provider instance
   */
  static registerProvider(provider) {
    this.providers.set(provider.getProviderName(), provider);
  }
  /**
   * Create a chat service using a registered provider.
   * @param providerName Provider name
   * @param options Service options
   * @returns Created ChatService instance
   * @throws Error when no provider is registered under providerName
   */
  static createChatService(providerName, options) {
    const provider = this.providers.get(providerName);
    if (provider === undefined) {
      throw new Error(`Unknown chat provider: ${providerName}`);
    }
    return provider.createChatService(options);
  }
  /**
   * Access the live provider registry.
   * @returns Provider map
   */
  static getProviders() {
    return this.providers;
  }
  /**
   * Names of all registered providers.
   * @returns Array of provider names
   */
  static getAvailableProviders() {
    return [...this.providers.keys()];
  }
  /**
   * Models supported by a given provider.
   * @param providerName Provider name
   * @returns Supported models, or an empty array for an unknown provider
   */
  static getSupportedModels(providerName) {
    const provider = this.providers.get(providerName);
    if (!provider) {
      return [];
    }
    return provider.getSupportedModels();
  }
};
2875
/** Map of registered providers */
ChatServiceFactory.providers = /* @__PURE__ */ new Map();
// Register the built-in providers at module load so the factory is usable
// out of the box. Registration order determines the order returned by
// ChatServiceFactory.getAvailableProviders().
ChatServiceFactory.registerProvider(new OpenAIChatServiceProvider());
ChatServiceFactory.registerProvider(new GeminiChatServiceProvider());
ChatServiceFactory.registerProvider(new ClaudeChatServiceProvider());
ChatServiceFactory.registerProvider(new OpenRouterChatServiceProvider());
2881
+
2882
+ // src/utils/emotionParser.ts
2883
// src/utils/emotionParser.ts
var emotions = ["happy", "sad", "angry", "surprised", "neutral"];
var EMOTION_TAG_REGEX = /\[([a-z]+)\]/i;
var EMOTION_TAG_CLEANUP_REGEX = /\[[a-z]+\]\s*/gi;
var EmotionParser = class {
  /**
   * Pull the first emotion tag (e.g. "[happy]") out of the text.
   * @param text Text that may contain emotion tags like [happy]
   * @returns Object with the lower-cased emotion (when a tag is present)
   *          and the text with all tags stripped
   */
  static extractEmotion(text) {
    const match = EMOTION_TAG_REGEX.exec(text);
    if (!match) {
      return { cleanText: text };
    }
    return {
      emotion: match[1].toLowerCase(),
      cleanText: text.replace(EMOTION_TAG_CLEANUP_REGEX, "").trim()
    };
  }
  /**
   * Whether the given string is one of the known emotions.
   * @param emotion Emotion string to validate
   * @returns True if the emotion is valid
   */
  static isValidEmotion(emotion) {
    return emotions.some((known) => known === emotion);
  }
  /**
   * Strip every emotion tag from the text.
   * @param text Text containing emotion tags
   * @returns Clean text without emotion tags
   */
  static cleanEmotionTags(text) {
    const stripped = text.replace(EMOTION_TAG_CLEANUP_REGEX, "");
    return stripped.trim();
  }
  /**
   * Prepend an emotion tag to the text.
   * @param emotion Emotion to add
   * @param text Text content
   * @returns Text with emotion tag prepended
   */
  static addEmotionTag(emotion, text) {
    return `[${emotion}] ${text}`;
  }
};
2930
+
2931
+ // src/utils/screenplay.ts
2932
// src/utils/screenplay.ts
/**
 * Convert raw text into a screenplay object, lifting any leading emotion
 * tag into the `emotion` field and keeping the stripped text.
 */
function textToScreenplay(text) {
  const { emotion, cleanText } = EmotionParser.extractEmotion(text);
  return emotion ? { emotion, text: cleanText } : { text: cleanText };
}
2942
/**
 * Convert an array of raw texts into screenplay objects, one per text.
 */
function textsToScreenplay(texts) {
  const screenplays = [];
  for (const text of texts) {
    screenplays.push(textToScreenplay(text));
  }
  return screenplays;
}
2945
/**
 * Serialize a screenplay object back to tagged text: "[emotion] text"
 * when an emotion is present, otherwise the text unchanged.
 */
function screenplayToText(screenplay) {
  if (!screenplay.emotion) {
    return screenplay.text;
  }
  return EmotionParser.addEmotionTag(screenplay.emotion, screenplay.text);
}
2951
+
2952
+ // src/utils/runOnce.ts
2953
// src/utils/runOnce.ts
/**
 * Run a single non-streaming chat turn and return the concatenated text
 * of the resulting blocks. Partial-text callbacks are discarded.
 */
async function runOnceText(chat, messages) {
  const ignorePartial = () => {
  };
  const result = await chat.chatOnce(messages, false, ignorePartial);
  return StreamTextAccumulator.getFullText(result.blocks);
}
2958
+
2959
+ // src/adapters/gasFetch.ts
2960
// src/adapters/gasFetch.ts
/**
 * Install a fetch implementation backed by Google Apps Script's
 * UrlFetchApp into ChatServiceHttpClient. The adapter normalizes
 * fetch-style (url, init) arguments into UrlFetchApp params and wraps the
 * synchronous GAS response in a minimal fetch-Response-like object
 * (ok / status / statusText / text() / json()).
 *
 * Fix over the previous version: the Content-Type default is now applied
 * with a case-insensitive check, so a caller-supplied "content-type" (any
 * casing) is no longer shadowed by a duplicate "Content-Type" header.
 */
function installGASFetch() {
  ChatServiceHttpClient.setFetch(async (url, init = {}) => {
    const method = (init.method || "GET").toString().toUpperCase();
    // Normalize HeadersInit (array of [name, value] pairs or a plain
    // object) into a simple string record for UrlFetchApp.
    const rawHeaders = init.headers;
    const headers = {};
    if (Array.isArray(rawHeaders)) {
      for (const [k, v] of rawHeaders) headers[k] = String(v);
    } else if (rawHeaders && typeof rawHeaders === "object") {
      for (const [k, v] of Object.entries(rawHeaders)) headers[k] = String(v);
    }
    const params = {
      method,
      headers,
      // Surface non-2xx responses as data instead of GAS exceptions.
      muteHttpExceptions: true
    };
    const body = init.body;
    if (typeof body === "string") {
      params.payload = body;
    } else if (body != null) {
      // HTTP header names are case-insensitive: only default the content
      // type when the caller has not supplied one in any casing.
      const hasContentType = Object.keys(headers).some(
        (k) => k.toLowerCase() === "content-type"
      );
      if (!hasContentType) {
        headers["Content-Type"] = "application/json";
      }
      params.payload = JSON.stringify(body);
    }
    const res = UrlFetchApp.fetch(url, params);
    const status = res.getResponseCode();
    const text = res.getContentText();
    const response = {
      ok: status >= 200 && status < 300,
      status,
      statusText: String(status),
      text: async () => text,
      json: async () => text ? JSON.parse(text) : null
    };
    return response;
  });
}
2996
+ return __toCommonJS(index_exports);
2997
+ })();