mcard-js 2.1.49 → 2.1.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/dist/CardCollection-EMSBVZP3.js +10 -0
  2. package/dist/CardCollection-KQWR4PCV.js +10 -0
  3. package/dist/CardCollection-ORGE2XBG.js +10 -0
  4. package/dist/EngineRegistry-ABZXHZWO.js +17 -0
  5. package/dist/EngineRegistry-EIOT4MUZ.js +17 -0
  6. package/dist/EngineRegistry-IQ6EVO72.js +17 -0
  7. package/dist/EngineRegistry-PHRFXEOE.js +17 -0
  8. package/dist/IndexedDBEngine-EWA3SLAO.js +12 -0
  9. package/dist/IndexedDBEngine-FXAD42F3.js +12 -0
  10. package/dist/IndexedDBEngine-RD4447IS.js +12 -0
  11. package/dist/LLMRuntime-ARUWOX52.js +17 -0
  12. package/dist/LLMRuntime-C3XCO7WF.js +17 -0
  13. package/dist/LLMRuntime-CQ7X43QR.js +17 -0
  14. package/dist/LLMRuntime-PD45COKE.js +17 -0
  15. package/dist/LLMRuntime-QOUMLT33.js +17 -0
  16. package/dist/LLMRuntime-SZNLTHD7.js +17 -0
  17. package/dist/LLMRuntime-TVJGK2BG.js +17 -0
  18. package/dist/LambdaRuntime-25GMEJCU.js +19 -0
  19. package/dist/LambdaRuntime-7KQUMHPI.js +19 -0
  20. package/dist/LambdaRuntime-DRT7ODPC.js +19 -0
  21. package/dist/LambdaRuntime-HSREEYQG.js +19 -0
  22. package/dist/LambdaRuntime-IH7NVG6Z.js +19 -0
  23. package/dist/LambdaRuntime-MPG27FM2.js +19 -0
  24. package/dist/LambdaRuntime-ODSWIMNM.js +19 -0
  25. package/dist/LambdaRuntime-PHGRZYAW.js +19 -0
  26. package/dist/LambdaRuntime-QOEYR37L.js +19 -0
  27. package/dist/LambdaRuntime-RT33TFN2.js +19 -0
  28. package/dist/LambdaRuntime-W6TQBP5O.js +19 -0
  29. package/dist/Loader-35WSUC53.js +14 -0
  30. package/dist/Loader-STS3G4OQ.js +16 -0
  31. package/dist/Loader-W22AEM6F.js +12 -0
  32. package/dist/Loader-YBPWP43S.js +12 -0
  33. package/dist/Loader-ZYSS7B4D.js +12 -0
  34. package/dist/NetworkRuntime-KR2QITXV.js +987 -0
  35. package/dist/NetworkRuntime-S6V2CMZV.js +1575 -0
  36. package/dist/OllamaProvider-2ANW6EB2.js +9 -0
  37. package/dist/OllamaProvider-5QFJKYAC.js +9 -0
  38. package/dist/OllamaProvider-6QXJGR7V.js +9 -0
  39. package/dist/OllamaProvider-ABEEFX7M.js +9 -0
  40. package/dist/OllamaProvider-Z2CGY5LY.js +9 -0
  41. package/dist/VCard-225X42W7.js +25 -0
  42. package/dist/chunk-2APJYBH4.js +368 -0
  43. package/dist/chunk-4DFTWDRB.js +497 -0
  44. package/dist/chunk-4PBRTFSY.js +112 -0
  45. package/dist/chunk-4T3H25AP.js +299 -0
  46. package/dist/chunk-5DFXPIRL.js +42 -0
  47. package/dist/chunk-5HRZV4R3.js +217 -0
  48. package/dist/chunk-6ZRJXVJ3.js +529 -0
  49. package/dist/chunk-7N7JYGN2.js +364 -0
  50. package/dist/chunk-7QTJUGYQ.js +74 -0
  51. package/dist/chunk-7TXIPJI2.js +2360 -0
  52. package/dist/chunk-BFJUD527.js +2369 -0
  53. package/dist/chunk-CHXIVTQV.js +364 -0
  54. package/dist/chunk-DM2ABCA4.js +497 -0
  55. package/dist/chunk-DTPHGTBQ.js +275 -0
  56. package/dist/chunk-EDAJ5FO6.js +405 -0
  57. package/dist/chunk-ETJWXHKZ.js +246 -0
  58. package/dist/chunk-FLYGNPUC.js +2369 -0
  59. package/dist/chunk-FSDRDWOP.js +34 -0
  60. package/dist/chunk-GIKMCG4D.js +497 -0
  61. package/dist/chunk-IJKS3LGK.js +428 -0
  62. package/dist/chunk-JUQ2VQZA.js +428 -0
  63. package/dist/chunk-JVW4J7BY.js +2369 -0
  64. package/dist/chunk-JWTRVEC3.js +2369 -0
  65. package/dist/chunk-KJM4C65U.js +299 -0
  66. package/dist/chunk-KMC566CN.js +591 -0
  67. package/dist/chunk-KMNP6DBL.js +455 -0
  68. package/dist/chunk-LVU7O5IY.js +597 -0
  69. package/dist/chunk-M4C6RWLA.js +373 -0
  70. package/dist/chunk-NAAAKSEO.js +541 -0
  71. package/dist/chunk-NKIXLPHL.js +373 -0
  72. package/dist/chunk-NOEDMK7I.js +428 -0
  73. package/dist/chunk-NOPYSBOQ.js +2360 -0
  74. package/dist/chunk-P4G42QCY.js +2369 -0
  75. package/dist/chunk-PKLONZCF.js +253 -0
  76. package/dist/chunk-PNGECWPN.js +597 -0
  77. package/dist/chunk-PYP6T64W.js +217 -0
  78. package/dist/chunk-QFT3COE2.js +217 -0
  79. package/dist/chunk-QFZFXMNX.js +275 -0
  80. package/dist/chunk-QZGRQRJP.js +2369 -0
  81. package/dist/chunk-R3XRBAM7.js +253 -0
  82. package/dist/chunk-RYP66UMH.js +74 -0
  83. package/dist/chunk-RZIZYRLF.js +112 -0
  84. package/dist/chunk-T43V44RS.js +2369 -0
  85. package/dist/chunk-UCNVX5BZ.js +74 -0
  86. package/dist/chunk-UDF7HS4V.js +368 -0
  87. package/dist/chunk-VJPXJVEH.js +299 -0
  88. package/dist/chunk-VW3KBDK5.js +74 -0
  89. package/dist/chunk-X72XIYSN.js +364 -0
  90. package/dist/chunk-XETU7TV4.js +112 -0
  91. package/dist/chunk-Y4BT6LHA.js +368 -0
  92. package/dist/chunk-YQGB6BIA.js +2369 -0
  93. package/dist/chunk-ZEQPO3XV.js +217 -0
  94. package/dist/chunk-ZKRKWXEQ.js +2369 -0
  95. package/dist/chunk-ZMK2HTZ5.js +275 -0
  96. package/dist/constants-CLB7B6MN.js +101 -0
  97. package/dist/constants-O343SMHL.js +103 -0
  98. package/dist/constants-YPGDEX5X.js +103 -0
  99. package/dist/index.browser.cjs +11 -5
  100. package/dist/index.browser.js +12 -12
  101. package/dist/index.cjs +2358 -1896
  102. package/dist/index.d.cts +934 -776
  103. package/dist/index.d.ts +934 -776
  104. package/dist/index.js +1353 -1271
  105. package/dist/storage/SqliteNodeEngine.cjs +12 -6
  106. package/dist/storage/SqliteNodeEngine.js +4 -4
  107. package/dist/storage/SqliteWasmEngine.cjs +11 -5
  108. package/dist/storage/SqliteWasmEngine.js +4 -4
  109. package/package.json +5 -3
@@ -0,0 +1,591 @@
1
+ import {
2
+ BaseLLMProvider,
3
+ DEFAULT_PROVIDER,
4
+ LLMConfig,
5
+ LLM_PROVIDERS,
6
+ OllamaProvider
7
+ } from "./chunk-Y4BT6LHA.js";
8
+ import {
9
+ IO
10
+ } from "./chunk-MPMRBT5R.js";
11
+ import {
12
+ Either
13
+ } from "./chunk-2KADE3SE.js";
14
+ import {
15
+ DEFAULT_FALLBACK_MODEL,
16
+ DEFAULT_OLLAMA_BASE_URL,
17
+ LLM_DEFAULT_TIMEOUT_SECS
18
+ } from "./chunk-PKLONZCF.js";
19
+
20
+ // src/ptr/llm/providers/WebLLMProvider.ts
21
/**
 * Narrow an unknown value to `number | undefined`.
 * Anything that is not a number (including numeric strings) maps to undefined.
 */
function asOptionalNumber(value) {
  if (typeof value === "number") {
    return value;
  }
  return void 0;
}
24
/**
 * Browser-only provider backed by @mlc-ai/web-llm. The MLC engine is created
 * lazily on first use and reloaded in place when the requested model changes.
 */
var WebLLMProvider = class extends BaseLLMProvider {
  provider_name = "webllm";
  // Provider configuration (default/available models) from the shared registry.
  config;
  // Lazily-created MLCEngine; null until the first successful initialization.
  engine = null;
  // Model id the engine is currently loaded with.
  current_model = null;
  // Shared in-flight initialization so concurrent callers await a single init.
  initialization_promise = null;
  constructor() {
    super();
    this.config = LLM_PROVIDERS["webllm"];
  }
  /**
   * Return an engine loaded with `model_id`, creating or reloading as needed.
   * Never throws: every failure is surfaced as Either.left(message).
   */
  async _get_engine(model_id) {
    if (this.engine && this.current_model === model_id) {
      return Either.right(this.engine);
    }
    if (this.initialization_promise) {
      // Fix: an in-flight init may reject. Previously this await was
      // unguarded, so the rejection escaped _get_engine as a thrown error
      // instead of an Either.left like every other failure path.
      try {
        await this.initialization_promise;
      } catch (e) {
        // The caller that started that init already received the error;
        // fall through and attempt a fresh initialization below.
        void e;
      }
      if (this.engine && this.current_model === model_id) {
        return Either.right(this.engine);
      }
    }
    this.initialization_promise = (async () => {
      try {
        if (typeof window === "undefined") {
          throw new Error("WebLLM only supports browser environments.");
        }
        const windowWithWebLLM = window;
        // Prefer a script-tag global; otherwise try a dynamic import.
        let webllm = windowWithWebLLM.webllm;
        if (!webllm) {
          try {
            webllm = await import("@mlc-ai/web-llm");
          } catch (e) {
            void e;
          }
        }
        if (!webllm) {
          throw new Error("WebLLM library not found. Please include @mlc-ai/web-llm or add script tag.");
        }
        if (!this.engine) {
          this.engine = await webllm.CreateMLCEngine(model_id, {
            initProgressCallback: (report) => {
              console.debug(`[WebLLM] ${report.text}`);
            }
          });
        } else {
          // Engine exists but holds a different model: hot-swap via reload.
          await this.engine.reload(model_id);
        }
        this.current_model = model_id;
      } catch (e) {
        // Reset so the next call retries from scratch.
        this.engine = null;
        this.current_model = null;
        throw e;
      }
    })();
    try {
      await this.initialization_promise;
      return Either.right(this.engine);
    } catch (e) {
      this.initialization_promise = null;
      const error = e;
      return Either.left(`WebLLM init failed: ${error.message || String(e)}`);
    }
  }
  /**
   * Single-prompt completion routed through the chat endpoint.
   * NOTE(review): `images` is accepted but never forwarded to the engine,
   * so vision input is silently dropped — confirm whether web-llm supports it.
   */
  async complete(prompt, params, images) {
    const model = typeof params.model === "string" ? params.model : this.config.default_model;
    const engineResult = await this._get_engine(model);
    if (engineResult.isLeft) return Either.left(engineResult.left);
    const engine = engineResult.right;
    try {
      const completion = await engine.chat.completions.create({
        messages: [{ role: "user", content: prompt }],
        temperature: asOptionalNumber(params.temperature),
        max_tokens: asOptionalNumber(params.max_tokens),
        top_p: asOptionalNumber(params.top_p),
        stream: false
      });
      const content = completion.choices?.[0]?.message?.content || "";
      return Either.right(content);
    } catch (e) {
      const error = e;
      return Either.left(`WebLLM completion error: ${error.message || String(e)}`);
    }
  }
  /**
   * Multi-turn chat. Returns Either.right({content, role, model, usage})
   * or Either.left(message).
   */
  async chat(messages, params) {
    const model = typeof params.model === "string" ? params.model : this.config.default_model;
    const engineResult = await this._get_engine(model);
    if (engineResult.isLeft) return Either.left(engineResult.left);
    const engine = engineResult.right;
    try {
      const completion = await engine.chat.completions.create({
        messages,
        temperature: asOptionalNumber(params.temperature),
        max_tokens: asOptionalNumber(params.max_tokens),
        top_p: asOptionalNumber(params.top_p),
        stream: false
      });
      const choice = completion.choices?.[0];
      return Either.right({
        content: choice?.message?.content || "",
        role: choice?.message?.role || "assistant",
        model,
        usage: completion.usage
      });
    } catch (e) {
      const error = e;
      return Either.left(`WebLLM chat error: ${error.message || String(e)}`);
    }
  }
  /**
   * True when running in a browser and the web-llm library is loadable
   * (global or importable). Does not initialize an engine.
   */
  async validate_connection() {
    if (typeof window === "undefined") return false;
    if (window.webllm) return true;
    try {
      await import("@mlc-ai/web-llm");
      return true;
    } catch {
      return false;
    }
  }
  // Models come from the static registry config, not from the engine.
  async list_models() {
    return Either.right(this.config.available_models);
  }
};
145
+
146
// src/ptr/llm/providers/MLCLLMProvider.ts
import * as http from "http";
import * as https from "https";
/**
 * Provider for an MLC-LLM REST server (OpenAI-compatible paths from the
 * shared registry config). Uses global fetch when available, otherwise the
 * raw node http/https modules.
 */
var MLCLLMProvider = class extends BaseLLMProvider {
  provider_name = "mlc-llm";
  // Server base URL with any trailing slash stripped.
  base_url;
  // Request timeout in milliseconds (constructor takes seconds).
  timeout;
  config;
  constructor(base_url = null, timeout = LLM_DEFAULT_TIMEOUT_SECS) {
    super();
    this.config = LLM_PROVIDERS["mlc-llm"];
    this.base_url = (base_url || this.config.base_url).replace(/\/$/, "");
    this.timeout = timeout * 1e3;
  }
  /**
   * Issue a JSON request against the server. Never throws: returns
   * Either.right(parsedBody) or Either.left(message).
   */
  async _fetch_json(endpoint, options) {
    if (typeof globalThis.fetch === "function") {
      const controller = new AbortController();
      // Abort the request if it outlives the configured timeout.
      const id = setTimeout(() => controller.abort(), this.timeout);
      try {
        const response = await fetch(`${this.base_url}${endpoint}`, {
          ...options,
          signal: controller.signal
        });
        // Disarm as soon as headers arrive (body reads are unbounded, as before).
        clearTimeout(id);
        if (!response.ok) {
          return Either.left(`HTTP error ${response.status}: ${await response.text()}`);
        }
        const data = await response.json();
        return Either.right(data);
      } catch (e) {
        const error = e;
        return Either.left(`Connection error: ${error.message || String(e)}`);
      } finally {
        // Fix: the timer was previously cleared only on the success path,
        // leaving a live timer (and a useless late abort()) whenever fetch
        // rejected or the body failed to parse.
        clearTimeout(id);
      }
    }
    return this._node_request(endpoint, options);
  }
  /**
   * fetch-less fallback using node's http/https. Resolves (never rejects)
   * with an Either mirroring _fetch_json's contract.
   */
  _node_request(endpoint, options) {
    const urlStr = `${this.base_url}${endpoint}`;
    const url = new URL(urlStr);
    const isHttps = url.protocol === "https:";
    const client = isHttps ? https : http;
    const reqOptions = {
      method: options.method || "GET",
      headers: options.headers || {},
      timeout: this.timeout
    };
    return new Promise((resolve) => {
      const req = client.request(url, reqOptions, (res) => {
        let body = "";
        res.on("data", (chunk) => body += chunk);
        res.on("end", () => {
          if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
            try {
              resolve(Either.right(JSON.parse(body)));
            } catch (e) {
              resolve(Either.left(`Parse error: ${e}`));
            }
          } else {
            resolve(Either.left(`HTTP Error ${res.statusCode}: ${body}`));
          }
        });
      });
      req.on("error", (e) => resolve(Either.left(e.message)));
      req.on("timeout", () => {
        req.destroy();
        resolve(Either.left("Request timed out"));
      });
      if (options.body) {
        req.write(options.body);
      }
      req.end();
    });
  }
  /**
   * Single-turn completion posted to `config.api_path`.
   * NOTE(review): the payload uses chat-style `messages`, but the response
   * is read as `choices[0].text` (legacy completions shape) — verify against
   * the MLC serve API that this endpoint really returns `text`.
   */
  async complete(prompt, params, images) {
    const data = {
      model: typeof params.model === "string" ? params.model : this.config.default_model,
      messages: [{ role: "user", content: prompt }],
      max_tokens: params.max_tokens,
      temperature: params.temperature,
      top_p: params.top_p,
      stream: false
    };
    const result = await this._fetch_json(this.config.api_path, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(data)
    });
    if (result.isLeft) return Either.left(result.left);
    const response = result.right;
    if (response.choices && response.choices.length > 0) {
      return Either.right(response.choices[0].text || "");
    }
    return Either.left(`Unexpected response format: ${JSON.stringify(response)}`);
  }
  /**
   * Multi-turn chat posted to `config.chat_path`. Returns
   * Either.right({content, role, model, usage}) or Either.left(message).
   */
  async chat(messages, params) {
    const data = {
      model: params.model || this.config.default_model,
      messages,
      max_tokens: params.max_tokens,
      temperature: params.temperature,
      top_p: params.top_p,
      stream: false
    };
    const result = await this._fetch_json(this.config.chat_path, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(data)
    });
    if (result.isLeft) return Either.left(result.left);
    const response = result.right;
    if (response.choices && response.choices.length > 0) {
      const message = response.choices[0].message;
      return Either.right({
        content: message?.content || "",
        role: message?.role || "assistant",
        model: response.model,
        usage: response.usage
      });
    }
    return Either.left(`Unexpected response format: ${JSON.stringify(response)}`);
  }
  // Reachability check: a successful models listing means the server is up.
  async validate_connection() {
    const result = await this._fetch_json(this.config.models_path, { method: "GET" });
    return result.isRight;
  }
  // Returns the server's model ids, or Left on transport/shape errors.
  async list_models() {
    const result = await this._fetch_json(this.config.models_path, { method: "GET" });
    if (result.isLeft) return Either.left(result.left);
    const response = result.right;
    if (response.data && Array.isArray(response.data)) {
      return Either.right(response.data.map((m) => m.id).filter((id) => typeof id === "string"));
    }
    return Either.left("Invalid models response");
  }
};
281
+
282
// src/ptr/llm/IntelligenceRouter.ts
import * as http2 from "http";
import * as https2 from "https";
/**
 * Decides which LLM provider configuration to use and supplies the local
 * Ollama "edge fallback" when a remote provider fails.
 */
var IntelligenceRouter = class {
  // Edge-fallback target: Ollama endpoint and model used after a failure.
  localUrl;
  localModel;
  constructor(localUrl, localModel) {
    this.localUrl = localUrl ?? DEFAULT_OLLAMA_BASE_URL;
    this.localModel = localModel ?? DEFAULT_FALLBACK_MODEL;
  }
  /**
   * Probe a remote provider URL with a lightweight HTTP GET.
   * Resolves true if any response arrives within `timeoutMs`; false on
   * connection errors, timeouts, or an unparsable URL. Never rejects.
   */
  async probeConnection(providerUrl, timeoutMs = 2e3) {
    return new Promise((resolve) => {
      try {
        const target = new URL(providerUrl);
        const transport = target.protocol === "https:" ? https2 : http2;
        const probe = transport.get(target, { timeout: timeoutMs }, (response) => {
          // Any response at all counts as reachable; drain the body and move on.
          response.resume();
          resolve(true);
        });
        probe.on("error", () => resolve(false));
        probe.on("timeout", () => {
          probe.destroy();
          resolve(false);
        });
      } catch {
        resolve(false);
      }
    });
  }
  /**
   * Pick the (provider, model, url) tuple for a request. Currently a
   * pass-through: the requested configuration wins unless a later failure
   * overrides it via handleFailure().
   */
  getOptimalProvider(requestedProvider, requestedModel, requestedUrl = null) {
    return { provider: requestedProvider, model: requestedModel, url: requestedUrl };
  }
  /**
   * React to a provider failure: log a warning and hand back the local
   * Ollama fallback configuration together with the warning text.
   */
  handleFailure(failedProvider, error) {
    const failureKind = error instanceof Error ? error.constructor.name : "Error";
    const warning = `Adaptive Fallback Triggered: Provider '${failedProvider}' failed with ${failureKind}. Defaulting to Edge Fallback [${this.localModel}] via Ollama.`;
    console.warn(`[IntelligenceRouter] ${warning}`);
    return { provider: "ollama", model: this.localModel, url: this.localUrl, warning };
  }
};
339
+
340
+ // src/ptr/llm/LLMRuntime.ts
341
/**
 * Factory for LLM provider instances keyed by name
 * ("ollama" | "webllm" | "mlc-llm"). Throws on unknown names.
 */
function get_provider(provider_name = DEFAULT_PROVIDER, base_url = null, timeout = LLM_DEFAULT_TIMEOUT_SECS) {
  switch (provider_name) {
    case "ollama":
      return new OllamaProvider(base_url, timeout);
    case "webllm":
      // WebLLM runs in-browser; it takes no endpoint or timeout.
      return new WebLLMProvider();
    case "mlc-llm":
      return new MLCLLMProvider(base_url, timeout);
    default:
      throw new Error(`Unknown provider: ${provider_name}`);
  }
}
353
// Runtime that binds a provider name to a lazily-constructed provider
// instance and drives prompt execution with adaptive fallback
// (IntelligenceRouter) plus response post-processing.
var LLMRuntime = class {
  // Active provider name ("ollama" | "webllm" | "mlc-llm").
  provider_name;
  // Cached provider instance; rebuilt when the name or endpoint changes.
  _provider = null;
  constructor(provider_name = DEFAULT_PROVIDER) {
    this.provider_name = provider_name;
  }
  // Lazily instantiate the provider on first access (default endpoint/timeout).
  get provider() {
    if (!this._provider) {
      this._provider = get_provider(this.provider_name);
    }
    return this._provider;
  }
  /**
   * Execute a prompt/context against the configured provider.
   * `codeOrPath` and `chapterDir` are accepted for interface parity with the
   * other runtimes but are not used here.
   * Returns the formatted response, or an "Error: ..." string on failure.
   */
  async execute(codeOrPath, context, config, chapterDir) {
    // When context is an object it doubles as the config context, receiving
    // execution_warnings / thinking_process side-channel writes below.
    let configCtx = {};
    if (typeof context === "object" && context !== null) {
      configCtx = context;
    }
    const concrete = config;
    const llmConfig = LLMConfig.from_concrete(concrete, configCtx);
    // Rebuild the cached provider if the config names a different provider
    // or pins an explicit endpoint URL.
    if (llmConfig.provider !== this.provider_name || llmConfig.endpoint_url) {
      this.provider_name = llmConfig.provider;
      this._provider = get_provider(llmConfig.provider, llmConfig.endpoint_url, llmConfig.timeout);
    }
    // Derive the prompt (and optional images) from the context argument:
    // string => prompt; object with .prompt => prompt/images; else JSON dump.
    let prompt = "";
    let images;
    if (typeof context === "string") {
      prompt = context;
    } else if (context && typeof context === "object") {
      const ctx = context;
      if (typeof ctx.prompt === "string") {
        prompt = ctx.prompt;
        if (Array.isArray(ctx.images)) {
          images = ctx.images;
        }
      } else {
        prompt = JSON.stringify(context);
      }
    }
    const router = new IntelligenceRouter();
    // One attempt against cfg, rebuilding the provider if cfg disagrees with
    // the cached one. system_prompt selects the chat path over completion.
    const attemptExecution = async (cfg) => {
      if (cfg.provider !== this.provider_name || cfg.endpoint_url) {
        this.provider_name = cfg.provider;
        this._provider = get_provider(cfg.provider, cfg.endpoint_url, cfg.timeout);
      }
      if (cfg.system_prompt) {
        return this._execute_chat(prompt, cfg, images);
      } else {
        return this._execute_completion(prompt, cfg, images);
      }
    };
    let result = await attemptExecution(llmConfig);
    if (result.isLeft) {
      // Heuristic: only transport-ish failures trigger the edge fallback.
      const errMsg = String(result.left).toLowerCase();
      if (errMsg.includes("timeout") || errMsg.includes("connection") || errMsg.includes("failed")) {
        const fallback = router.handleFailure(llmConfig.provider, new Error(String(result.left)));
        // Surface the fallback warning to the caller via the context object.
        if (configCtx) {
          if (!Array.isArray(configCtx.execution_warnings)) {
            configCtx.execution_warnings = [];
          }
          configCtx.execution_warnings.push(fallback.warning);
        }
        // Mutate the config in place and retry once against the fallback.
        llmConfig.provider = fallback.provider;
        llmConfig.model = fallback.model;
        llmConfig.endpoint_url = fallback.url;
        this.provider_name = fallback.provider;
        this._provider = null;
        result = await attemptExecution(llmConfig);
      }
    }
    if (result.isLeft) {
      return `Error: ${result.left}`;
    }
    return this._format_response(result.right, llmConfig, configCtx);
  }
  // Completion path: delegate to provider.complete with provider params.
  async _execute_completion(prompt, config, images) {
    const params = config.to_provider_params();
    return this.provider.complete(prompt, params, images);
  }
  // Chat path: assemble system/user/assistant messages, then provider.chat.
  async _execute_chat(prompt, config, images) {
    const messages = [];
    if (config.system_prompt) {
      messages.push({ role: "system", content: config.system_prompt });
    }
    const userMsg = { role: "user", content: prompt };
    if (images && images.length > 0) {
      userMsg.images = images;
    }
    messages.push(userMsg);
    // An assistant_instruction is appended as a trailing assistant turn.
    if (config.assistant_instruction) {
      messages.push({ role: "assistant", content: config.assistant_instruction });
    }
    const params = config.to_provider_params();
    return this.provider.chat(messages, params);
  }
  /**
   * Post-process a provider response: unwrap .content, strip <think> blocks
   * (stashing them on context.thinking_process), and when response_format
   * is "json", best-effort parse the first {...} span (falling back to the
   * raw content on parse failure).
   */
  _format_response(response, config, context) {
    let content = response;
    if (response && typeof response === "object" && "content" in response) {
      content = response.content;
    }
    if (typeof content === "string" && content.includes("<think>")) {
      const regex = /<think>([\s\S]*?)<\/think>/g;
      let match;
      let thinkingBlocks = [];
      while ((match = regex.exec(content)) !== null) {
        if (match[1]) {
          thinkingBlocks.push(match[1].trim());
        }
      }
      // Side channel: expose chain-of-thought text to the caller's context.
      if (thinkingBlocks.length > 0 && context) {
        context.thinking_process = thinkingBlocks.join("\n\n");
      }
      content = content.replace(/<think>[\s\S]*?<\/think>/g, "").trim();
    }
    if (config.response_format === "json") {
      try {
        if (typeof content === "string") {
          // Extract the outermost {...} span; models often wrap JSON in prose.
          const start = content.indexOf("{");
          const end = content.lastIndexOf("}") + 1;
          if (start >= 0 && end > start) {
            return JSON.parse(content.substring(start, end));
          }
        }
        return content;
      } catch (e) {
        // Unparsable JSON degrades gracefully to the raw string.
        return content;
      }
    }
    return content;
  }
};
483
/**
 * Wrap a one-shot completion in an IO monad with adaptive edge fallback.
 * On timeout/connection/failed errors the prompt is retried once against the
 * local Ollama fallback chosen by IntelligenceRouter. <think>...</think>
 * blocks are stripped from the content and surfaced as `thinking_process`.
 * The IO resolves to an Either; it never throws.
 */
function promptMonad(prompt, config = {}) {
  return IO.of(async () => {
    try {
      const llmConfig = new LLMConfig(config);
      const router = new IntelligenceRouter();
      // Fix: honor endpoint_url/timeout from the config. Previously the
      // provider came from `new LLMRuntime(provider).provider`, which ignores
      // both (LLMRuntime.execute passes them through get_provider).
      const provider = get_provider(llmConfig.provider, llmConfig.endpoint_url, llmConfig.timeout);
      const params = llmConfig.to_provider_params();
      let result = await provider.complete(prompt, params);
      if (result.isLeft) {
        // Only transport-ish failures trigger the edge fallback.
        const errMsg = String(result.left).toLowerCase();
        if (errMsg.includes("timeout") || errMsg.includes("connection") || errMsg.includes("failed")) {
          const fallback = router.handleFailure(llmConfig.provider, new Error(String(result.left)));
          llmConfig.provider = fallback.provider;
          llmConfig.model = fallback.model;
          llmConfig.endpoint_url = fallback.url;
          // Fix: route the retry to the fallback URL. Previously the fallback
          // provider was built with its default endpoint, so the
          // `llmConfig.endpoint_url = fallback.url` assignment had no effect.
          const fallbackProvider = get_provider(fallback.provider, fallback.url, llmConfig.timeout);
          result = await fallbackProvider.complete(prompt, llmConfig.to_provider_params());
        }
      }
      if (result.isRight) {
        let response = result.right;
        let content = typeof response === "string" ? response : response.content || "";
        if (typeof content === "string" && content.includes("<think>")) {
          const regex = /<think>([\s\S]*?)<\/think>/g;
          let match;
          let thinkingBlocks = [];
          while ((match = regex.exec(content)) !== null) {
            if (match[1]) thinkingBlocks.push(match[1].trim());
          }
          content = content.replace(/<think>[\s\S]*?<\/think>/g, "").trim();
          // Normalize to an object carrying both the clean content and the
          // extracted chain-of-thought text.
          if (typeof response === "string") {
            response = { content, thinking_process: thinkingBlocks.join("\n\n") };
          } else {
            response.content = content;
            response.thinking_process = thinkingBlocks.join("\n\n");
          }
          result = Either.right(response);
        }
      }
      return result;
    } catch (e) {
      return Either.left(`LLM execution failed: ${e}`);
    }
  });
}
528
/**
 * Wrap a chat exchange in an IO monad with adaptive edge fallback.
 * If `messages` is null/empty, a message list is assembled from
 * system_prompt / prompt / assistant_instruction. <think> blocks are
 * stripped and surfaced as `thinking_process`. The IO resolves to an
 * Either; it never throws.
 */
function chatMonad(messages = null, prompt = null, system_prompt = "", config = {}) {
  return IO.of(async () => {
    try {
      const router = new IntelligenceRouter();
      const configData = { ...config };
      if (system_prompt) configData.system_prompt = system_prompt;
      const llmConfig = new LLMConfig(configData);
      // Work on a copy so the caller's message array is never mutated.
      const msgs = messages ? [...messages] : [];
      if (msgs.length === 0) {
        if (llmConfig.system_prompt) {
          msgs.push({ role: "system", content: llmConfig.system_prompt });
        }
        if (prompt) {
          msgs.push({ role: "user", content: prompt });
        }
        if (llmConfig.assistant_instruction) {
          msgs.push({ role: "assistant", content: llmConfig.assistant_instruction });
        }
      }
      const attemptChat = async (cfg) => {
        // Fix: honor cfg.endpoint_url/timeout. Previously the provider came
        // from `new LLMRuntime(cfg.provider).provider`, which ignores both —
        // so the fallback URL assigned below was never used for the retry.
        const provider = get_provider(cfg.provider, cfg.endpoint_url, cfg.timeout);
        const params = cfg.to_provider_params();
        return provider.chat(msgs, params);
      };
      let result = await attemptChat(llmConfig);
      if (result.isLeft) {
        // Only transport-ish failures trigger the edge fallback.
        const errMsg = String(result.left).toLowerCase();
        if (errMsg.includes("timeout") || errMsg.includes("connection") || errMsg.includes("failed")) {
          const fallback = router.handleFailure(llmConfig.provider, new Error(String(result.left)));
          llmConfig.provider = fallback.provider;
          llmConfig.model = fallback.model;
          llmConfig.endpoint_url = fallback.url;
          result = await attemptChat(llmConfig);
        }
      }
      if (result.isRight) {
        let response = result.right;
        let content = response.content || "";
        if (typeof content === "string" && content.includes("<think>")) {
          const regex = /<think>([\s\S]*?)<\/think>/g;
          let match;
          let thinkingBlocks = [];
          while ((match = regex.exec(content)) !== null) {
            if (match[1]) thinkingBlocks.push(match[1].trim());
          }
          content = content.replace(/<think>[\s\S]*?<\/think>/g, "").trim();
          response.content = content;
          response.thinking_process = thinkingBlocks.join("\n\n");
          result = Either.right(response);
        }
      }
      return result;
    } catch (e) {
      return Either.left(`LLM chat failed: ${e}`);
    }
  });
}
585
+
586
+ export {
587
+ get_provider,
588
+ LLMRuntime,
589
+ promptMonad,
590
+ chatMonad
591
+ };