mcard-js 2.1.49 → 2.1.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,10 @@
1
+ import {
2
+ CardCollection
3
+ } from "./chunk-VJPXJVEH.js";
4
+ import "./chunk-GGQCF7ZK.js";
5
+ import "./chunk-ETJWXHKZ.js";
6
+ import "./chunk-ASW6AOA7.js";
7
+ import "./chunk-PNKVD2UK.js";
8
+ export {
9
+ CardCollection
10
+ };
@@ -0,0 +1,17 @@
1
+ import {
2
+ ENGINE_INFO,
3
+ EngineType,
4
+ createEngine,
5
+ getAvailableEngines,
6
+ getEngineInfo,
7
+ getEnginesByEnvironment
8
+ } from "./chunk-VW3KBDK5.js";
9
+ import "./chunk-PNKVD2UK.js";
10
+ export {
11
+ ENGINE_INFO,
12
+ EngineType,
13
+ createEngine,
14
+ getAvailableEngines,
15
+ getEngineInfo,
16
+ getEnginesByEnvironment
17
+ };
@@ -0,0 +1,12 @@
1
+ import {
2
+ IndexedDBEngine
3
+ } from "./chunk-ZMK2HTZ5.js";
4
+ import "./chunk-3EIBJPNF.js";
5
+ import "./chunk-GGQCF7ZK.js";
6
+ import "./chunk-ADV52544.js";
7
+ import "./chunk-ETJWXHKZ.js";
8
+ import "./chunk-ASW6AOA7.js";
9
+ import "./chunk-PNKVD2UK.js";
10
+ export {
11
+ IndexedDBEngine
12
+ };
@@ -0,0 +1,17 @@
1
+ import {
2
+ LLMRuntime,
3
+ chatMonad,
4
+ get_provider,
5
+ promptMonad
6
+ } from "./chunk-JUQ2VQZA.js";
7
+ import "./chunk-2APJYBH4.js";
8
+ import "./chunk-MPMRBT5R.js";
9
+ import "./chunk-2KADE3SE.js";
10
+ import "./chunk-ETJWXHKZ.js";
11
+ import "./chunk-PNKVD2UK.js";
12
+ export {
13
+ LLMRuntime,
14
+ chatMonad,
15
+ get_provider,
16
+ promptMonad
17
+ };
@@ -0,0 +1,17 @@
1
+ import {
2
+ LLMRuntime,
3
+ chatMonad,
4
+ get_provider,
5
+ promptMonad
6
+ } from "./chunk-IJKS3LGK.js";
7
+ import "./chunk-OUW2SUGM.js";
8
+ import "./chunk-MPMRBT5R.js";
9
+ import "./chunk-2KADE3SE.js";
10
+ import "./chunk-3FFEA2XK.js";
11
+ import "./chunk-PNKVD2UK.js";
12
+ export {
13
+ LLMRuntime,
14
+ chatMonad,
15
+ get_provider,
16
+ promptMonad
17
+ };
@@ -0,0 +1,19 @@
1
+ import {
2
+ LambdaRuntime,
3
+ parseLambdaExpression
4
+ } from "./chunk-NOPYSBOQ.js";
5
+ import "./chunk-VJPXJVEH.js";
6
+ import "./chunk-MPMRBT5R.js";
7
+ import "./chunk-2KADE3SE.js";
8
+ import "./chunk-5HRZV4R3.js";
9
+ import "./chunk-GIKMCG4D.js";
10
+ import "./chunk-3EIBJPNF.js";
11
+ import "./chunk-GGQCF7ZK.js";
12
+ import "./chunk-ADV52544.js";
13
+ import "./chunk-ETJWXHKZ.js";
14
+ import "./chunk-ASW6AOA7.js";
15
+ import "./chunk-PNKVD2UK.js";
16
+ export {
17
+ LambdaRuntime,
18
+ parseLambdaExpression
19
+ };
@@ -0,0 +1,19 @@
1
+ import {
2
+ LambdaRuntime,
3
+ parseLambdaExpression
4
+ } from "./chunk-7TXIPJI2.js";
5
+ import "./chunk-MPMRBT5R.js";
6
+ import "./chunk-2KADE3SE.js";
7
+ import "./chunk-Z7EFXSTO.js";
8
+ import "./chunk-HIVVDGE5.js";
9
+ import "./chunk-3EIBJPNF.js";
10
+ import "./chunk-ADV52544.js";
11
+ import "./chunk-QPVEUPMU.js";
12
+ import "./chunk-3FFEA2XK.js";
13
+ import "./chunk-GGQCF7ZK.js";
14
+ import "./chunk-ASW6AOA7.js";
15
+ import "./chunk-PNKVD2UK.js";
16
+ export {
17
+ LambdaRuntime,
18
+ parseLambdaExpression
19
+ };
@@ -0,0 +1,12 @@
1
+ import {
2
+ loadFileToCollection,
3
+ processAndStoreFile
4
+ } from "./chunk-CHXIVTQV.js";
5
+ import "./chunk-GGQCF7ZK.js";
6
+ import "./chunk-ETJWXHKZ.js";
7
+ import "./chunk-ASW6AOA7.js";
8
+ import "./chunk-PNKVD2UK.js";
9
+ export {
10
+ loadFileToCollection,
11
+ processAndStoreFile
12
+ };
@@ -0,0 +1,9 @@
1
+ import {
2
+ OllamaProvider
3
+ } from "./chunk-2APJYBH4.js";
4
+ import "./chunk-2KADE3SE.js";
5
+ import "./chunk-ETJWXHKZ.js";
6
+ import "./chunk-PNKVD2UK.js";
7
+ export {
8
+ OllamaProvider
9
+ };
@@ -0,0 +1,368 @@
1
+ import {
2
+ Either
3
+ } from "./chunk-2KADE3SE.js";
4
+ import {
5
+ LLM_DEFAULT_RETRY_COUNT,
6
+ LLM_DEFAULT_RETRY_DELAY_SECS,
7
+ LLM_DEFAULT_TIMEOUT_SECS
8
+ } from "./chunk-ETJWXHKZ.js";
9
+
10
// src/ptr/llm/providers/LLMProvider.ts
// Shared base class for LLM providers. Subclasses supply provider_name,
// validate_connection() and list_models(); this class builds a uniform
// status report on top of them.
var BaseLLMProvider = class {
  /**
   * Probe the provider and summarize its health.
   * @returns {Promise<{provider: string, available: boolean, models: string[], error: string|null}>}
   *   `models` is the listed model names on success, `[]` otherwise;
   *   `error` carries the failure string (or "Not connected") when unhealthy.
   */
  async get_status() {
    const available = await this.validate_connection();
    // `outcome` ends up as either an array of model names or an error string.
    let outcome = "Not connected";
    if (available) {
      const result = await this.list_models();
      outcome = result.isRight ? result.right : result.left;
    }
    return {
      provider: this.provider_name,
      available,
      models: Array.isArray(outcome) ? outcome : [],
      error: typeof outcome === "string" ? outcome : null
    };
  }
};
33
+
34
// src/ptr/llm/Config.ts
// Provider registry plus the fallback values LLMConfig applies when a
// caller leaves a field unset.
var DEFAULT_PROVIDER = "ollama";
var LLM_PROVIDERS = {
  // Local Ollama daemon (native API, not OpenAI-compatible).
  "ollama": {
    base_url: "http://localhost:11434",
    api_path: "/api/generate",
    chat_path: "/api/chat",
    models_path: "/api/tags",
    default_model: "gemma3:latest",
    available_models: ["gemma3:latest", "llama3:latest", "qwen3:latest"]
  },
  // In-browser WebLLM runtime; no HTTP endpoints apply.
  "webllm": {
    base_url: "",
    api_path: "",
    chat_path: "",
    models_path: null,
    default_model: "Llama-3-8B-Instruct-q4f32_1-MLC",
    available_models: ["Llama-3-8B-Instruct-q4f32_1-MLC", "Hermes-2-Pro-Llama-3-8B-q4f16_1-MLC", "Phi-3-Mini-4k-Instruct-q4f16_1-MLC"]
  },
  // MLC-LLM server exposing OpenAI-compatible routes.
  "mlc-llm": {
    base_url: "http://localhost:8000",
    api_path: "/v1/completions",
    chat_path: "/v1/chat/completions",
    models_path: "/v1/models",
    default_model: "Llama-3-8B-Instruct-q4f16_1-MLC",
    available_models: []
  },
  // LM Studio local server (OpenAI-compatible).
  "lmstudio": {
    base_url: "http://localhost:1234",
    api_path: "/v1/completions",
    chat_path: "/v1/chat/completions",
    models_path: "/v1/models",
    default_model: "local-model",
    available_models: []
  },
  "openai": {
    base_url: "https://api.openai.com",
    api_path: "/v1/completions",
    chat_path: "/v1/chat/completions",
    models_path: "/v1/models",
    default_model: "gpt-4",
    available_models: ["gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"]
  },
  // Anthropic Messages API; it has no public model-list endpoint.
  "anthropic": {
    base_url: "https://api.anthropic.com",
    api_path: "/v1/messages",
    chat_path: "/v1/messages",
    models_path: null,
    default_model: "claude-3-sonnet-20240229",
    available_models: ["claude-3-opus", "claude-3-sonnet", "claude-3-haiku"]
  }
};
// Defaults merged into every LLMConfig; the timeout/retry values come from
// shared constants imported at the top of this bundle chunk.
var DEFAULT_LLM_CONFIG = {
  temperature: 0.7,
  max_tokens: 2048,
  top_p: 1,
  top_k: 40,
  timeout: LLM_DEFAULT_TIMEOUT_SECS,
  stream: false,
  response_format: "text",
  retry_count: LLM_DEFAULT_RETRY_COUNT,
  retry_delay: LLM_DEFAULT_RETRY_DELAY_SECS
};
// Accepted values for LLMConfig.response_format.
var RESPONSE_FORMATS = ["text", "json", "structured", "markdown"];
99
/**
 * Normalized, validated configuration for a single LLM request.
 * Unset fields fall back to DEFAULT_LLM_CONFIG / DEFAULT_PROVIDER.
 */
var LLMConfig = class _LLMConfig {
  provider;
  model;
  endpoint_url;
  api_key;
  system_prompt;
  assistant_instruction;
  temperature;
  max_tokens;
  top_p;
  top_k;
  frequency_penalty;
  presence_penalty;
  stop_sequences;
  response_format;
  json_schema;
  timeout;
  retry_count;
  retry_delay;
  stream;
  /**
   * @param {object} [data] - partial configuration overrides.
   * @throws {Error} when provider or response_format is unknown.
   */
  constructor(data = {}) {
    const d = DEFAULT_LLM_CONFIG;
    // NOTE: `||` (not `??`) is deliberate on the string/array/penalty fields
    // so empty strings also fall back; numeric sampling knobs use `??` so an
    // explicit 0 is respected.
    this.provider = data.provider || DEFAULT_PROVIDER;
    this.model = data.model || null;
    this.endpoint_url = data.endpoint_url || null;
    this.api_key = data.api_key || null;
    this.system_prompt = data.system_prompt || "";
    this.assistant_instruction = data.assistant_instruction || "";
    this.temperature = data.temperature ?? d.temperature;
    this.max_tokens = data.max_tokens ?? d.max_tokens;
    this.top_p = data.top_p ?? d.top_p;
    this.top_k = data.top_k ?? d.top_k;
    this.frequency_penalty = data.frequency_penalty || 0;
    this.presence_penalty = data.presence_penalty || 0;
    this.stop_sequences = data.stop_sequences || [];
    this.response_format = data.response_format || d.response_format;
    this.json_schema = data.json_schema || null;
    this.timeout = data.timeout ?? d.timeout;
    this.retry_count = data.retry_count ?? d.retry_count;
    this.retry_delay = data.retry_delay ?? d.retry_delay;
    this.stream = data.stream ?? d.stream;
    this.validate();
  }
  /** Fail fast on values no provider table entry can satisfy. */
  validate() {
    if (!LLM_PROVIDERS[this.provider]) {
      throw new Error(`Unknown provider: ${this.provider}. Available: ${Object.keys(LLM_PROVIDERS).join(", ")}`);
    }
    if (!RESPONSE_FORMATS.includes(this.response_format)) {
      throw new Error(`Unknown response format: ${this.response_format}. Available: ${RESPONSE_FORMATS.join(", ")}`);
    }
  }
  /** Explicit model if set, else the provider's default. */
  get effective_model() {
    return this.model || LLM_PROVIDERS[this.provider].default_model;
  }
  /** endpoint_url override (trailing slash stripped), else the provider base. */
  get effective_base_url() {
    const override = this.endpoint_url;
    if (override) {
      return override.replace(/\/$/, "");
    }
    return LLM_PROVIDERS[this.provider].base_url;
  }
  /**
   * Translate this config into the request-parameter shape the target
   * provider expects (Ollama nests sampling knobs under `options`; the
   * OpenAI-style providers take them flat).
   */
  to_provider_params() {
    const params = {
      model: this.effective_model,
      temperature: this.temperature
    };
    const stops = this.stop_sequences;
    if (this.provider === "ollama") {
      const options = {
        num_predict: this.max_tokens,
        top_p: this.top_p,
        top_k: this.top_k,
        temperature: this.temperature
      };
      if (stops.length > 0) {
        options.stop = stops;
      }
      params.options = options;
    } else {
      params.max_tokens = this.max_tokens;
      params.top_p = this.top_p;
      if (stops.length > 0) {
        params.stop = stops;
      }
      // Penalties are only forwarded when non-zero (original behavior).
      if (this.frequency_penalty) params.frequency_penalty = this.frequency_penalty;
      if (this.presence_penalty) params.presence_penalty = this.presence_penalty;
    }
    return params;
  }
  /**
   * Build a config from a "concrete" card object plus call-site context.
   * Precedence (lowest to highest): concrete.llm_config, flat fields on the
   * concrete object, then explicit context overrides.
   */
  static from_concrete(concrete, context = {}) {
    const configData = { ...(concrete.llm_config || {}) };
    for (const key of ["provider", "model", "system_prompt", "temperature", "max_tokens"]) {
      if (key in concrete) {
        configData[key] = concrete[key];
      }
    }
    const contextKeys = [
      "provider",
      "model",
      "endpoint_url",
      "api_key",
      "system_prompt",
      "assistant_instruction",
      "temperature",
      "max_tokens",
      "top_p",
      "top_k",
      "response_format",
      "timeout"
    ];
    for (const key of contextKeys) {
      if (key in context) {
        configData[key] = context[key];
      }
    }
    return new _LLMConfig(configData);
  }
};
213
+
214
+ // src/ptr/llm/providers/OllamaProvider.ts
215
+ import * as http from "http";
216
+ import * as https from "https";
217
/**
 * LLM provider backed by an Ollama server, using Node's raw http/https
 * clients. All network methods resolve (never reject) with an
 * Either<string, object>: left = error message, right = parsed response.
 */
var OllamaProvider = class extends BaseLLMProvider {
  provider_name = "ollama";
  base_url;   // server origin, trailing slash stripped
  timeout;    // request timeout in MILLISECONDS (constructor arg is seconds)
  config;     // endpoint/model table from LLM_PROVIDERS["ollama"]
  /**
   * @param {string|null} base_url - override for the registry default.
   * @param {number} timeout - request timeout in seconds.
   */
  constructor(base_url = null, timeout = LLM_DEFAULT_TIMEOUT_SECS) {
    super();
    this.config = LLM_PROVIDERS["ollama"];
    // Strip one trailing slash so endpoint paths concatenate cleanly.
    this.base_url = (base_url || this.config.base_url).replace(/\/$/, "");
    this.timeout = timeout * 1e3;
  }
  /**
   * Issue a JSON request against the Ollama server.
   * @param {string} endpoint - path appended to base_url (e.g. "/api/tags").
   * @param {object|null} data - JSON body for POST-style requests.
   * @param {string} method - HTTP method, defaults to "POST".
   * @returns {Promise<Either>} right(parsed JSON) or left(error string).
   */
  async _make_request(endpoint, data = null, method = "POST") {
    const url = new URL(`${this.base_url}${endpoint}`);
    const client = url.protocol === "https:" ? https : http;
    const options = {
      method,
      headers: {
        "Content-Type": "application/json"
      },
      timeout: this.timeout
    };
    return new Promise((resolve) => {
      let payload;
      if (data) {
        payload = JSON.stringify(data);
        options.headers["Content-Length"] = Buffer.byteLength(payload);
      }
      const req = client.request(url, options, (res) => {
        let body = "";
        res.on("data", (chunk) => {
          body += chunk;
        });
        res.on("end", () => {
          if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
            try {
              // Normal case: the body is a single JSON document.
              // (The original code branched on body.includes("\n") here but
              // both branches were identical — collapsed to one parse.)
              resolve(Either.right(JSON.parse(body)));
            } catch (e) {
              // Fallback: streamed responses arrive as NDJSON; the final
              // non-empty line carries the aggregate result.
              const lines = body.trim().split("\n").filter((l) => l);
              if (lines.length > 0) {
                try {
                  resolve(Either.right(JSON.parse(lines[lines.length - 1])));
                } catch (parseErr) {
                  resolve(Either.left(`Ollama response parse error: ${parseErr}`));
                }
              } else {
                resolve(Either.left(`Ollama response parse error: ${e}`));
              }
            }
          } else {
            resolve(Either.left(`Ollama HTTP error ${res.statusCode}: ${body}`));
          }
        });
      });
      req.on("error", (e) => {
        resolve(Either.left(`Ollama connection error: ${e.message}`));
      });
      req.on("timeout", () => {
        req.destroy();
        resolve(Either.left(`Ollama request timed out after ${this.timeout}ms`));
      });
      if (payload) {
        req.write(payload);
      }
      req.end();
    });
  }
  /**
   * Single-prompt completion via /api/generate.
   * @param {string} prompt
   * @param {object} params - provider params; params.model / params.options
   *   are forwarded when present.
   * @param {string[]} [images] - base64 images for multimodal models.
   * @returns {Promise<Either>} right(completion text) or left(error string).
   */
  async complete(prompt, params, images) {
    const data = {
      model: typeof params.model === "string" ? params.model : this.config.default_model,
      prompt,
      stream: false
    };
    if (images && images.length > 0) {
      data.images = images;
    }
    if (params.options) {
      data.options = params.options;
    }
    const result = await this._make_request(this.config.api_path, data);
    if (result.isLeft) {
      return Either.left(result.left);
    }
    const response = result.right;
    if (response.response !== void 0) {
      return Either.right(response.response);
    } else if (response.error) {
      return Either.left(`Ollama error: ${response.error}`);
    } else {
      return Either.left(`Unexpected Ollama response format: ${JSON.stringify(response)}`);
    }
  }
  /**
   * Multi-turn chat via /api/chat.
   * @param {Array<{role: string, content: string}>} messages
   * @param {object} params - provider params (model, options).
   * @returns {Promise<Either>} right({content, role, model, done, ...}) or
   *   left(error string).
   */
  async chat(messages, params) {
    const data = {
      model: typeof params.model === "string" ? params.model : this.config.default_model,
      messages,
      stream: false
    };
    if (params.options) {
      data.options = params.options;
    }
    const result = await this._make_request(this.config.chat_path, data);
    if (result.isLeft) {
      return Either.left(result.left);
    }
    const response = result.right;
    if (response.message) {
      return Either.right({
        content: response.message.content || "",
        role: response.message.role || "assistant",
        model: response.model || data.model,
        done: response.done ?? true,
        total_duration: response.total_duration,
        eval_count: response.eval_count
      });
    } else if (response.error) {
      return Either.left(`Ollama error: ${response.error}`);
    } else {
      return Either.left(`Unexpected Ollama chat response format: ${JSON.stringify(response)}`);
    }
  }
  /** True when the models endpoint answers successfully. */
  async validate_connection() {
    const result = await this._make_request(this.config.models_path, null, "GET");
    return result.isRight;
  }
  /**
   * List model names from /api/tags.
   * @returns {Promise<Either>} right(string[]) or left(error string).
   */
  async list_models() {
    const result = await this._make_request(this.config.models_path, null, "GET");
    if (result.isLeft) {
      return Either.left(result.left);
    }
    const response = result.right;
    if (response.models) {
      const models = response.models.map((m) => m.name || m.model || "unknown");
      return Either.right(models);
    } else {
      return Either.left(`Unexpected models response: ${JSON.stringify(response)}`);
    }
  }
};
361
+
362
+ export {
363
+ DEFAULT_PROVIDER,
364
+ LLM_PROVIDERS,
365
+ LLMConfig,
366
+ BaseLLMProvider,
367
+ OllamaProvider
368
+ };