@easynet/agent-model 1.0.66 → 1.0.68
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/create-agent-model-registry.d.ts +19 -0
- package/dist/api/create-agent-model-registry.d.ts.map +1 -0
- package/dist/{chunk-4OLU43SH.js → chunk-6EQCGQTV.js} +2 -13
- package/dist/chunk-6EQCGQTV.js.map +1 -0
- package/dist/{chunk-EPVJLBGC.js → chunk-AUQEXHUP.js} +2 -15
- package/dist/chunk-AUQEXHUP.js.map +1 -0
- package/dist/{chunk-K3JR2N4E.js → chunk-KE7IMUSA.js} +2 -29
- package/dist/chunk-KE7IMUSA.js.map +1 -0
- package/dist/chunk-MFLWZSWI.js +17 -0
- package/dist/chunk-MFLWZSWI.js.map +1 -0
- package/dist/{chunk-HSU6XZOI.js → chunk-RRZDREGU.js} +3 -3
- package/dist/cli/index.js +252 -8
- package/dist/cli/index.js.map +1 -1
- package/dist/config/index.js +44 -7
- package/dist/config/index.js.map +1 -1
- package/dist/connectivity/index.js +1 -2
- package/dist/extensions/index.js +3 -5
- package/dist/index.d.ts +3 -57
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +191 -109
- package/dist/index.js.map +1 -1
- package/dist/langchain/index.js +1 -2
- package/dist/model/index.js +172 -5
- package/dist/model/index.js.map +1 -1
- package/dist/npm/index.js +1 -2
- package/dist/registry/index.js +0 -2
- package/package.json +1 -33
- package/dist/chunk-4OLU43SH.js.map +0 -1
- package/dist/chunk-5QOKZ2JF.js +0 -83
- package/dist/chunk-5QOKZ2JF.js.map +0 -1
- package/dist/chunk-EPVJLBGC.js.map +0 -1
- package/dist/chunk-HCU4AWIV.js +0 -19
- package/dist/chunk-HCU4AWIV.js.map +0 -1
- package/dist/chunk-K3JR2N4E.js.map +0 -1
- package/dist/chunk-PZ5AY32C.js +0 -10
- package/dist/chunk-PZ5AY32C.js.map +0 -1
- package/dist/chunk-WMEFXWNS.js +0 -260
- package/dist/chunk-WMEFXWNS.js.map +0 -1
- package/dist/chunk-YOOYQBGK.js +0 -43
- package/dist/chunk-YOOYQBGK.js.map +0 -1
- package/dist/chunk-Z4E7LN4P.js +0 -205
- package/dist/chunk-Z4E7LN4P.js.map +0 -1
- /package/dist/{chunk-HSU6XZOI.js.map → chunk-RRZDREGU.js.map} +0 -0
package/dist/chunk-WMEFXWNS.js
DELETED
@@ -1,260 +0,0 @@
- import {
-   loadModelConfig
- } from "./chunk-3HA2CHZN.js";
- import {
-   buildUnreachableError,
-   checkEndpointConnectivity
- } from "./chunk-EPVJLBGC.js";
- import {
-   resolveLlmSectionWithNpm
- } from "./chunk-HSU6XZOI.js";
- import {
-   createChatModelFromLlmConfig
- } from "./chunk-4OLU43SH.js";
- import {
-   parseLlmSection
- } from "./chunk-SPDXNDDD.js";
-
- // src/cli/index.ts
- var cli_exports = {};
-
- // src/api/create-agent-llm.ts
- import { join } from "path";
- function applyDefaultToolChoice(model) {
-   const m = model;
-   const orig = m.bindTools?.bind(model);
-   if (!orig) return;
-   m.bindTools = function(tools2, opts) {
-     return orig(tools2, { ...opts, tool_choice: "auto" });
-   };
- }
- var CIS_DEFAULT_RESOLVE_HOST = "s0010-ml-https.s0010.us-west-2.awswd";
- var CIS_DEFAULT_RESOLVE_IP = "10.210.98.124";
- function buildEndpointConnectivityOptions(config) {
-   const opts = config.options ?? config;
-   const provider = typeof config.provider === "string" ? config.provider : "";
-   const baseURL = config.baseURL;
-   const isCis = provider === "cis" || provider.includes("cis");
-   const useCisDefault = isCis && baseURL.includes(CIS_DEFAULT_RESOLVE_HOST) && opts?.resolveHost == null;
-   const resolveHost = opts?.resolveHost != null && typeof opts.resolveHost.from === "string" ? opts.resolveHost : useCisDefault ? { from: CIS_DEFAULT_RESOLVE_HOST, to: CIS_DEFAULT_RESOLVE_IP } : void 0;
-   const host = typeof opts?.host === "string" ? opts.host : resolveHost ? resolveHost.from : void 0;
-   if (resolveHost == null && host == null) return void 0;
-   const verifySSL = opts?.verifySSL === true;
-   const bypassAuth = opts?.bypassAuth !== false;
-   return {
-     resolveHost,
-     host,
-     verifySSL: resolveHost != null ? false : verifySSL ? true : void 0,
-     bypassAuth: bypassAuth ? true : void 0,
-     featureKey: typeof opts?.featureKey === "string" ? opts.featureKey : void 0
-   };
- }
- function resolveDefaultConfigPath() {
-   return join(process.cwd(), "model.yaml");
- }
- function normalizeOptions(configPathOrOptions) {
-   if (configPathOrOptions == null) return {};
-   if (typeof configPathOrOptions === "string") return { configPath: configPathOrOptions };
-   return configPathOrOptions;
- }
- function normalizeError(e, context) {
-   if (e instanceof Error) return new Error(`${context}: ${e.message}`, { cause: e });
-   return new Error(`${context}: ${String(e)}`);
- }
- async function ensureConnectivity(resolvedLlmSection, options) {
-   let configs;
-   try {
-     const parsed = parseLlmSection(resolvedLlmSection ?? null);
-     configs = parsed.configs.filter(
-       (c) => typeof c.baseURL === "string" && c.baseURL.length > 0 && (c.baseURL.startsWith("http://") || c.baseURL.startsWith("https://")) && !c.baseURL.includes("${")
-     );
-   } catch {
-     return;
-   }
-   const shouldCheck = options.checkConnectivity !== false && configs.length > 0;
-   if (!shouldCheck) return;
-   const report = (status) => options.onConnectionStatus?.(status);
-   const timeoutMs = options.connectivityTimeoutMs ?? 8e3;
-   for (const config of configs) {
-     const { id, baseURL } = config;
-     report({
-       phase: "checking",
-       endpointId: id,
-       baseURL,
-       message: "Checking connection..."
-     });
-     const endpointOpts = buildEndpointConnectivityOptions(config);
-     const result = await checkEndpointConnectivity(baseURL, {
-       timeoutMs,
-       ...endpointOpts
-     });
-     if (result.reachable) {
-       report({
-         phase: "reachable",
-         endpointId: id,
-         baseURL,
-         message: result.message ?? "Connected"
-       });
-     } else {
-       report({
-         phase: "unreachable",
-         endpointId: id,
-         baseURL,
-         message: result.message ?? "Unreachable"
-       });
-       throw new Error(buildUnreachableError(id, baseURL, result.message));
-     }
-   }
- }
- async function createAgentLlm(configPathOrOptions) {
-   try {
-     const options = normalizeOptions(configPathOrOptions);
-     const configPath = options.configPath ?? resolveDefaultConfigPath();
-     const modelConfig = loadModelConfig(configPath);
-     if (modelConfig?.llm == null) {
-       throw new Error(
-         `No LLM config at ${configPath}. Add model.yaml in the current directory, or pass configPath.`
-       );
-     }
-     const resolvedSection = await resolveLlmSectionWithNpm(modelConfig.llm, {
-       installNpmIfMissing: options.installNpmIfMissing !== false,
-       cwd: process.cwd()
-     });
-     const checkConnectivity = options.checkConnectivity ?? modelConfig.runtime.check_connectivity;
-     await ensureConnectivity(resolvedSection, {
-       checkConnectivity,
-       onConnectionStatus: options.onConnectionStatus,
-       connectivityTimeoutMs: options.connectivityTimeoutMs
-     });
-     const model = createChatModelFromLlmConfig({ llmSection: resolvedSection });
-     applyDefaultToolChoice(model);
-     return model;
-   } catch (e) {
-     if (e instanceof Error && e.message.includes("No LLM config")) throw e;
-     if (e instanceof Error && e.message.includes("Cannot connect to")) throw e;
-     throw normalizeError(e, "createAgentLlm failed");
-   }
- }
-
- // src/cli/utils.ts
- function messageContentToString(content) {
-   if (typeof content === "string") return content;
-   if (Array.isArray(content)) {
-     return content.map((c) => "text" in c && c.text ? c.text : "").join("");
-   }
-   return String(content ?? "");
- }
-
- // src/cli/index.ts
- import { tool } from "@langchain/core/tools";
- import { z } from "zod";
- import { HumanMessage, ToolMessage } from "@langchain/core/messages";
- import { fileURLToPath } from "url";
- function parseArgv() {
-   const args = process.argv.slice(2);
-   let configPath;
-   let useTools = false;
-   const rest = [];
-   for (let i = 0; i < args.length; i++) {
-     if (args[i] === "--config" && args[i + 1]) {
-       configPath = args[i + 1];
-       i++;
-     } else if (args[i] === "--tools") {
-       useTools = true;
-     } else {
-       rest.push(args[i]);
-     }
-   }
-   const query = rest.join(" ").trim() || "hi";
-   return { configPath, useTools, query };
- }
- var getWeather = tool(
-   (input) => {
-     const loc = input.location.toLowerCase();
-     if (["sf", "sf bay"].includes(loc)) return "It's 60\xB0F and foggy in the Bay Area.";
-     if (["ny", "new york"].includes(loc)) return "It's 72\xB0F and partly cloudy in New York.";
-     return `Weather for ${input.location}: 70\xB0F and sunny.`;
-   },
-   {
-     name: "get_weather",
-     description: "Get the current weather for a location.",
-     schema: z.object({
-       location: z.string().describe("City or place name (e.g. SF, New York)")
-     })
-   }
- );
- var addNumbers = tool(
-   (input) => String(input.a + input.b),
-   {
-     name: "add_numbers",
-     description: "Add two numbers.",
-     schema: z.object({
-       a: z.number().describe("First number"),
-       b: z.number().describe("Second number")
-     })
-   }
- );
- var tools = [getWeather, addNumbers];
- var toolsByName = new Map(tools.map((t) => [t.name, t]));
- var MAX_TURNS = 10;
- async function runSimpleChat(model, query) {
-   const messages = [new HumanMessage(query)];
-   const response = await model.invoke(messages);
-   return messageContentToString(response.content);
- }
- async function runAgentWithTools(model, query) {
-   const withTools = model.bindTools?.(tools, { tool_choice: "auto" });
-   const messages = [new HumanMessage(query)];
-   if (!withTools) return runSimpleChat(model, query);
-   for (let turn = 0; turn < MAX_TURNS; turn++) {
-     const response = await withTools.invoke(messages);
-     const aiMessage = response;
-     if (!aiMessage.tool_calls?.length) {
-       return messageContentToString(aiMessage.content);
-     }
-     messages.push(aiMessage);
-     for (const tc of aiMessage.tool_calls) {
-       const id = tc.id ?? `call_${turn}_${tc.name}`;
-       const toolFn = toolsByName.get(tc.name);
-       let result;
-       if (toolFn) {
-         try {
-           const out = await toolFn.invoke(
-             tc.args
-           );
-           result = typeof out === "string" ? out : JSON.stringify(out);
-         } catch (err) {
-           result = `Error: ${err instanceof Error ? err.message : String(err)}`;
-         }
-       } else {
-         result = `Unknown tool: ${tc.name}`;
-       }
-       messages.push(new ToolMessage({ content: result, tool_call_id: id }));
-     }
-   }
-   return "Agent reached max turns without a final answer.";
- }
- async function main() {
-   const { configPath, useTools, query } = parseArgv();
-   const model = await createAgentLlm(configPath ? { configPath } : void 0);
-   console.log("Query:", query);
-   console.log("---");
-   const answer = useTools ? await runAgentWithTools(model, query) : await runSimpleChat(model, query);
-   console.log("Answer:", answer);
-   console.log("---");
-   console.log("Done.");
- }
- var entryArg = process.argv[1];
- var isDirectRun = typeof entryArg === "string" && entryArg.length > 0 && fileURLToPath(import.meta.url) === entryArg;
- if (isDirectRun) {
-   main().catch((err) => {
-     console.error(err);
-     process.exit(1);
-   });
- }
-
- export {
-   createAgentLlm,
-   cli_exports
- };
- //# sourceMappingURL=chunk-WMEFXWNS.js.map
package/dist/chunk-WMEFXWNS.js.map
DELETED

@@ -1 +0,0 @@
- {"version":3,"sources":["../src/cli/index.ts","../src/api/create-agent-llm.ts","../src/cli/utils.ts"],"sourcesContent":[...],"mappings":"...","names":["tools"]}
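For orientation: the deleted chunk above carried the createAgentLlm entry point, and judging by the file list its contents appear to have been folded into dist/cli/index.js and dist/index.js, which grow by a comparable amount in 1.0.68. Below is a minimal consumer sketch using only option and callback fields visible in the deleted source; the package-root import path is an assumption, not confirmed by this diff.

import { createAgentLlm } from "@easynet/agent-model"; // assumed root export
import { HumanMessage } from "@langchain/core/messages";

// createAgentLlm reads ./model.yaml by default; each option below maps to a
// field handled in the deleted code (normalizeOptions / ensureConnectivity).
const model = await createAgentLlm({
  configPath: "./model.yaml",
  checkConnectivity: true,
  connectivityTimeoutMs: 8000, // the deleted code defaults to 8e3
  onConnectionStatus: (s) => console.log(`[${s.phase}] ${s.endpointId}: ${s.message}`),
});

// The returned model is a LangChain BaseChatModel whose bindTools is patched
// to always pass tool_choice: "auto".
const reply = await model.invoke([new HumanMessage("hi")]);
console.log(reply.content);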
package/dist/chunk-YOOYQBGK.js
DELETED
@@ -1,43 +0,0 @@
- import {
-   createChatModelWithNpm,
-   discoverLLMExtensions,
-   ensureNpmPackageInstalled,
-   getInstalledVersion,
-   getLatestVersion,
-   loadDiscoveredExtensions,
-   loadLLMExtensions,
-   resolveLLMExtensionPackages,
-   resolveLlmSectionWithNpm,
-   resolveNpmProvider
- } from "./chunk-HSU6XZOI.js";
- import {
-   NPM_PROTOCOL_PREFIX,
-   isNpmProviderSpec,
-   parseNpmProviderSpec
- } from "./chunk-K3JR2N4E.js";
- import {
-   __export
- } from "./chunk-PZ5AY32C.js";
-
- // src/extensions/index.ts
- var extensions_exports = {};
- __export(extensions_exports, {
-   NPM_PROTOCOL_PREFIX: () => NPM_PROTOCOL_PREFIX,
-   createChatModelWithNpm: () => createChatModelWithNpm,
-   discoverLLMExtensions: () => discoverLLMExtensions,
-   ensureNpmPackageInstalled: () => ensureNpmPackageInstalled,
-   getInstalledVersion: () => getInstalledVersion,
-   getLatestVersion: () => getLatestVersion,
-   isNpmProviderSpec: () => isNpmProviderSpec,
-   loadDiscoveredExtensions: () => loadDiscoveredExtensions,
-   loadLLMExtensions: () => loadLLMExtensions,
-   parseNpmProviderSpec: () => parseNpmProviderSpec,
-   resolveLLMExtensionPackages: () => resolveLLMExtensionPackages,
-   resolveLlmSectionWithNpm: () => resolveLlmSectionWithNpm,
-   resolveNpmProvider: () => resolveNpmProvider
- });
-
- export {
-   extensions_exports
- };
- //# sourceMappingURL=chunk-YOOYQBGK.js.map
package/dist/chunk-YOOYQBGK.js.map
DELETED

@@ -1 +0,0 @@
- {"version":3,"sources":["../src/extensions/index.ts"],"sourcesContent":[...],"mappings":"...","names":[]}
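The chunk deleted above is a pure re-export barrel: __export installs a live, enumerable getter on extensions_exports for each listed name, so the namespace object tracks the underlying bindings rather than snapshotting their values at load time. For reference, the usual shape of esbuild's __export helper is shown below; the deleted 10-line chunk-PZ5AY32C.js is consistent with it, but its body is not rendered in this diff, so treat this as a sketch.

// Reference shape of the standard esbuild helper (not copied from the diff).
var __defProp = Object.defineProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};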
package/dist/chunk-Z4E7LN4P.js
DELETED
@@ -1,205 +0,0 @@
- import {
-   parseLlmSection
- } from "./chunk-SPDXNDDD.js";
- import {
-   __export
- } from "./chunk-PZ5AY32C.js";
-
- // src/model/index.ts
- var model_exports = {};
- __export(model_exports, {
-   chatCompletionViaOpenAICompatibleApi: () => chatCompletionViaOpenAICompatibleApi,
-   createModelHub: () => createModelHub,
-   embedViaOpenAICompatibleApi: () => embedViaOpenAICompatibleApi,
-   parseEmbedSection: () => parseEmbedSection,
-   parseLlmSection: () => parseLlmSection
- });
-
- // src/model/embed-parser.ts
- function parseEmbedSection(section) {
-   const result = parseLlmSection(section);
-   for (const config of result.configs) {
-     config.type = "embed";
-   }
-   return result;
- }
-
- // src/model/chat.ts
- var DEFAULT_CHAT_TIMEOUT_MS = 6e4;
- function isLocalBaseUrl(url) {
-   try {
-     const u = new URL(url);
-     const host = u.hostname.toLowerCase();
-     return host === "localhost" || host === "127.0.0.1" || host === "::1";
-   } catch {
-     return false;
-   }
- }
- function normalizeContent(content) {
-   if (typeof content === "string") return content;
-   if (!Array.isArray(content)) return String(content ?? "");
-   const parts = [];
-   for (const p of content) {
-     if (typeof p === "string") parts.push(p);
-     else if (p && typeof p === "object" && typeof p.text === "string")
-       parts.push(p.text);
-   }
-   return parts.join("\n").trim();
- }
- async function chatCompletionViaOpenAICompatibleApi(options, request) {
-   const baseUrl = options.baseURL.replace(/\/$/, "");
-   const apiKey = options.apiKey?.trim();
-   const timeoutMs = options.timeoutMs ?? DEFAULT_CHAT_TIMEOUT_MS;
-   const modelName = typeof request.model === "string" && request.model.trim() !== "" ? request.model.trim() : options.model ?? "gpt-4o-mini";
-   if (!apiKey && !isLocalBaseUrl(baseUrl)) {
-     throw new Error("Chat completion API key is required for non-local baseURL");
-   }
-   const body = {
-     model: modelName,
-     messages: request.messages.map((m) => ({ role: m.role, content: m.content })),
-     temperature: typeof request.temperature === "number" ? request.temperature : 0
-   };
-   if (typeof request.maxTokens === "number") body.max_tokens = request.maxTokens;
-   const controller = new AbortController();
-   const timer = setTimeout(() => controller.abort(), timeoutMs);
-   try {
-     const response = await fetch(`${baseUrl}/chat/completions`, {
-       method: "POST",
-       headers: {
-         "Content-Type": "application/json",
-         ...apiKey ? { Authorization: `Bearer ${apiKey}` } : {}
-       },
-       body: JSON.stringify(body),
-       signal: controller.signal
-     });
-     if (!response.ok) {
-       const text = await response.text();
-       throw new Error(`Chat completion API error ${response.status}: ${text.slice(0, 500)}`);
-     }
-     const data = await response.json();
-     const raw = data.choices?.[0]?.message?.content ?? data.choices?.[0]?.text ?? "";
-     return {
-       text: normalizeContent(raw),
-       model: data.model ?? modelName
-     };
-   } finally {
-     clearTimeout(timer);
-   }
- }
-
- // src/model/embedding.ts
- var DEFAULT_EMBEDDING_TIMEOUT_MS = 3e4;
- function isLocalBaseUrl2(url) {
-   try {
-     const u = new URL(url);
-     const host = u.hostname.toLowerCase();
-     return host === "localhost" || host === "127.0.0.1" || host === "::1";
-   } catch {
-     return false;
-   }
- }
- async function embedViaOpenAICompatibleApi(options, input) {
-   const baseUrl = options.baseURL.replace(/\/$/, "");
-   const apiKey = options.apiKey?.trim();
-   const timeoutMs = options.timeoutMs ?? DEFAULT_EMBEDDING_TIMEOUT_MS;
-   const modelName = typeof input.model === "string" && input.model.trim() !== "" ? input.model.trim() : options.model ?? "text-embedding-3-small";
-   if (!apiKey && !isLocalBaseUrl2(baseUrl)) {
-     throw new Error("Embedding API key is required for non-local baseURL");
-   }
-   const controller = new AbortController();
-   const timer = setTimeout(() => controller.abort(), timeoutMs);
-   try {
-     const response = await fetch(`${baseUrl}/embeddings`, {
-       method: "POST",
-       headers: {
-         "Content-Type": "application/json",
-         ...apiKey ? { Authorization: `Bearer ${apiKey}` } : {}
-       },
-       body: JSON.stringify({ model: modelName, input: input.input }),
-       signal: controller.signal
-     });
-     if (!response.ok) {
-       const body = await response.text();
-       throw new Error(`Embedding API error ${response.status}: ${body.slice(0, 500)}`);
-     }
-     const data = await response.json();
-     const vectors = (data.data ?? []).slice().sort((a, b) => (a.index ?? 0) - (b.index ?? 0)).map((v) => v.embedding).filter((v) => Array.isArray(v));
-     return {
-       vectors,
-       dimensions: vectors[0]?.length,
-       model: data.model ?? modelName
-     };
-   } finally {
-     clearTimeout(timer);
-   }
- }
-
- // src/model/hub.ts
- var DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small";
- var DEFAULT_CHAT_MODEL = "gpt-4o-mini";
- var DEFAULT_BASE_URL = "https://api.openai.com/v1";
- function getDefaultLlmConfig(llmSection) {
-   const parsed = parseLlmSection(llmSection ?? null);
-   const config = parsed.configs.find((c) => c.id === parsed.defaultId) ?? parsed.configs[0] ?? null;
-   return config;
- }
- function createModelHub(options = {}) {
-   const baseCfg = getDefaultLlmConfig(options.llmSection);
-   const defaultBaseUrl = (options.embeddingBaseURL ?? baseCfg?.baseURL ?? DEFAULT_BASE_URL).replace(/\/$/, "");
-   const defaultApiKey = options.embeddingApiKey ?? baseCfg?.apiKey ?? process.env.OPENAI_API_KEY;
-   const defaultEmbeddingModel = options.embeddingModel ?? (baseCfg?.options && typeof baseCfg.options.embeddingModel === "string" ? baseCfg.options.embeddingModel : void 0) ?? baseCfg?.model ?? process.env.OPENAI_EMBEDDING_MODEL ?? DEFAULT_EMBEDDING_MODEL;
-   const defaultChatModel = baseCfg?.model ?? process.env.OPENAI_MODEL ?? DEFAULT_CHAT_MODEL;
-   const embeddingTimeoutMs = options.embeddingTimeoutMs;
-   const chatTimeoutMs = options.chatTimeoutMs;
-   return {
-     async generate(input) {
-       const messages = [];
-       if (typeof input.systemPrompt === "string" && input.systemPrompt.trim() !== "") {
-         messages.push({ role: "system", content: input.systemPrompt.trim() });
-       }
-       messages.push({ role: "user", content: input.input });
-       const result = await chatCompletionViaOpenAICompatibleApi(
-         {
-           baseURL: defaultBaseUrl,
-           apiKey: defaultApiKey,
-           model: defaultChatModel,
-           timeoutMs: chatTimeoutMs
-         },
-         {
-           messages,
-           model: input.model,
-           temperature: input.temperature
-         }
-       );
-       return {
-         text: result.text,
-         model: typeof input.model === "string" && input.model.trim() !== "" ? input.model : result.model ?? baseCfg?.model
-       };
-     },
-     async embed(input) {
-       const result = await embedViaOpenAICompatibleApi(
-         {
-           baseURL: defaultBaseUrl,
-           apiKey: defaultApiKey,
-           model: defaultEmbeddingModel,
-           timeoutMs: embeddingTimeoutMs
-         },
-         { input: input.input, model: input.model }
-       );
-       return {
-         vectors: result.vectors,
-         dimensions: result.dimensions,
-         model: result.model
-       };
-     }
-   };
- }
-
- export {
-   chatCompletionViaOpenAICompatibleApi,
-   embedViaOpenAICompatibleApi,
-   createModelHub,
-   parseEmbedSection,
-   model_exports
- };
- //# sourceMappingURL=chunk-Z4E7LN4P.js.map
package/dist/chunk-Z4E7LN4P.js.map
DELETED

@@ -1 +0,0 @@
- {"version":3,"sources":["../src/model/index.ts","../src/model/embed-parser.ts","../src/model/chat.ts","../src/model/embedding.ts","../src/model/hub.ts"],"sourcesContent":[...],"mappings":"...","names":["isLocalBaseUrl"]}
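For context, a minimal sketch of the hub API this chunk carried (createModelHub with generate and embed), which per the file list now appears to ship from dist/model/index.js. Field names come from the deleted code; the subpath import and the llmSection value are illustrative assumptions.

import { createModelHub } from "@easynet/agent-model/model"; // assumed subpath export

const hub = createModelHub({
  llmSection: {
    default: "main",
    // Flat-entry format accepted by parseLlmSection; a local base URL skips
    // the API-key requirement enforced by isLocalBaseUrl in the deleted code.
    main: { provider: "openai", base_url: "http://localhost:11434/v1", model: "llama3", apiKey: "ollama" },
  },
});

// generate() POSTs to {baseURL}/chat/completions; embed() POSTs to {baseURL}/embeddings.
const { text } = await hub.generate({ input: "ping", systemPrompt: "Be terse." });
const { vectors, dimensions } = await hub.embed({ input: ["hello", "world"] });
console.log(text, dimensions, vectors.length);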
package/dist/{chunk-HSU6XZOI.js.map → chunk-RRZDREGU.js.map}
File renamed without content changes