@easynet/agent-llm 1.0.2 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +39 -78
- package/dist/chunk-3Z3KXKNU.js +422 -0
- package/dist/chunk-3Z3KXKNU.js.map +1 -0
- package/dist/cli.d.ts +9 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +120 -0
- package/dist/cli.js.map +1 -0
- package/dist/config.d.ts +2 -2
- package/dist/createAgentLlM.d.ts +10 -0
- package/dist/createAgentLlM.d.ts.map +1 -0
- package/dist/factory.d.ts +2 -2
- package/dist/factory.d.ts.map +1 -1
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +23 -341
- package/dist/index.js.map +1 -1
- package/dist/llmAdapter.d.ts.map +1 -1
- package/dist/loadLlmConfig.d.ts +24 -0
- package/dist/loadLlmConfig.d.ts.map +1 -0
- package/dist/providers/openai.d.ts +2 -2
- package/dist/types.d.ts +31 -31
- package/dist/types.d.ts.map +1 -1
- package/package.json +12 -5
package/dist/cli.js
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import {
|
|
3
|
+
createChatModelFromLlmConfig,
|
|
4
|
+
loadLlmConfig
|
|
5
|
+
} from "./chunk-3Z3KXKNU.js";
|
|
6
|
+
|
|
7
|
+
// src/cli.ts
|
|
8
|
+
import { join } from "path";
|
|
9
|
+
import { tool } from "@langchain/core/tools";
|
|
10
|
+
import { z } from "zod";
|
|
11
|
+
import { HumanMessage, ToolMessage } from "@langchain/core/messages";
|
|
12
|
+
// Fallback LLM settings used when no YAML config supplies an `llm` section.
// All values come from the environment, defaulting to gpt-4o-mini at temp 0.
var defaultLlmSection = {
  provider: "openai",
  model: process.env.OPENAI_MODEL ?? "gpt-4o-mini",
  temperature: 0,
  apiKey: process.env.OPENAI_API_KEY
};
if (process.env.OPENAI_BASE_URL) {
  defaultLlmSection.baseURL = process.env.OPENAI_BASE_URL;
}
|
|
19
|
+
/**
 * Split CLI arguments into an optional `--config <path>` pair and a free-text
 * query assembled from every other argument.
 * Falls back to the query "hi" when nothing else is given.
 * @returns {{ configPath: string | undefined, query: string }}
 */
function parseArgv() {
  const argv = process.argv.slice(2);
  const queryParts = [];
  let configPath;
  let i = 0;
  while (i < argv.length) {
    const current = argv[i];
    if (current === "--config" && argv[i + 1]) {
      configPath = argv[i + 1];
      i += 2;
    } else {
      queryParts.push(current);
      i += 1;
    }
  }
  const query = queryParts.join(" ").trim() || "hi";
  return { configPath, query };
}
|
|
34
|
+
/**
 * Resolve the llm config section: load YAML from the given path (or from
 * ./config/llm.yaml under the current working directory) and fall back to the
 * env-derived defaults when the file yields nothing.
 */
function loadLlmSection(configPath) {
  const resolvedPath = configPath ?? join(process.cwd(), "config", "llm.yaml");
  const loaded = loadLlmConfig(resolvedPath);
  if (loaded != null) return loaded;
  return defaultLlmSection;
}
|
|
39
|
+
// Demo tool: canned weather answers for SF and NY, generic response otherwise.
var getWeather = tool(
  (input) => {
    const place = input.location.toLowerCase();
    switch (place) {
      case "sf":
      case "san francisco":
        return "It's 60\xB0F and foggy in San Francisco.";
      case "ny":
      case "new york":
        return "It's 72\xB0F and partly cloudy in New York.";
      default:
        return `Weather for ${input.location}: 70\xB0F and sunny.`;
    }
  },
  {
    name: "get_weather",
    description: "Get the current weather for a location.",
    schema: z.object({
      location: z.string().describe("City or place name (e.g. SF, New York)")
    })
  }
);
|
|
58
|
+
// Demo tool: numeric addition, returned as a string per the tool protocol.
var addNumbers = tool(
  (input) => {
    const sum = input.a + input.b;
    return String(sum);
  },
  {
    name: "add_numbers",
    description: "Add two numbers.",
    schema: z.object({
      a: z.number().describe("First number"),
      b: z.number().describe("Second number")
    })
  }
);
|
|
69
|
+
// All tools exposed to the model, plus a name -> tool lookup used to dispatch
// tool calls that come back from the LLM.
var tools = [getWeather, addNumbers];
var toolsByName = new Map(tools.map((t) => [t.name, t]));
// Upper bound on model round-trips before the agent loop gives up.
var MAX_TURNS = 10;
|
|
72
|
+
/**
 * Run a simple tool-calling agent loop.
 *
 * Sends the conversation to the model; when the model requests tool calls,
 * executes each requested tool locally, appends each result as a ToolMessage,
 * and loops. Stops when the model answers without tool calls, or after
 * MAX_TURNS round-trips.
 *
 * @param model Chat model supporting bindTools (e.g. from createChatModelFromLlmConfig).
 * @param query User question that seeds the conversation.
 * @returns The model's final text answer, or a max-turns notice.
 * @throws Error if the model does not implement bindTools.
 */
async function runAgent(model, query) {
  const withTools = model.bindTools?.(tools, { tool_choice: "auto" });
  if (!withTools) throw new Error("Model does not support bindTools");
  const messages = [new HumanMessage(query)];
  for (let turn = 0; turn < MAX_TURNS; turn++) {
    const response = await withTools.invoke(messages);
    const aiMessage = response;
    if (!aiMessage.tool_calls?.length) {
      // Final answer: flatten string content or content-part arrays to text.
      const content = typeof aiMessage.content === "string" ? aiMessage.content : Array.isArray(aiMessage.content) ? aiMessage.content.map((c) => "text" in c ? c.text : "").join("") : String(aiMessage.content ?? "");
      return content;
    }
    messages.push(aiMessage);
    for (const tc of aiMessage.tool_calls) {
      // Some providers omit tool-call ids; synthesize a deterministic one.
      const id = tc.id ?? `call_${turn}_${tc.name}`;
      const toolFn = toolsByName.get(tc.name);
      let result;
      if (toolFn) {
        try {
          // Guard against providers that omit args entirely.
          const out = await toolFn.invoke(
            tc.args ?? {}
          );
          // JSON.stringify(undefined) yields undefined (not a string);
          // coerce so ToolMessage always receives string content.
          result = typeof out === "string" ? out : JSON.stringify(out) ?? "";
        } catch (err) {
          result = `Error: ${err instanceof Error ? err.message : String(err)}`;
        }
      } else {
        result = `Unknown tool: ${tc.name}`;
      }
      messages.push(new ToolMessage({ content: result, tool_call_id: id }));
    }
  }
  return "Agent reached max turns without a final answer.";
}
|
|
105
|
+
/**
 * CLI entry point: parse argv, build the chat model from the resolved llm
 * config section, run the agent loop, and print the answer to stdout.
 */
async function main() {
  const { configPath, query } = parseArgv();
  const llmSection = loadLlmSection(configPath);
  const model = createChatModelFromLlmConfig({ llmSection });
  console.log("Query:", query);
  console.log("---");
  const answer = await runAgent(model, query);
  console.log("Answer:", answer);
  console.log("---");
  console.log("Done.");
}
|
|
116
|
+
// Kick off the CLI; any unhandled error is printed and mapped to exit code 1.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});
|
|
120
|
+
//# sourceMappingURL=cli.js.map
|
package/dist/cli.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/cli.ts"],"sourcesContent":["#!/usr/bin/env node\n/**\n * CLI for @easynet/agent-llm: run the LangChain agent with a query.\n * Usage: agent-llm \"your question\"\n * or: agent-llm --config ./config/llm.yaml \"hi\"\n * or: npx @easynet/agent-llm \"hi\"\n */\n\nimport { join } from \"node:path\";\nimport { createChatModelFromLlmConfig, loadLlmConfig } from \"./index.js\";\nimport { tool } from \"@langchain/core/tools\";\nimport { z } from \"zod\";\nimport { HumanMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport type { BaseMessage } from \"@langchain/core/messages\";\n\nconst defaultLlmSection = {\n provider: \"openai\" as const,\n model: process.env.OPENAI_MODEL ?? \"gpt-4o-mini\",\n temperature: 0,\n apiKey: process.env.OPENAI_API_KEY,\n ...(process.env.OPENAI_BASE_URL && { baseURL: process.env.OPENAI_BASE_URL }),\n};\n\nfunction parseArgv(): { configPath: string | undefined; query: string } {\n const args = process.argv.slice(2);\n let configPath: string | undefined;\n const rest: string[] = [];\n for (let i = 0; i < args.length; i++) {\n if (args[i] === \"--config\" && args[i + 1]) {\n configPath = args[i + 1];\n i++;\n } else {\n rest.push(args[i]);\n }\n }\n const query = rest.join(\" \").trim() || \"hi\";\n return { configPath, query };\n}\n\nfunction loadLlmSection(configPath: string | undefined): unknown {\n const path = configPath ?? join(process.cwd(), \"config\", \"llm.yaml\");\n const llm = loadLlmConfig(path);\n return llm ?? 
defaultLlmSection;\n}\n\nconst getWeather = tool(\n (input: { location: string }) => {\n const loc = input.location.toLowerCase();\n if ([\"sf\", \"san francisco\"].includes(loc)) {\n return \"It's 60°F and foggy in San Francisco.\";\n }\n if ([\"ny\", \"new york\"].includes(loc)) {\n return \"It's 72°F and partly cloudy in New York.\";\n }\n return `Weather for ${input.location}: 70°F and sunny.`;\n },\n {\n name: \"get_weather\",\n description: \"Get the current weather for a location.\",\n schema: z.object({\n location: z.string().describe(\"City or place name (e.g. SF, New York)\"),\n }),\n }\n);\n\nconst addNumbers = tool(\n (input: { a: number; b: number }) => String(input.a + input.b),\n {\n name: \"add_numbers\",\n description: \"Add two numbers.\",\n schema: z.object({\n a: z.number().describe(\"First number\"),\n b: z.number().describe(\"Second number\"),\n }),\n }\n);\n\nconst tools = [getWeather, addNumbers];\nconst toolsByName = new Map(tools.map((t) => [t.name, t]));\n\nconst MAX_TURNS = 10;\n\nasync function runAgent(\n model: ReturnType<typeof createChatModelFromLlmConfig>,\n query: string\n): Promise<string> {\n const withTools = model.bindTools?.(tools, { tool_choice: \"auto\" });\n if (!withTools) throw new Error(\"Model does not support bindTools\");\n const messages: BaseMessage[] = [new HumanMessage(query)];\n\n for (let turn = 0; turn < MAX_TURNS; turn++) {\n const response = await withTools.invoke(messages);\n const aiMessage = response as AIMessage;\n\n if (!aiMessage.tool_calls?.length) {\n const content =\n typeof aiMessage.content === \"string\"\n ? aiMessage.content\n : Array.isArray(aiMessage.content)\n ? (aiMessage.content as { type?: string; text?: string }[])\n .map((c) => (\"text\" in c ? c.text : \"\"))\n .join(\"\")\n : String(aiMessage.content ?? \"\");\n return content;\n }\n\n messages.push(aiMessage);\n\n for (const tc of aiMessage.tool_calls) {\n const id = tc.id ?? 
`call_${turn}_${tc.name}`;\n const toolFn = toolsByName.get(tc.name as \"get_weather\" | \"add_numbers\");\n let result: string;\n if (toolFn) {\n try {\n const out = await (toolFn as { invoke: (args: Record<string, unknown>) => Promise<unknown> }).invoke(\n tc.args as Record<string, unknown>\n );\n result = typeof out === \"string\" ? out : JSON.stringify(out);\n } catch (err) {\n result = `Error: ${err instanceof Error ? err.message : String(err)}`;\n }\n } else {\n result = `Unknown tool: ${tc.name}`;\n }\n messages.push(new ToolMessage({ content: result, tool_call_id: id }));\n }\n }\n\n return \"Agent reached max turns without a final answer.\";\n}\n\nasync function main() {\n const { configPath, query } = parseArgv();\n const llmSection = loadLlmSection(configPath);\n const model = createChatModelFromLlmConfig({ llmSection });\n\n console.log(\"Query:\", query);\n console.log(\"---\");\n\n const answer = await runAgent(model, query);\n console.log(\"Answer:\", answer);\n console.log(\"---\");\n console.log(\"Done.\");\n}\n\nmain().catch((err) => {\n console.error(err);\n 
process.exit(1);\n});\n"],"mappings":";;;;;;;AAQA,SAAS,YAAY;AAErB,SAAS,YAAY;AACrB,SAAS,SAAS;AAClB,SAAS,cAAyB,mBAAmB;AAGrD,IAAM,oBAAoB;AAAA,EACxB,UAAU;AAAA,EACV,OAAO,QAAQ,IAAI,gBAAgB;AAAA,EACnC,aAAa;AAAA,EACb,QAAQ,QAAQ,IAAI;AAAA,EACpB,GAAI,QAAQ,IAAI,mBAAmB,EAAE,SAAS,QAAQ,IAAI,gBAAgB;AAC5E;AAEA,SAAS,YAA+D;AACtE,QAAM,OAAO,QAAQ,KAAK,MAAM,CAAC;AACjC,MAAI;AACJ,QAAM,OAAiB,CAAC;AACxB,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,QAAI,KAAK,CAAC,MAAM,cAAc,KAAK,IAAI,CAAC,GAAG;AACzC,mBAAa,KAAK,IAAI,CAAC;AACvB;AAAA,IACF,OAAO;AACL,WAAK,KAAK,KAAK,CAAC,CAAC;AAAA,IACnB;AAAA,EACF;AACA,QAAM,QAAQ,KAAK,KAAK,GAAG,EAAE,KAAK,KAAK;AACvC,SAAO,EAAE,YAAY,MAAM;AAC7B;AAEA,SAAS,eAAe,YAAyC;AAC/D,QAAM,OAAO,cAAc,KAAK,QAAQ,IAAI,GAAG,UAAU,UAAU;AACnE,QAAM,MAAM,cAAc,IAAI;AAC9B,SAAO,OAAO;AAChB;AAEA,IAAM,aAAa;AAAA,EACjB,CAAC,UAAgC;AAC/B,UAAM,MAAM,MAAM,SAAS,YAAY;AACvC,QAAI,CAAC,MAAM,eAAe,EAAE,SAAS,GAAG,GAAG;AACzC,aAAO;AAAA,IACT;AACA,QAAI,CAAC,MAAM,UAAU,EAAE,SAAS,GAAG,GAAG;AACpC,aAAO;AAAA,IACT;AACA,WAAO,eAAe,MAAM,QAAQ;AAAA,EACtC;AAAA,EACA;AAAA,IACE,MAAM;AAAA,IACN,aAAa;AAAA,IACb,QAAQ,EAAE,OAAO;AAAA,MACf,UAAU,EAAE,OAAO,EAAE,SAAS,wCAAwC;AAAA,IACxE,CAAC;AAAA,EACH;AACF;AAEA,IAAM,aAAa;AAAA,EACjB,CAAC,UAAoC,OAAO,MAAM,IAAI,MAAM,CAAC;AAAA,EAC7D;AAAA,IACE,MAAM;AAAA,IACN,aAAa;AAAA,IACb,QAAQ,EAAE,OAAO;AAAA,MACf,GAAG,EAAE,OAAO,EAAE,SAAS,cAAc;AAAA,MACrC,GAAG,EAAE,OAAO,EAAE,SAAS,eAAe;AAAA,IACxC,CAAC;AAAA,EACH;AACF;AAEA,IAAM,QAAQ,CAAC,YAAY,UAAU;AACrC,IAAM,cAAc,IAAI,IAAI,MAAM,IAAI,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AAEzD,IAAM,YAAY;AAElB,eAAe,SACb,OACA,OACiB;AACjB,QAAM,YAAY,MAAM,YAAY,OAAO,EAAE,aAAa,OAAO,CAAC;AAClE,MAAI,CAAC,UAAW,OAAM,IAAI,MAAM,kCAAkC;AAClE,QAAM,WAA0B,CAAC,IAAI,aAAa,KAAK,CAAC;AAExD,WAAS,OAAO,GAAG,OAAO,WAAW,QAAQ;AAC3C,UAAM,WAAW,MAAM,UAAU,OAAO,QAAQ;AAChD,UAAM,YAAY;AAElB,QAAI,CAAC,UAAU,YAAY,QAAQ;AACjC,YAAM,UACJ,OAAO,UAAU,YAAY,WACzB,UAAU,UACV,MAAM,QAAQ,UAAU,OAAO,IAC5B,UAAU,QACR,IAAI,CAAC,MAAO,UAAU,IAAI,EAAE,OAAO,EAAG,EACtC,KAAK,EAAE,IACV,OAAO,UAAU,WAAW,EAAE;AACtC,aAAO;AAAA,IACT;AAEA,aAAS,KAAK,SAAS;AAEvB,eAAW,MAAM
,UAAU,YAAY;AACrC,YAAM,KAAK,GAAG,MAAM,QAAQ,IAAI,IAAI,GAAG,IAAI;AAC3C,YAAM,SAAS,YAAY,IAAI,GAAG,IAAqC;AACvE,UAAI;AACJ,UAAI,QAAQ;AACV,YAAI;AACF,gBAAM,MAAM,MAAO,OAA2E;AAAA,YAC5F,GAAG;AAAA,UACL;AACA,mBAAS,OAAO,QAAQ,WAAW,MAAM,KAAK,UAAU,GAAG;AAAA,QAC7D,SAAS,KAAK;AACZ,mBAAS,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG,CAAC;AAAA,QACrE;AAAA,MACF,OAAO;AACL,iBAAS,iBAAiB,GAAG,IAAI;AAAA,MACnC;AACA,eAAS,KAAK,IAAI,YAAY,EAAE,SAAS,QAAQ,cAAc,GAAG,CAAC,CAAC;AAAA,IACtE;AAAA,EACF;AAEA,SAAO;AACT;AAEA,eAAe,OAAO;AACpB,QAAM,EAAE,YAAY,MAAM,IAAI,UAAU;AACxC,QAAM,aAAa,eAAe,UAAU;AAC5C,QAAM,QAAQ,6BAA6B,EAAE,WAAW,CAAC;AAEzD,UAAQ,IAAI,UAAU,KAAK;AAC3B,UAAQ,IAAI,KAAK;AAEjB,QAAM,SAAS,MAAM,SAAS,OAAO,KAAK;AAC1C,UAAQ,IAAI,WAAW,MAAM;AAC7B,UAAQ,IAAI,KAAK;AACjB,UAAQ,IAAI,OAAO;AACrB;AAEA,KAAK,EAAE,MAAM,CAAC,QAAQ;AACpB,UAAQ,MAAM,GAAG;AACjB,UAAQ,KAAK,CAAC;AAChB,CAAC;","names":[]}
|
package/dist/config.d.ts
CHANGED
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Parse agent.yaml llm section into normalized LLMConfig[] and default id.
|
|
3
|
-
* Supports: flat (
|
|
3
|
+
* Supports: flat (each model keyed by name), instances[], or single object.
|
|
4
4
|
*/
|
|
5
5
|
import type { LLMConfig } from "./types.js";
|
|
6
6
|
/**
|
|
7
|
-
*
|
|
7
|
+
* Parse llm section: flat (each model keyed by name), default+instances, or single object.
|
|
8
8
|
*/
|
|
9
9
|
export declare function parseLlmSection(section: unknown): {
|
|
10
10
|
defaultId: string;
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
export interface CreateAgentLlMOptions {
|
|
2
|
+
/** Path to YAML config file. If omitted, uses llm.yaml in cwd or config/llm.yaml in cwd/parent. */
|
|
3
|
+
configPath?: string;
|
|
4
|
+
}
|
|
5
|
+
/**
|
|
6
|
+
* Create a LangChain-formatted LLM from config.
|
|
7
|
+
* Pass configPath to use a specific YAML file; otherwise uses llm.yaml (cwd) or config/llm.yaml (cwd/parent).
|
|
8
|
+
*/
|
|
9
|
+
export declare function createAgentLlM(options?: CreateAgentLlMOptions): import("@langchain/core/language_models/chat_models").BaseChatModel<import("@langchain/core/language_models/chat_models").BaseChatModelCallOptions, import("@langchain/core/messages").AIMessageChunk>;
|
|
10
|
+
//# sourceMappingURL=createAgentLlM.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"createAgentLlM.d.ts","sourceRoot":"","sources":["../src/createAgentLlM.ts"],"names":[],"mappings":"AASA,MAAM,WAAW,qBAAqB;IACpC,mGAAmG;IACnG,UAAU,CAAC,EAAE,MAAM,CAAC;CACrB;AAWD;;;GAGG;AACH,wBAAgB,cAAc,CAAC,OAAO,GAAE,qBAA0B,0MAOjE"}
|
package/dist/factory.d.ts
CHANGED
|
@@ -3,11 +3,11 @@
|
|
|
3
3
|
*/
|
|
4
4
|
import type { AgentConfigLlmSection, ILLMRegistry } from "./types.js";
|
|
5
5
|
export interface CreateLLMRegistryOptions {
|
|
6
|
-
/**
|
|
6
|
+
/** Parsed llm section (e.g. from loadAgentConfig's config.llm) */
|
|
7
7
|
llmSection: AgentConfigLlmSection | null | undefined;
|
|
8
8
|
}
|
|
9
9
|
/**
|
|
10
|
-
*
|
|
10
|
+
* Create LLM registry from agent config llm section; supports multiple providers/models, each LLM has id and type.
|
|
11
11
|
*/
|
|
12
12
|
export declare function createLLMRegistry(options: CreateLLMRegistryOptions): ILLMRegistry;
|
|
13
13
|
//# sourceMappingURL=factory.d.ts.map
|
package/dist/factory.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"factory.d.ts","sourceRoot":"","sources":["../src/factory.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,KAAK,EAAE,qBAAqB,EAAc,YAAY,EAAE,MAAM,YAAY,CAAC;AAElF,MAAM,WAAW,wBAAwB;IACvC,
|
|
1
|
+
{"version":3,"file":"factory.d.ts","sourceRoot":"","sources":["../src/factory.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,KAAK,EAAE,qBAAqB,EAAc,YAAY,EAAE,MAAM,YAAY,CAAC;AAElF,MAAM,WAAW,wBAAwB;IACvC,kEAAkE;IAClE,UAAU,EAAE,qBAAqB,GAAG,IAAI,GAAG,SAAS,CAAC;CACtD;AAED;;GAEG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,wBAAwB,GAAG,YAAY,CAyBjF"}
|
package/dist/index.d.ts
CHANGED
|
@@ -4,6 +4,8 @@
|
|
|
4
4
|
* Extensions register via registerProvider/registerChatModelProvider; call loadLLMExtensions() before using them.
|
|
5
5
|
*/
|
|
6
6
|
export { parseLlmSection } from "./config.js";
|
|
7
|
+
export { loadLlmConfig, parseLlmYaml, substituteEnv } from "./loadLlmConfig.js";
|
|
8
|
+
export type { LoadLlmConfigOptions } from "./loadLlmConfig.js";
|
|
7
9
|
export { createLLMRegistry } from "./factory.js";
|
|
8
10
|
export type { CreateLLMRegistryOptions } from "./factory.js";
|
|
9
11
|
export { createClient, registerProvider } from "./providers/index.js";
|
|
@@ -11,6 +13,8 @@ export { registerChatModelProvider, getChatModelFactory } from "./chatModelRegis
|
|
|
11
13
|
export { createOpenAIClient, createOpenAIChatClient, createOpenAIImageClient } from "./providers/openai.js";
|
|
12
14
|
export { createChatModelFromLlmConfig } from "./llmAdapter.js";
|
|
13
15
|
export type { CreateChatModelFromLlmConfigOptions } from "./llmAdapter.js";
|
|
16
|
+
export { createAgentLlM } from "./createAgentLlM.js";
|
|
17
|
+
export type { CreateAgentLlMOptions } from "./createAgentLlM.js";
|
|
14
18
|
export { loadLLMExtensions, resolveLLMExtensionPackages } from "./loadLLMExtensions.js";
|
|
15
19
|
export type { LLMType, LLMConfig, AgentConfigLlmSection, ChatMessage, ChatResult, ImageResult, ToolDefinition, ChatWithToolsMessage, ChatWithToolsResult, ILLMClient, ILLMRegistry, } from "./types.js";
|
|
16
20
|
//# sourceMappingURL=index.d.ts.map
|
package/dist/index.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAC9C,OAAO,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AACjD,YAAY,EAAE,wBAAwB,EAAE,MAAM,cAAc,CAAC;AAC7D,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AACtE,OAAO,EAAE,yBAAyB,EAAE,mBAAmB,EAAE,MAAM,wBAAwB,CAAC;AACxF,OAAO,EAAE,kBAAkB,EAAE,sBAAsB,EAAE,uBAAuB,EAAE,MAAM,uBAAuB,CAAC;AAC5G,OAAO,EAAE,4BAA4B,EAAE,MAAM,iBAAiB,CAAC;AAC/D,YAAY,EAAE,mCAAmC,EAAE,MAAM,iBAAiB,CAAC;AAC3E,OAAO,EAAE,iBAAiB,EAAE,2BAA2B,EAAE,MAAM,wBAAwB,CAAC;AAExF,YAAY,EACV,OAAO,EACP,SAAS,EACT,qBAAqB,EACrB,WAAW,EACX,UAAU,EACV,WAAW,EACX,cAAc,EACd,oBAAoB,EACpB,mBAAmB,EACnB,UAAU,EACV,YAAY,GACb,MAAM,YAAY,CAAC"}
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAC9C,OAAO,EAAE,aAAa,EAAE,YAAY,EAAE,aAAa,EAAE,MAAM,oBAAoB,CAAC;AAChF,YAAY,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAC/D,OAAO,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AACjD,YAAY,EAAE,wBAAwB,EAAE,MAAM,cAAc,CAAC;AAC7D,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AACtE,OAAO,EAAE,yBAAyB,EAAE,mBAAmB,EAAE,MAAM,wBAAwB,CAAC;AACxF,OAAO,EAAE,kBAAkB,EAAE,sBAAsB,EAAE,uBAAuB,EAAE,MAAM,uBAAuB,CAAC;AAC5G,OAAO,EAAE,4BAA4B,EAAE,MAAM,iBAAiB,CAAC;AAC/D,YAAY,EAAE,mCAAmC,EAAE,MAAM,iBAAiB,CAAC;AAC3E,OAAO,EAAE,cAAc,EAAE,MAAM,qBAAqB,CAAC;AACrD,YAAY,EAAE,qBAAqB,EAAE,MAAM,qBAAqB,CAAC;AACjE,OAAO,EAAE,iBAAiB,EAAE,2BAA2B,EAAE,MAAM,wBAAwB,CAAC;AAExF,YAAY,EACV,OAAO,EACP,SAAS,EACT,qBAAqB,EACrB,WAAW,EACX,UAAU,EACV,WAAW,EACX,cAAc,EACd,oBAAoB,EACpB,mBAAmB,EACnB,UAAU,EACV,YAAY,GACb,MAAM,YAAY,CAAC"}
|
package/dist/index.js
CHANGED
|
@@ -1,344 +1,23 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
}
|
|
20
|
-
if (Array.isArray(section)) {
|
|
21
|
-
const configs = section.filter((i) => i != null && typeof i === "object").map((item, i) => normalizeLlmConfig({ ...item, id: item.id ?? item.name ?? String(i) })).filter((c) => c != null);
|
|
22
|
-
const defaultId = configs.length > 0 ? configs[0].id : DEFAULT_LLM_ID;
|
|
23
|
-
return { defaultId, configs };
|
|
24
|
-
}
|
|
25
|
-
const s = section;
|
|
26
|
-
const flatEntries = Object.entries(s).filter(
|
|
27
|
-
([k, v]) => !RESERVED_KEYS.has(k) && v != null && typeof v === "object" && !Array.isArray(v)
|
|
28
|
-
);
|
|
29
|
-
if (flatEntries.length > 0) {
|
|
30
|
-
const configs = [];
|
|
31
|
-
for (const [id, entry] of flatEntries) {
|
|
32
|
-
const c = entryToLlmConfig(id, entry);
|
|
33
|
-
if (c) configs.push(c);
|
|
34
|
-
}
|
|
35
|
-
const defaultId = typeof s.default === "string" && s.default && flatEntries.some(([k]) => k === s.default) ? s.default : configs.length > 0 ? configs[0].id : DEFAULT_LLM_ID;
|
|
36
|
-
return { defaultId, configs };
|
|
37
|
-
}
|
|
38
|
-
if (Array.isArray(s.instances)) {
|
|
39
|
-
const configs = s.instances.filter((i) => i != null && typeof i === "object").map((i) => normalizeLlmConfig(i)).filter((c) => c != null);
|
|
40
|
-
const defaultId = typeof s.default === "string" && s.default ? s.default : configs.length > 0 ? configs[0].id : DEFAULT_LLM_ID;
|
|
41
|
-
return { defaultId, configs };
|
|
42
|
-
}
|
|
43
|
-
if (typeof s.provider === "string" || typeof s.model === "string" || typeof s.name === "string") {
|
|
44
|
-
const one = singleObjectToLlmConfig(s);
|
|
45
|
-
return { defaultId: one.id, configs: [one] };
|
|
46
|
-
}
|
|
47
|
-
return { defaultId: DEFAULT_LLM_ID, configs: [] };
|
|
48
|
-
}
|
|
49
|
-
var EXTENSION_OPTION_KEYS = ["featureKey", "tenant", "authToken", "verifySSL", "bypassAuth", "host", "resolveHost", "timeoutMs", "options"];
|
|
50
|
-
function entryToLlmConfig(id, entry) {
|
|
51
|
-
const opts = entry.options;
|
|
52
|
-
const baseURL = typeof entry.base_url === "string" ? entry.base_url : typeof entry.baseURL === "string" ? entry.baseURL : void 0;
|
|
53
|
-
const model = typeof entry.name === "string" ? entry.name : typeof entry.model === "string" ? entry.model : void 0;
|
|
54
|
-
const provider = typeof entry.provider === "string" && entry.provider ? entry.provider : "openai";
|
|
55
|
-
const config = {
|
|
56
|
-
id,
|
|
57
|
-
type: "chat",
|
|
58
|
-
provider,
|
|
59
|
-
model,
|
|
60
|
-
temperature: typeof opts?.temperature === "number" ? opts.temperature : typeof entry.temperature === "number" ? entry.temperature : void 0,
|
|
61
|
-
apiKey: typeof opts?.apiKey === "string" ? opts.apiKey : typeof entry.apiKey === "string" ? entry.apiKey : void 0,
|
|
62
|
-
baseURL
|
|
63
|
-
};
|
|
64
|
-
if (typeof entry.type === "string" && entry.type === "image") config.type = "image";
|
|
65
|
-
if (opts && typeof opts === "object") config.options = opts;
|
|
66
|
-
for (const k of EXTENSION_OPTION_KEYS) {
|
|
67
|
-
if (entry[k] !== void 0) config[k] = entry[k];
|
|
68
|
-
else if (opts && opts[k] !== void 0) config[k] = opts[k];
|
|
69
|
-
}
|
|
70
|
-
return config;
|
|
71
|
-
}
|
|
72
|
-
function singleObjectToLlmConfig(s) {
|
|
73
|
-
const one = {
|
|
74
|
-
id: DEFAULT_LLM_ID,
|
|
75
|
-
type: "chat",
|
|
76
|
-
provider: typeof s.provider === "string" ? s.provider : "openai",
|
|
77
|
-
model: typeof s.model === "string" ? s.model : typeof s.name === "string" ? s.name : void 0,
|
|
78
|
-
temperature: typeof s.temperature === "number" ? s.temperature : void 0,
|
|
79
|
-
apiKey: typeof s.apiKey === "string" ? s.apiKey : void 0,
|
|
80
|
-
baseURL: typeof s.baseURL === "string" ? s.baseURL : typeof s.base_url === "string" ? s.base_url : void 0
|
|
81
|
-
};
|
|
82
|
-
Object.keys(s).forEach((k) => {
|
|
83
|
-
if (!["id", "type", "provider", "model", "name", "temperature", "apiKey", "baseURL", "base_url", "default", "instances"].includes(k)) {
|
|
84
|
-
one[k] = s[k];
|
|
85
|
-
}
|
|
86
|
-
});
|
|
87
|
-
return one;
|
|
88
|
-
}
|
|
89
|
-
function normalizeLlmConfig(o) {
|
|
90
|
-
const id = typeof o.id === "string" && o.id ? o.id : DEFAULT_LLM_ID;
|
|
91
|
-
const type = o.type === "image" ? "image" : "chat";
|
|
92
|
-
const provider = typeof o.provider === "string" && o.provider ? o.provider : "openai";
|
|
93
|
-
const config = {
|
|
94
|
-
id,
|
|
95
|
-
type,
|
|
96
|
-
provider,
|
|
97
|
-
model: typeof o.model === "string" ? o.model : typeof o.name === "string" ? o.name : void 0,
|
|
98
|
-
temperature: typeof o.temperature === "number" ? o.temperature : void 0,
|
|
99
|
-
apiKey: typeof o.apiKey === "string" ? o.apiKey : void 0,
|
|
100
|
-
baseURL: typeof o.baseURL === "string" ? o.baseURL : typeof o.base_url === "string" ? o.base_url : void 0
|
|
101
|
-
};
|
|
102
|
-
Object.keys(o).forEach((k) => {
|
|
103
|
-
if (!["id", "type", "provider", "model", "name", "temperature", "apiKey", "baseURL", "base_url"].includes(k)) {
|
|
104
|
-
config[k] = o[k];
|
|
105
|
-
}
|
|
106
|
-
});
|
|
107
|
-
return config;
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
// src/providers/openai.ts
|
|
111
|
-
import OpenAI from "openai";
|
|
112
|
-
function getApiKey(config) {
|
|
113
|
-
const key = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
|
|
114
|
-
if (!key) throw new Error("OpenAI-compatible apiKey required (config.apiKey or OPENAI_API_KEY)");
|
|
115
|
-
return key;
|
|
116
|
-
}
|
|
117
|
-
function createOpenAIClientOptions(config) {
|
|
118
|
-
const opts = { apiKey: getApiKey(config) };
|
|
119
|
-
if (typeof config.baseURL === "string" && config.baseURL) opts.baseURL = config.baseURL;
|
|
120
|
-
return opts;
|
|
121
|
-
}
|
|
122
|
-
function serializeMessage(m) {
|
|
123
|
-
if (m.role === "tool")
|
|
124
|
-
return { role: "tool", content: m.content, tool_call_id: m.tool_call_id };
|
|
125
|
-
if (m.role === "assistant" && "tool_calls" in m && m.tool_calls?.length) {
|
|
126
|
-
return {
|
|
127
|
-
role: "assistant",
|
|
128
|
-
content: m.content ?? null,
|
|
129
|
-
tool_calls: m.tool_calls.map((tc) => ({
|
|
130
|
-
id: tc.id,
|
|
131
|
-
type: "function",
|
|
132
|
-
function: { name: tc.function.name, arguments: tc.function.arguments }
|
|
133
|
-
}))
|
|
134
|
-
};
|
|
135
|
-
}
|
|
136
|
-
return { role: m.role, content: m.content };
|
|
137
|
-
}
|
|
138
|
-
function createOpenAIChatClient(config) {
|
|
139
|
-
const client = new OpenAI(createOpenAIClientOptions(config));
|
|
140
|
-
const model = config.model ?? process.env.OPENAI_MODEL ?? "gpt-4o-mini";
|
|
141
|
-
const temperature = config.temperature ?? 0;
|
|
142
|
-
return {
|
|
143
|
-
id: config.id,
|
|
144
|
-
type: "chat",
|
|
145
|
-
async chat(messages) {
|
|
146
|
-
const resp = await client.chat.completions.create({
|
|
147
|
-
model,
|
|
148
|
-
temperature,
|
|
149
|
-
messages: messages.map((m) => ({ role: m.role, content: m.content }))
|
|
150
|
-
});
|
|
151
|
-
const content = resp.choices[0]?.message?.content ?? "";
|
|
152
|
-
const usage = resp.usage ? { promptTokens: resp.usage.prompt_tokens, completionTokens: resp.usage.completion_tokens } : void 0;
|
|
153
|
-
return { content, usage };
|
|
154
|
-
},
|
|
155
|
-
async chatWithTools(messages, tools, _options) {
|
|
156
|
-
const resp = await client.chat.completions.create({
|
|
157
|
-
model,
|
|
158
|
-
temperature,
|
|
159
|
-
messages: messages.map(serializeMessage),
|
|
160
|
-
tools: tools.map((t) => ({
|
|
161
|
-
type: "function",
|
|
162
|
-
function: {
|
|
163
|
-
name: t.function.name,
|
|
164
|
-
description: t.function.description,
|
|
165
|
-
parameters: t.function.parameters ?? void 0
|
|
166
|
-
}
|
|
167
|
-
}))
|
|
168
|
-
});
|
|
169
|
-
const msg = resp.choices[0]?.message;
|
|
170
|
-
const usage = resp.usage ? { promptTokens: resp.usage.prompt_tokens, completionTokens: resp.usage.completion_tokens } : void 0;
|
|
171
|
-
return {
|
|
172
|
-
message: {
|
|
173
|
-
role: "assistant",
|
|
174
|
-
content: msg?.content ?? null,
|
|
175
|
-
tool_calls: msg?.tool_calls?.map((tc) => ({
|
|
176
|
-
id: tc.id,
|
|
177
|
-
type: "function",
|
|
178
|
-
function: {
|
|
179
|
-
name: tc.function?.name ?? "",
|
|
180
|
-
arguments: tc.function?.arguments ?? ""
|
|
181
|
-
}
|
|
182
|
-
}))
|
|
183
|
-
},
|
|
184
|
-
usage
|
|
185
|
-
};
|
|
186
|
-
}
|
|
187
|
-
};
|
|
188
|
-
}
|
|
189
|
-
function createOpenAIImageClient(config) {
|
|
190
|
-
const client = new OpenAI(createOpenAIClientOptions(config));
|
|
191
|
-
const model = config.model ?? "dall-e-3";
|
|
192
|
-
return {
|
|
193
|
-
id: config.id,
|
|
194
|
-
type: "image",
|
|
195
|
-
async chat() {
|
|
196
|
-
throw new Error("OpenAI image model does not support chat; use generateImage()");
|
|
197
|
-
},
|
|
198
|
-
async generateImage(options) {
|
|
199
|
-
const resp = await client.images.generate({
|
|
200
|
-
model,
|
|
201
|
-
prompt: options.prompt,
|
|
202
|
-
size: options.size ?? "1024x1024",
|
|
203
|
-
n: options.n ?? 1,
|
|
204
|
-
response_format: "url"
|
|
205
|
-
});
|
|
206
|
-
const url = resp.data?.[0]?.url ?? void 0;
|
|
207
|
-
return { url };
|
|
208
|
-
}
|
|
209
|
-
};
|
|
210
|
-
}
|
|
211
|
-
function createOpenAIClient(config) {
|
|
212
|
-
if (config.type === "image") return createOpenAIImageClient(config);
|
|
213
|
-
return createOpenAIChatClient(config);
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
// src/providers/index.ts
|
|
217
|
-
var OPENAI_COMPATIBLE = "openai-compatible";
|
|
218
|
-
function createOpenAICompat(config) {
|
|
219
|
-
return createOpenAIClient(config);
|
|
220
|
-
}
|
|
221
|
-
var PROVIDERS = {
|
|
222
|
-
openai: createOpenAICompat,
|
|
223
|
-
[OPENAI_COMPATIBLE]: createOpenAICompat
|
|
224
|
-
};
|
|
225
|
-
function createClient(config) {
|
|
226
|
-
const p = (config.provider ?? "").toLowerCase();
|
|
227
|
-
const fn = PROVIDERS[p];
|
|
228
|
-
if (!fn) {
|
|
229
|
-
const supported = [.../* @__PURE__ */ new Set([...Object.keys(PROVIDERS), "extension providers"])].sort().join(", ");
|
|
230
|
-
throw new Error(
|
|
231
|
-
`Unsupported LLM provider: ${config.provider}. Supported: ${supported}.`
|
|
232
|
-
);
|
|
233
|
-
}
|
|
234
|
-
return fn(config);
|
|
235
|
-
}
|
|
236
|
-
function registerProvider(name, factory) {
|
|
237
|
-
PROVIDERS[name.toLowerCase()] = factory;
|
|
238
|
-
}
|
|
239
|
-
|
|
240
|
-
// src/factory.ts
|
|
241
|
-
function createLLMRegistry(options) {
|
|
242
|
-
const { defaultId, configs } = parseLlmSection(options.llmSection);
|
|
243
|
-
const map = /* @__PURE__ */ new Map();
|
|
244
|
-
for (const config of configs) {
|
|
245
|
-
try {
|
|
246
|
-
const client = createClient(config);
|
|
247
|
-
map.set(config.id, client);
|
|
248
|
-
} catch (err) {
|
|
249
|
-
console.warn(`[agent-llm] Skip LLM "${config.id}": ${err instanceof Error ? err.message : String(err)}`);
|
|
250
|
-
}
|
|
251
|
-
}
|
|
252
|
-
return {
|
|
253
|
-
get(id) {
|
|
254
|
-
return map.get(id);
|
|
255
|
-
},
|
|
256
|
-
defaultId() {
|
|
257
|
-
if (map.has(defaultId)) return defaultId;
|
|
258
|
-
return map.size > 0 ? [...map.keys()][0] : void 0;
|
|
259
|
-
},
|
|
260
|
-
ids() {
|
|
261
|
-
return [...map.keys()];
|
|
262
|
-
}
|
|
263
|
-
};
|
|
264
|
-
}
|
|
265
|
-
|
|
266
|
-
// src/chatModelRegistry.ts
|
|
267
|
-
var CHAT_MODEL_FACTORIES = /* @__PURE__ */ new Map();
|
|
268
|
-
function registerChatModelProvider(providerName, factory) {
|
|
269
|
-
CHAT_MODEL_FACTORIES.set(providerName.toLowerCase(), factory);
|
|
270
|
-
}
|
|
271
|
-
function getChatModelFactory(providerName) {
|
|
272
|
-
return CHAT_MODEL_FACTORIES.get(providerName.toLowerCase());
|
|
273
|
-
}
|
|
274
|
-
|
|
275
|
-
// src/llmAdapter.ts
|
|
276
|
-
import { ChatOpenAI } from "@langchain/openai";
|
|
277
|
-
var DEFAULT_MODEL = "gpt-4o-mini";
|
|
278
|
-
function createChatModelFromLlmConfig(options) {
|
|
279
|
-
const { llmSection, modelEnv, apiKeyEnv } = options;
|
|
280
|
-
const { defaultId, configs } = parseLlmSection(llmSection ?? null);
|
|
281
|
-
const defaultConfig = configs.find((c) => c.id === defaultId) ?? configs[0];
|
|
282
|
-
if (!defaultConfig) {
|
|
283
|
-
const model2 = modelEnv ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;
|
|
284
|
-
const apiKey2 = apiKeyEnv ?? process.env.OPENAI_API_KEY;
|
|
285
|
-
return new ChatOpenAI({
|
|
286
|
-
model: model2,
|
|
287
|
-
temperature: 0,
|
|
288
|
-
...apiKey2 ? { apiKey: apiKey2 } : {}
|
|
289
|
-
});
|
|
290
|
-
}
|
|
291
|
-
const provider = defaultConfig.provider ?? "openai";
|
|
292
|
-
const chatModelFactory = getChatModelFactory(provider);
|
|
293
|
-
if (chatModelFactory) {
|
|
294
|
-
const config = {
|
|
295
|
-
...defaultConfig,
|
|
296
|
-
model: modelEnv ?? defaultConfig.model ?? (provider === "cis" ? process.env.CIS_MODEL ?? "gcp/gemini-2.5-pro" : defaultConfig.model),
|
|
297
|
-
temperature: typeof defaultConfig.temperature === "number" ? defaultConfig.temperature : 0
|
|
298
|
-
};
|
|
299
|
-
return chatModelFactory(config);
|
|
300
|
-
}
|
|
301
|
-
const model = modelEnv ?? defaultConfig?.model ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;
|
|
302
|
-
const apiKey = apiKeyEnv ?? defaultConfig?.apiKey ?? process.env.OPENAI_API_KEY;
|
|
303
|
-
const temperature = typeof defaultConfig?.temperature === "number" ? defaultConfig.temperature : 0;
|
|
304
|
-
const baseURL = defaultConfig?.baseURL;
|
|
305
|
-
const constructorOptions = {
|
|
306
|
-
model,
|
|
307
|
-
temperature,
|
|
308
|
-
...apiKey ? { apiKey } : {},
|
|
309
|
-
...baseURL ? { configuration: { baseURL } } : {}
|
|
310
|
-
};
|
|
311
|
-
return new ChatOpenAI(constructorOptions);
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
// src/loadLLMExtensions.ts
|
|
315
|
-
var loadedPackages = /* @__PURE__ */ new Set();
|
|
316
|
-
var DEFAULT_EXTENSIONS = ["wallee-llm"];
|
|
317
|
-
function resolveLLMExtensionPackages(types) {
|
|
318
|
-
const typeList = types == null ? [] : Array.isArray(types) ? types : [types];
|
|
319
|
-
const packages = typeList.filter(
|
|
320
|
-
(t) => typeof t === "string" && t.length > 0
|
|
321
|
-
);
|
|
322
|
-
return packages.length > 0 ? packages : DEFAULT_EXTENSIONS;
|
|
323
|
-
}
|
|
324
|
-
async function loadLLMExtensions(extensionPackages) {
|
|
325
|
-
const packages = extensionPackages ?? DEFAULT_EXTENSIONS;
|
|
326
|
-
for (const pkg of packages) {
|
|
327
|
-
if (loadedPackages.has(pkg)) continue;
|
|
328
|
-
loadedPackages.add(pkg);
|
|
329
|
-
try {
|
|
330
|
-
const m = await import(
|
|
331
|
-
/* @vite-ignore */
|
|
332
|
-
pkg
|
|
333
|
-
);
|
|
334
|
-
if (typeof m.registerLLMExtension === "function") {
|
|
335
|
-
m.registerLLMExtension();
|
|
336
|
-
}
|
|
337
|
-
} catch {
|
|
338
|
-
}
|
|
339
|
-
}
|
|
340
|
-
}
|
|
1
|
+
import {
|
|
2
|
+
createAgentLlM,
|
|
3
|
+
createChatModelFromLlmConfig,
|
|
4
|
+
createClient,
|
|
5
|
+
createLLMRegistry,
|
|
6
|
+
createOpenAIChatClient,
|
|
7
|
+
createOpenAIClient,
|
|
8
|
+
createOpenAIImageClient,
|
|
9
|
+
getChatModelFactory,
|
|
10
|
+
loadLLMExtensions,
|
|
11
|
+
loadLlmConfig,
|
|
12
|
+
parseLlmSection,
|
|
13
|
+
parseLlmYaml,
|
|
14
|
+
registerChatModelProvider,
|
|
15
|
+
registerProvider,
|
|
16
|
+
resolveLLMExtensionPackages,
|
|
17
|
+
substituteEnv
|
|
18
|
+
} from "./chunk-3Z3KXKNU.js";
|
|
341
19
|
export {
|
|
20
|
+
createAgentLlM,
|
|
342
21
|
createChatModelFromLlmConfig,
|
|
343
22
|
createClient,
|
|
344
23
|
createLLMRegistry,
|
|
@@ -347,9 +26,12 @@ export {
|
|
|
347
26
|
createOpenAIImageClient,
|
|
348
27
|
getChatModelFactory,
|
|
349
28
|
loadLLMExtensions,
|
|
29
|
+
loadLlmConfig,
|
|
350
30
|
parseLlmSection,
|
|
31
|
+
parseLlmYaml,
|
|
351
32
|
registerChatModelProvider,
|
|
352
33
|
registerProvider,
|
|
353
|
-
resolveLLMExtensionPackages
|
|
34
|
+
resolveLLMExtensionPackages,
|
|
35
|
+
substituteEnv
|
|
354
36
|
};
|
|
355
37
|
//# sourceMappingURL=index.js.map
|