@easynet/agent-llm 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +101 -0
- package/dist/chatModelRegistry.d.ts +17 -0
- package/dist/chatModelRegistry.d.ts.map +1 -0
- package/dist/config.d.ts +13 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/factory.d.ts +13 -0
- package/dist/factory.d.ts.map +1 -0
- package/dist/index.d.ts +16 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +355 -0
- package/dist/index.js.map +1 -0
- package/dist/llmAdapter.d.ts +21 -0
- package/dist/llmAdapter.d.ts.map +1 -0
- package/dist/loadLLMExtensions.d.ts +18 -0
- package/dist/loadLLMExtensions.d.ts.map +1 -0
- package/dist/providers/index.d.ts +7 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/openai.d.ts +9 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/types.d.ts +140 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +45 -0
package/README.md
ADDED
@@ -0,0 +1,101 @@
# Agent LLM

**LLM component for the Agent ecosystem** (standalone: no dependency on agent-tool or agent-memory). Supports the **OpenAI-compatible** format (`/v1/chat/completions`) and **extension providers** loaded by npm package name. Multi-instance: each LLM has an **id** and a **type** (chat / image). Supports **baseURL** for Azure, Ollama, Groq, and other /v1-compatible endpoints. Has its own **llm** section in agent.yaml.

## Install

```bash
cd agent-llm
npm install
```

## Config (agent.yaml llm or models section)

**provider** supports `openai`, `openai-compatible`, and providers registered by extensions (see **llm.type**). Optional **base_url** / **baseURL** for Groq, Ollama, and other compatible endpoints.

**Flat format** (id → base_url, name, options):

```yaml
llm:
  default: strong
  strong:
    provider: openai
    base_url: https://api.groq.com/openai
    name: openai/gpt-oss-120b
    options:
      apiKey: ${GROQ_API_KEY}
      temperature: 0.7
  medium:
    provider: openai
    base_url: http://192.168.0.201:11434
    name: qwen3:4b
    options:
      apiKey: ${API_KEY}
```

**Single object** (one default chat):

```yaml
llm:
  provider: openai
  model: gpt-4o-mini
  temperature: 0
  apiKey: ${OPENAI_API_KEY}
  # base_url: https://api.groq.com/openai
```

**Multi-instance** (an `instances` array whose entries have id, type, provider, model): see llm.yaml.example and the sketch below. Use **llm.type** to load extension packages that register additional providers.
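
A minimal sketch of the instances format (field names follow `LLMConfig`; the ids and the image model are illustrative):

```yaml
llm:
  type: wallee-llm   # optional: extension package(s) loaded by loadLLMExtensions
  default: main
  instances:
    - id: main
      type: chat
      provider: openai
      model: gpt-4o-mini
      apiKey: ${OPENAI_API_KEY}
    - id: image
      type: image
      provider: openai
      model: dall-e-3
      apiKey: ${OPENAI_API_KEY}
```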

## LangChain adapter

Build a LangChain ChatModel from the llm section of agent.yaml (for `createAgent`, etc.):

- **`createChatModelFromLlmConfig({ llmSection })`** — returns `ChatOpenAI` (openai) or an extension-registered ChatModel for other providers.
- Extension packages can register ChatModel factories via `registerChatModelProvider`; call **loadLLMExtensions** before use, as in the sketch below.
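
A minimal sketch, assuming `config.llm` was loaded beforehand (e.g. with the agent package's loadAgentConfig):

```ts
import { loadLLMExtensions, createChatModelFromLlmConfig } from "@easynet/agent-llm";

// Load extension packages first so their ChatModel factories are registered
// (defaults to ["wallee-llm"] when no package names are passed).
await loadLLMExtensions();

// Uses an extension-registered factory for the default provider when one
// exists; otherwise falls back to ChatOpenAI.
const chatModel = createChatModelFromLlmConfig({ llmSection: config.llm });
const reply = await chatModel.invoke("Hello");
```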

The agent package re-exports these; you can also depend on `@easynet/agent-llm` directly.

## Usage

```ts
import { createLLMRegistry } from "@easynet/agent-llm";

// loadAgentConfig is provided by the agent package.
const config = await loadAgentConfig("agent.yaml");
const registry = createLLMRegistry({ llmSection: config.llm });

const defaultId = registry.defaultId();
if (defaultId) {
  const llm = registry.get(defaultId)!;
  const result = await llm.chat([
    { role: "user", content: "Hello" },
  ]);
  console.log(result.content);
}

const imageLlm = registry.get("image");
if (imageLlm?.generateImage) {
  const img = await imageLlm.generateImage({ prompt: "A cat" });
  console.log(img.url);
}
```

## API

- **createLLMRegistry({ llmSection })** — create a registry from the agent config llm section
- **registry.get(id)** — get an ILLMClient by id
- **registry.defaultId()** — the default LLM id
- **registry.ids()** — all ids
- **ILLMClient.chat(messages)** — chat (type=chat)
- **ILLMClient.generateImage?(options)** — image generation (type=image)
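
Custom providers implement `ILLMClient` and register a factory under a provider name; a minimal sketch (the `echo` provider is hypothetical):

```ts
import { registerProvider, type LLMConfig, type ILLMClient } from "@easynet/agent-llm";

// Hypothetical chat provider that echoes the last message back.
registerProvider("echo", (config: LLMConfig): ILLMClient => ({
  id: config.id,
  type: "chat",
  async chat(messages) {
    const last = messages[messages.length - 1];
    return { content: `echo: ${last?.content ?? ""}` };
  },
}));
```

After registration, `provider: echo` can be used in any of the config formats above.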

## Ecosystem

- **agent** — orchestration; agent.yaml has an llm section and uses agent-llm's createLLMRegistry(config.llm).
- **agent-llm** — this repo: OpenAI-compatible and extension providers; config parsing and a multi-instance chat/image API.

## Publishing to npm

Releases are published as **@easynet/agent-llm** to https://www.npmjs.com via GitHub Actions (patch-only, starting at 0.0.1).

1. In this repo, go to **Settings → Secrets and variables → Actions** and add the secret **NPM_TOKEN** (an npm token with Automation or Publish permission, allowed to bypass 2FA). Create one at https://www.npmjs.com/settings/~/tokens.
2. Push to **master** or run **Actions → Release → Run workflow** to trigger the release. The workflow runs tests, builds, then uses semantic-release to bump the patch version and publish to npm.

package/dist/chatModelRegistry.d.ts
ADDED

@@ -0,0 +1,17 @@
/**
 * Registry for LangChain ChatModel factories by provider name.
 * Extensions register via registerChatModelProvider; llmAdapter uses getChatModelFactory.
 */
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import type { LLMConfig } from "./types.js";
export type ChatModelFactory = (config: LLMConfig) => BaseChatModel;
/**
 * Register a ChatModel factory for a provider name.
 * Called by extensions (e.g. wallee-llm) on load.
 */
export declare function registerChatModelProvider(providerName: string, factory: ChatModelFactory): void;
/**
 * Get the ChatModel factory for a provider name, if registered.
 */
export declare function getChatModelFactory(providerName: string): ChatModelFactory | undefined;
//# sourceMappingURL=chatModelRegistry.d.ts.map
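
Illustrative registration (the provider name is hypothetical; names are lowercased on both register and lookup in dist/index.js):

```ts
import { registerChatModelProvider, getChatModelFactory } from "@easynet/agent-llm";
import { ChatOpenAI } from "@langchain/openai";

registerChatModelProvider("MyProvider", (config) =>
  new ChatOpenAI({ model: config.model ?? "gpt-4o-mini" })
);

// Lookup is case-insensitive: this resolves to the factory above.
const factory = getChatModelFactory("myprovider");
```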

package/dist/chatModelRegistry.d.ts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"chatModelRegistry.d.ts","sourceRoot":"","sources":["../src/chatModelRegistry.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,6CAA6C,CAAC;AACjF,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,YAAY,CAAC;AAE5C,MAAM,MAAM,gBAAgB,GAAG,CAAC,MAAM,EAAE,SAAS,KAAK,aAAa,CAAC;AAIpE;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,YAAY,EAAE,MAAM,EAAE,OAAO,EAAE,gBAAgB,GAAG,IAAI,CAE/F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,YAAY,EAAE,MAAM,GAAG,gBAAgB,GAAG,SAAS,CAEtF"}

package/dist/config.d.ts
ADDED

@@ -0,0 +1,13 @@
/**
 * Parse the agent.yaml llm section into a normalized LLMConfig[] and a default id.
 * Supports: flat (one entry per model, keyed by name), instances[], or a single object.
 */
import type { LLMConfig } from "./types.js";
/**
 * Parse the llm section: flat (one entry per model, keyed by name), default + instances, or a single object.
 */
export declare function parseLlmSection(section: unknown): {
    defaultId: string;
    configs: LLMConfig[];
};
//# sourceMappingURL=config.d.ts.map
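
For example, given a flat section (output shape per the declaration above and dist/index.js):

```ts
import { parseLlmSection } from "@easynet/agent-llm";

// Non-reserved keys with object values become instances; an entry's `name`
// maps to `model`, and `options.apiKey` maps to `apiKey`.
const { defaultId, configs } = parseLlmSection({
  default: "strong",
  strong: {
    provider: "openai",
    name: "openai/gpt-oss-120b",
    options: { apiKey: "sk-placeholder" },
  },
});
// defaultId === "strong"
// configs[0] => { id: "strong", type: "chat", provider: "openai",
//   model: "openai/gpt-oss-120b", apiKey: "sk-placeholder", options: {...} }
```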

package/dist/config.d.ts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../src/config.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,YAAY,CAAC;AAkB5C;;GAEG;AACH,wBAAgB,eAAe,CAAC,OAAO,EAAE,OAAO,GAAG;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,SAAS,EAAE,CAAA;CAAE,CAsD7F"}

package/dist/factory.d.ts
ADDED

@@ -0,0 +1,13 @@
/**
 * Create LLM registry from agent.yaml llm section.
 */
import type { AgentConfigLlmSection, ILLMRegistry } from "./types.js";
export interface CreateLLMRegistryOptions {
    /** The parsed llm section (config.llm from loadAgentConfig) */
    llmSection: AgentConfigLlmSection | null | undefined;
}
/**
 * Create the LLM registry from the agent config's llm section; supports multiple providers and models, each LLM with an id and a type.
 */
export declare function createLLMRegistry(options: CreateLLMRegistryOptions): ILLMRegistry;
//# sourceMappingURL=factory.d.ts.map

package/dist/factory.d.ts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"factory.d.ts","sourceRoot":"","sources":["../src/factory.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,KAAK,EAAE,qBAAqB,EAAc,YAAY,EAAE,MAAM,YAAY,CAAC;AAElF,MAAM,WAAW,wBAAwB;IACvC,wDAAwD;IACxD,UAAU,EAAE,qBAAqB,GAAG,IAAI,GAAG,SAAS,CAAC;CACtD;AAED;;GAEG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,wBAAwB,GAAG,YAAY,CAyBjF"}

package/dist/index.d.ts
ADDED

@@ -0,0 +1,16 @@
/**
 * @easynet/agent-llm: multi-provider, multi-model LLM with id and type.
 * Consumes agent.yaml llm section; provides simple chat/image API.
 * Extensions register via registerProvider/registerChatModelProvider; call loadLLMExtensions() before using them.
 */
export { parseLlmSection } from "./config.js";
export { createLLMRegistry } from "./factory.js";
export type { CreateLLMRegistryOptions } from "./factory.js";
export { createClient, registerProvider } from "./providers/index.js";
export { registerChatModelProvider, getChatModelFactory } from "./chatModelRegistry.js";
export { createOpenAIClient, createOpenAIChatClient, createOpenAIImageClient } from "./providers/openai.js";
export { createChatModelFromLlmConfig } from "./llmAdapter.js";
export type { CreateChatModelFromLlmConfigOptions } from "./llmAdapter.js";
export { loadLLMExtensions, resolveLLMExtensionPackages } from "./loadLLMExtensions.js";
export type { LLMType, LLMConfig, AgentConfigLlmSection, ChatMessage, ChatResult, ImageResult, ToolDefinition, ChatWithToolsMessage, ChatWithToolsResult, ILLMClient, ILLMRegistry, } from "./types.js";
//# sourceMappingURL=index.d.ts.map

package/dist/index.d.ts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAC9C,OAAO,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AACjD,YAAY,EAAE,wBAAwB,EAAE,MAAM,cAAc,CAAC;AAC7D,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AACtE,OAAO,EAAE,yBAAyB,EAAE,mBAAmB,EAAE,MAAM,wBAAwB,CAAC;AACxF,OAAO,EAAE,kBAAkB,EAAE,sBAAsB,EAAE,uBAAuB,EAAE,MAAM,uBAAuB,CAAC;AAC5G,OAAO,EAAE,4BAA4B,EAAE,MAAM,iBAAiB,CAAC;AAC/D,YAAY,EAAE,mCAAmC,EAAE,MAAM,iBAAiB,CAAC;AAC3E,OAAO,EAAE,iBAAiB,EAAE,2BAA2B,EAAE,MAAM,wBAAwB,CAAC;AAExF,YAAY,EACV,OAAO,EACP,SAAS,EACT,qBAAqB,EACrB,WAAW,EACX,UAAU,EACV,WAAW,EACX,cAAc,EACd,oBAAoB,EACpB,mBAAmB,EACnB,UAAU,EACV,YAAY,GACb,MAAM,YAAY,CAAC"}

package/dist/index.js
ADDED

@@ -0,0 +1,355 @@
// src/config.ts
var DEFAULT_LLM_ID = "default";
var RESERVED_KEYS = /* @__PURE__ */ new Set([
  "default",
  "instances",
  "catalog",
  "provider",
  "model",
  "temperature",
  "apiKey",
  "baseURL",
  "base_url",
  "type",
  "id"
]);
function parseLlmSection(section) {
  if (section == null || typeof section !== "object") {
    return { defaultId: DEFAULT_LLM_ID, configs: [] };
  }
  if (Array.isArray(section)) {
    const configs = section.filter((i) => i != null && typeof i === "object").map((item, i) => normalizeLlmConfig({ ...item, id: item.id ?? item.name ?? String(i) })).filter((c) => c != null);
    const defaultId = configs.length > 0 ? configs[0].id : DEFAULT_LLM_ID;
    return { defaultId, configs };
  }
  const s = section;
  const flatEntries = Object.entries(s).filter(
    ([k, v]) => !RESERVED_KEYS.has(k) && v != null && typeof v === "object" && !Array.isArray(v)
  );
  if (flatEntries.length > 0) {
    const configs = [];
    for (const [id, entry] of flatEntries) {
      const c = entryToLlmConfig(id, entry);
      if (c) configs.push(c);
    }
    const defaultId = typeof s.default === "string" && s.default && flatEntries.some(([k]) => k === s.default) ? s.default : configs.length > 0 ? configs[0].id : DEFAULT_LLM_ID;
    return { defaultId, configs };
  }
  if (Array.isArray(s.instances)) {
    const configs = s.instances.filter((i) => i != null && typeof i === "object").map((i) => normalizeLlmConfig(i)).filter((c) => c != null);
    const defaultId = typeof s.default === "string" && s.default ? s.default : configs.length > 0 ? configs[0].id : DEFAULT_LLM_ID;
    return { defaultId, configs };
  }
  if (typeof s.provider === "string" || typeof s.model === "string" || typeof s.name === "string") {
    const one = singleObjectToLlmConfig(s);
    return { defaultId: one.id, configs: [one] };
  }
  return { defaultId: DEFAULT_LLM_ID, configs: [] };
}
var EXTENSION_OPTION_KEYS = ["featureKey", "tenant", "authToken", "verifySSL", "bypassAuth", "host", "resolveHost", "timeoutMs", "options"];
function entryToLlmConfig(id, entry) {
  const opts = entry.options;
  const baseURL = typeof entry.base_url === "string" ? entry.base_url : typeof entry.baseURL === "string" ? entry.baseURL : void 0;
  const model = typeof entry.name === "string" ? entry.name : typeof entry.model === "string" ? entry.model : void 0;
  const provider = typeof entry.provider === "string" && entry.provider ? entry.provider : "openai";
  const config = {
    id,
    type: "chat",
    provider,
    model,
    temperature: typeof opts?.temperature === "number" ? opts.temperature : typeof entry.temperature === "number" ? entry.temperature : void 0,
    apiKey: typeof opts?.apiKey === "string" ? opts.apiKey : typeof entry.apiKey === "string" ? entry.apiKey : void 0,
    baseURL
  };
  if (typeof entry.type === "string" && entry.type === "image") config.type = "image";
  if (opts && typeof opts === "object") config.options = opts;
  for (const k of EXTENSION_OPTION_KEYS) {
    if (entry[k] !== void 0) config[k] = entry[k];
    else if (opts && opts[k] !== void 0) config[k] = opts[k];
  }
  return config;
}
function singleObjectToLlmConfig(s) {
  const one = {
    id: DEFAULT_LLM_ID,
    type: "chat",
    provider: typeof s.provider === "string" ? s.provider : "openai",
    model: typeof s.model === "string" ? s.model : typeof s.name === "string" ? s.name : void 0,
    temperature: typeof s.temperature === "number" ? s.temperature : void 0,
    apiKey: typeof s.apiKey === "string" ? s.apiKey : void 0,
    baseURL: typeof s.baseURL === "string" ? s.baseURL : typeof s.base_url === "string" ? s.base_url : void 0
  };
  Object.keys(s).forEach((k) => {
    if (!["id", "type", "provider", "model", "name", "temperature", "apiKey", "baseURL", "base_url", "default", "instances"].includes(k)) {
      one[k] = s[k];
    }
  });
  return one;
}
function normalizeLlmConfig(o) {
  const id = typeof o.id === "string" && o.id ? o.id : DEFAULT_LLM_ID;
  const type = o.type === "image" ? "image" : "chat";
  const provider = typeof o.provider === "string" && o.provider ? o.provider : "openai";
  const config = {
    id,
    type,
    provider,
    model: typeof o.model === "string" ? o.model : typeof o.name === "string" ? o.name : void 0,
    temperature: typeof o.temperature === "number" ? o.temperature : void 0,
    apiKey: typeof o.apiKey === "string" ? o.apiKey : void 0,
    baseURL: typeof o.baseURL === "string" ? o.baseURL : typeof o.base_url === "string" ? o.base_url : void 0
  };
  Object.keys(o).forEach((k) => {
    if (!["id", "type", "provider", "model", "name", "temperature", "apiKey", "baseURL", "base_url"].includes(k)) {
      config[k] = o[k];
    }
  });
  return config;
}

// src/providers/openai.ts
import OpenAI from "openai";
function getApiKey(config) {
  const key = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
  if (!key) throw new Error("OpenAI-compatible apiKey required (config.apiKey or OPENAI_API_KEY)");
  return key;
}
function createOpenAIClientOptions(config) {
  const opts = { apiKey: getApiKey(config) };
  if (typeof config.baseURL === "string" && config.baseURL) opts.baseURL = config.baseURL;
  return opts;
}
function serializeMessage(m) {
  if (m.role === "tool")
    return { role: "tool", content: m.content, tool_call_id: m.tool_call_id };
  if (m.role === "assistant" && "tool_calls" in m && m.tool_calls?.length) {
    return {
      role: "assistant",
      content: m.content ?? null,
      tool_calls: m.tool_calls.map((tc) => ({
        id: tc.id,
        type: "function",
        function: { name: tc.function.name, arguments: tc.function.arguments }
      }))
    };
  }
  return { role: m.role, content: m.content };
}
function createOpenAIChatClient(config) {
  const client = new OpenAI(createOpenAIClientOptions(config));
  const model = config.model ?? process.env.OPENAI_MODEL ?? "gpt-4o-mini";
  const temperature = config.temperature ?? 0;
  return {
    id: config.id,
    type: "chat",
    async chat(messages) {
      const resp = await client.chat.completions.create({
        model,
        temperature,
        messages: messages.map((m) => ({ role: m.role, content: m.content }))
      });
      const content = resp.choices[0]?.message?.content ?? "";
      const usage = resp.usage ? { promptTokens: resp.usage.prompt_tokens, completionTokens: resp.usage.completion_tokens } : void 0;
      return { content, usage };
    },
    async chatWithTools(messages, tools, _options) {
      const resp = await client.chat.completions.create({
        model,
        temperature,
        messages: messages.map(serializeMessage),
        tools: tools.map((t) => ({
          type: "function",
          function: {
            name: t.function.name,
            description: t.function.description,
            parameters: t.function.parameters ?? void 0
          }
        }))
      });
      const msg = resp.choices[0]?.message;
      const usage = resp.usage ? { promptTokens: resp.usage.prompt_tokens, completionTokens: resp.usage.completion_tokens } : void 0;
      return {
        message: {
          role: "assistant",
          content: msg?.content ?? null,
          tool_calls: msg?.tool_calls?.map((tc) => ({
            id: tc.id,
            type: "function",
            function: {
              name: tc.function?.name ?? "",
              arguments: tc.function?.arguments ?? ""
            }
          }))
        },
        usage
      };
    }
  };
}
function createOpenAIImageClient(config) {
  const client = new OpenAI(createOpenAIClientOptions(config));
  const model = config.model ?? "dall-e-3";
  return {
    id: config.id,
    type: "image",
    async chat() {
      throw new Error("OpenAI image model does not support chat; use generateImage()");
    },
    async generateImage(options) {
      const resp = await client.images.generate({
        model,
        prompt: options.prompt,
        size: options.size ?? "1024x1024",
        n: options.n ?? 1,
        response_format: "url"
      });
      const url = resp.data?.[0]?.url ?? void 0;
      return { url };
    }
  };
}
function createOpenAIClient(config) {
  if (config.type === "image") return createOpenAIImageClient(config);
  return createOpenAIChatClient(config);
}

// src/providers/index.ts
var OPENAI_COMPATIBLE = "openai-compatible";
function createOpenAICompat(config) {
  return createOpenAIClient(config);
}
var PROVIDERS = {
  openai: createOpenAICompat,
  [OPENAI_COMPATIBLE]: createOpenAICompat
};
function createClient(config) {
  const p = (config.provider ?? "").toLowerCase();
  const fn = PROVIDERS[p];
  if (!fn) {
    const supported = [.../* @__PURE__ */ new Set([...Object.keys(PROVIDERS), "extension providers"])].sort().join(", ");
    throw new Error(
      `Unsupported LLM provider: ${config.provider}. Supported: ${supported}.`
    );
  }
  return fn(config);
}
function registerProvider(name, factory) {
  PROVIDERS[name.toLowerCase()] = factory;
}

// src/factory.ts
function createLLMRegistry(options) {
  const { defaultId, configs } = parseLlmSection(options.llmSection);
  const map = /* @__PURE__ */ new Map();
  for (const config of configs) {
    try {
      const client = createClient(config);
      map.set(config.id, client);
    } catch (err) {
      console.warn(`[agent-llm] Skip LLM "${config.id}": ${err instanceof Error ? err.message : String(err)}`);
    }
  }
  return {
    get(id) {
      return map.get(id);
    },
    defaultId() {
      if (map.has(defaultId)) return defaultId;
      return map.size > 0 ? [...map.keys()][0] : void 0;
    },
    ids() {
      return [...map.keys()];
    }
  };
}

// src/chatModelRegistry.ts
var CHAT_MODEL_FACTORIES = /* @__PURE__ */ new Map();
function registerChatModelProvider(providerName, factory) {
  CHAT_MODEL_FACTORIES.set(providerName.toLowerCase(), factory);
}
function getChatModelFactory(providerName) {
  return CHAT_MODEL_FACTORIES.get(providerName.toLowerCase());
}

// src/llmAdapter.ts
import { ChatOpenAI } from "@langchain/openai";
var DEFAULT_MODEL = "gpt-4o-mini";
function createChatModelFromLlmConfig(options) {
  const { llmSection, modelEnv, apiKeyEnv } = options;
  const { defaultId, configs } = parseLlmSection(llmSection ?? null);
  const defaultConfig = configs.find((c) => c.id === defaultId) ?? configs[0];
  if (!defaultConfig) {
    const model2 = modelEnv ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;
    const apiKey2 = apiKeyEnv ?? process.env.OPENAI_API_KEY;
    return new ChatOpenAI({
      model: model2,
      temperature: 0,
      ...apiKey2 ? { apiKey: apiKey2 } : {}
    });
  }
  const provider = defaultConfig.provider ?? "openai";
  const chatModelFactory = getChatModelFactory(provider);
  if (chatModelFactory) {
    const config = {
      ...defaultConfig,
      model: modelEnv ?? defaultConfig.model ?? (provider === "cis" ? process.env.CIS_MODEL ?? "gcp/gemini-2.5-pro" : defaultConfig.model),
      temperature: typeof defaultConfig.temperature === "number" ? defaultConfig.temperature : 0
    };
    return chatModelFactory(config);
  }
  const model = modelEnv ?? defaultConfig?.model ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;
  const apiKey = apiKeyEnv ?? defaultConfig?.apiKey ?? process.env.OPENAI_API_KEY;
  const temperature = typeof defaultConfig?.temperature === "number" ? defaultConfig.temperature : 0;
  const baseURL = defaultConfig?.baseURL;
  const constructorOptions = {
    model,
    temperature,
    ...apiKey ? { apiKey } : {},
    ...baseURL ? { configuration: { baseURL } } : {}
  };
  return new ChatOpenAI(constructorOptions);
}

// src/loadLLMExtensions.ts
var loadedPackages = /* @__PURE__ */ new Set();
var DEFAULT_EXTENSIONS = ["wallee-llm"];
function resolveLLMExtensionPackages(types) {
  const typeList = types == null ? [] : Array.isArray(types) ? types : [types];
  const packages = typeList.filter(
    (t) => typeof t === "string" && t.length > 0
  );
  return packages.length > 0 ? packages : DEFAULT_EXTENSIONS;
}
async function loadLLMExtensions(extensionPackages) {
  const packages = extensionPackages ?? DEFAULT_EXTENSIONS;
  for (const pkg of packages) {
    if (loadedPackages.has(pkg)) continue;
    loadedPackages.add(pkg);
    try {
      const m = await import(
        /* @vite-ignore */
        pkg
      );
      if (typeof m.registerLLMExtension === "function") {
        m.registerLLMExtension();
      }
    } catch {
    }
  }
}
export {
  createChatModelFromLlmConfig,
  createClient,
  createLLMRegistry,
  createOpenAIChatClient,
  createOpenAIClient,
  createOpenAIImageClient,
  getChatModelFactory,
  loadLLMExtensions,
  parseLlmSection,
  registerChatModelProvider,
  registerProvider,
  resolveLLMExtensionPackages
};
//# sourceMappingURL=index.js.map

package/dist/index.js.map
ADDED

@@ -0,0 +1 @@
{"version":3,"sources":["../src/config.ts","../src/providers/openai.ts","../src/providers/index.ts","../src/factory.ts","../src/chatModelRegistry.ts","../src/llmAdapter.ts","../src/loadLLMExtensions.ts"],"sourcesContent":["/**\n * Parse agent.yaml llm section into normalized LLMConfig[] and default id.\n * Supports: flat (每个模型一个 name 为 key)、instances[]、或单对象。\n */\n\nimport type { LLMConfig } from \"./types.js\";\n\nconst DEFAULT_LLM_ID = \"default\";\n\nconst RESERVED_KEYS = new Set([\n \"default\",\n \"instances\",\n \"catalog\",\n \"provider\",\n \"model\",\n \"temperature\",\n \"apiKey\",\n \"baseURL\",\n \"base_url\",\n \"type\",\n \"id\",\n]);\n\n/**\n * 解析 llm section:扁平(每个模型一个 name 为 key)或 default+instances 或单对象。\n */\nexport function parseLlmSection(section: unknown): { defaultId: string; configs: LLMConfig[] } {\n if (section == null || typeof section !== \"object\") {\n return { defaultId: DEFAULT_LLM_ID, configs: [] };\n }\n\n if (Array.isArray(section)) {\n const configs = section\n .filter((i): i is Record<string, unknown> => i != null && typeof i === \"object\")\n .map((item, i) => normalizeLlmConfig({ ...item, id: item.id ?? item.name ?? String(i) }))\n .filter((c): c is LLMConfig => c != null);\n const defaultId = configs.length > 0 ? configs[0]!.id : DEFAULT_LLM_ID;\n return { defaultId, configs };\n }\n\n const s = section as Record<string, unknown>;\n\n const flatEntries = Object.entries(s).filter(\n ([k, v]) => !RESERVED_KEYS.has(k) && v != null && typeof v === \"object\" && !Array.isArray(v)\n );\n if (flatEntries.length > 0) {\n const configs: LLMConfig[] = [];\n for (const [id, entry] of flatEntries) {\n const c = entryToLlmConfig(id, entry as Record<string, unknown>);\n if (c) configs.push(c);\n }\n const defaultId =\n typeof s.default === \"string\" && s.default && flatEntries.some(([k]) => k === s.default)\n ? s.default\n : configs.length > 0\n ? configs[0]!.id\n : DEFAULT_LLM_ID;\n return { defaultId, configs };\n }\n\n if (Array.isArray(s.instances)) {\n const configs = (s.instances as unknown[])\n .filter((i): i is Record<string, unknown> => i != null && typeof i === \"object\")\n .map((i) => normalizeLlmConfig(i))\n .filter((c): c is LLMConfig => c != null);\n const defaultId =\n typeof s.default === \"string\" && s.default\n ? s.default\n : configs.length > 0\n ? configs[0]!.id\n : DEFAULT_LLM_ID;\n return { defaultId, configs };\n }\n\n if (typeof s.provider === \"string\" || typeof s.model === \"string\" || typeof (s as { name?: string }).name === \"string\") {\n const one = singleObjectToLlmConfig(s);\n return { defaultId: one.id, configs: [one] };\n }\n\n return { defaultId: DEFAULT_LLM_ID, configs: [] };\n}\n\nconst EXTENSION_OPTION_KEYS = [\"featureKey\", \"tenant\", \"authToken\", \"verifySSL\", \"bypassAuth\", \"host\", \"resolveHost\", \"timeoutMs\", \"options\"];\n\nfunction entryToLlmConfig(id: string, entry: Record<string, unknown>): LLMConfig | null {\n const opts = entry.options as Record<string, unknown> | undefined;\n const baseURL =\n typeof entry.base_url === \"string\"\n ? entry.base_url\n : typeof entry.baseURL === \"string\"\n ? entry.baseURL\n : undefined;\n const model = typeof entry.name === \"string\" ? entry.name : typeof entry.model === \"string\" ? entry.model : undefined;\n const provider = typeof entry.provider === \"string\" && entry.provider ? entry.provider : \"openai\";\n const config: LLMConfig = {\n id,\n type: \"chat\",\n provider,\n model,\n temperature: typeof opts?.temperature === \"number\" ? 
opts.temperature : typeof entry.temperature === \"number\" ? entry.temperature : undefined,\n apiKey: typeof opts?.apiKey === \"string\" ? opts.apiKey : typeof entry.apiKey === \"string\" ? entry.apiKey : undefined,\n baseURL,\n };\n if (typeof entry.type === \"string\" && entry.type === \"image\") config.type = \"image\";\n if (opts && typeof opts === \"object\") (config as Record<string, unknown>).options = opts;\n for (const k of EXTENSION_OPTION_KEYS) {\n if (entry[k] !== undefined) (config as Record<string, unknown>)[k] = entry[k];\n else if (opts && opts[k] !== undefined) (config as Record<string, unknown>)[k] = opts[k];\n }\n return config;\n}\n\nfunction singleObjectToLlmConfig(s: Record<string, unknown>): LLMConfig {\n const one: LLMConfig = {\n id: DEFAULT_LLM_ID,\n type: \"chat\",\n provider: typeof s.provider === \"string\" ? s.provider : \"openai\",\n model: typeof s.model === \"string\" ? s.model : (typeof (s as { name?: string }).name === \"string\" ? (s as { name: string }).name : undefined),\n temperature: typeof s.temperature === \"number\" ? s.temperature : undefined,\n apiKey: typeof s.apiKey === \"string\" ? s.apiKey : undefined,\n baseURL:\n typeof s.baseURL === \"string\" ? s.baseURL : typeof s.base_url === \"string\" ? s.base_url : undefined,\n };\n Object.keys(s).forEach((k) => {\n if (![\"id\", \"type\", \"provider\", \"model\", \"name\", \"temperature\", \"apiKey\", \"baseURL\", \"base_url\", \"default\", \"instances\"].includes(k)) {\n (one as Record<string, unknown>)[k] = s[k];\n }\n });\n return one;\n}\n\nfunction normalizeLlmConfig(o: Record<string, unknown>): LLMConfig | null {\n const id = typeof o.id === \"string\" && o.id ? o.id : DEFAULT_LLM_ID;\n const type = o.type === \"image\" ? \"image\" : \"chat\";\n const provider = typeof o.provider === \"string\" && o.provider ? o.provider : \"openai\";\n const config: LLMConfig = {\n id,\n type,\n provider,\n model: typeof o.model === \"string\" ? o.model : (typeof o.name === \"string\" ? o.name : undefined),\n temperature: typeof o.temperature === \"number\" ? o.temperature : undefined,\n apiKey: typeof o.apiKey === \"string\" ? o.apiKey : undefined,\n baseURL: typeof o.baseURL === \"string\" ? o.baseURL : (typeof o.base_url === \"string\" ? o.base_url : undefined),\n };\n Object.keys(o).forEach((k) => {\n if (![\"id\", \"type\", \"provider\", \"model\", \"name\", \"temperature\", \"apiKey\", \"baseURL\", \"base_url\"].includes(k)) {\n (config as Record<string, unknown>)[k] = o[k];\n }\n });\n return config;\n}\n","/**\n * OpenAI 兼容格式:chat (/v1/chat/completions) 与 image。\n * 支持 baseURL 以对接 Azure、本地代理等兼容端点。\n */\n\nimport OpenAI from \"openai\";\nimport type {\n LLMConfig,\n ChatMessage,\n ChatResult,\n ImageResult,\n ILLMClient,\n ChatWithToolsMessage,\n ChatWithToolsResult,\n ToolDefinition,\n} from \"../types.js\";\n\nfunction getApiKey(config: LLMConfig): string {\n const key = config.apiKey ?? process.env.OPENAI_API_KEY ?? 
\"\";\n if (!key) throw new Error(\"OpenAI-compatible apiKey required (config.apiKey or OPENAI_API_KEY)\");\n return key;\n}\n\nfunction createOpenAIClientOptions(config: LLMConfig): { apiKey: string; baseURL?: string } {\n const opts: { apiKey: string; baseURL?: string } = { apiKey: getApiKey(config) };\n if (typeof config.baseURL === \"string\" && config.baseURL) opts.baseURL = config.baseURL;\n return opts;\n}\n\nfunction serializeMessage(\n m: ChatWithToolsMessage\n): OpenAI.Chat.Completions.ChatCompletionMessageParam {\n if (m.role === \"tool\")\n return { role: \"tool\", content: m.content, tool_call_id: m.tool_call_id };\n if (m.role === \"assistant\" && \"tool_calls\" in m && m.tool_calls?.length) {\n return {\n role: \"assistant\",\n content: m.content ?? null,\n tool_calls: m.tool_calls.map((tc) => ({\n id: tc.id,\n type: \"function\" as const,\n function: { name: tc.function.name, arguments: tc.function.arguments },\n })),\n };\n }\n return { role: m.role, content: (m as ChatMessage).content };\n}\n\nexport function createOpenAIChatClient(config: LLMConfig): ILLMClient {\n const client = new OpenAI(createOpenAIClientOptions(config));\n const model = config.model ?? process.env.OPENAI_MODEL ?? \"gpt-4o-mini\";\n const temperature = config.temperature ?? 0;\n\n return {\n id: config.id,\n type: \"chat\",\n async chat(messages: ChatMessage[]): Promise<ChatResult> {\n const resp = await client.chat.completions.create({\n model,\n temperature,\n messages: messages.map((m) => ({ role: m.role, content: m.content })),\n });\n const content = resp.choices[0]?.message?.content ?? \"\";\n const usage = resp.usage\n ? { promptTokens: resp.usage.prompt_tokens, completionTokens: resp.usage.completion_tokens }\n : undefined;\n return { content, usage };\n },\n async chatWithTools(\n messages: ChatWithToolsMessage[],\n tools: ToolDefinition[],\n _options?: { timeoutMs?: number }\n ): Promise<ChatWithToolsResult> {\n const resp = await client.chat.completions.create({\n model,\n temperature,\n messages: messages.map(serializeMessage),\n tools: tools.map((t) => ({\n type: \"function\" as const,\n function: {\n name: t.function.name,\n description: t.function.description,\n parameters: (t.function.parameters ?? undefined) as Record<string, unknown> | undefined,\n },\n })),\n });\n const msg = resp.choices[0]?.message;\n const usage = resp.usage\n ? { promptTokens: resp.usage.prompt_tokens, completionTokens: resp.usage.completion_tokens }\n : undefined;\n return {\n message: {\n role: \"assistant\",\n content: msg?.content ?? null,\n tool_calls: msg?.tool_calls?.map((tc) => ({\n id: tc.id,\n type: \"function\" as const,\n function: {\n name: tc.function?.name ?? \"\",\n arguments: tc.function?.arguments ?? \"\",\n },\n })),\n },\n usage,\n };\n },\n };\n}\n\nexport function createOpenAIImageClient(config: LLMConfig): ILLMClient {\n const client = new OpenAI(createOpenAIClientOptions(config));\n const model = (config.model as string) ?? \"dall-e-3\";\n\n return {\n id: config.id,\n type: \"image\",\n async chat(): Promise<ChatResult> {\n throw new Error(\"OpenAI image model does not support chat; use generateImage()\");\n },\n async generateImage(options: { prompt: string; size?: string; n?: number }): Promise<ImageResult> {\n const resp = await client.images.generate({\n model,\n prompt: options.prompt,\n size: (options.size as \"1024x1024\" | \"1792x1024\" | \"1024x1792\") ?? \"1024x1024\",\n n: options.n ?? 1,\n response_format: \"url\",\n });\n const url = resp.data?.[0]?.url ?? 
undefined;\n return { url };\n },\n };\n}\n\nexport function createOpenAIClient(config: LLMConfig): ILLMClient {\n if (config.type === \"image\") return createOpenAIImageClient(config);\n return createOpenAIChatClient(config);\n}\n","/**\n * Supports OpenAI-compatible and extension providers.\n */\n\nimport type { LLMConfig, ILLMClient } from \"../types.js\";\nimport { createOpenAIClient } from \"./openai.js\";\n\nconst OPENAI_COMPATIBLE = \"openai-compatible\";\n\nfunction createOpenAICompat(config: LLMConfig): ILLMClient {\n return createOpenAIClient(config);\n}\n\nconst PROVIDERS: Record<string, (config: LLMConfig) => ILLMClient> = {\n openai: createOpenAICompat,\n [OPENAI_COMPATIBLE]: createOpenAICompat,\n};\n\nexport function createClient(config: LLMConfig): ILLMClient {\n const p = (config.provider ?? \"\").toLowerCase();\n const fn = PROVIDERS[p];\n if (!fn) {\n const supported = [...new Set([...Object.keys(PROVIDERS), \"extension providers\"])].sort().join(\", \");\n throw new Error(\n `Unsupported LLM provider: ${config.provider}. Supported: ${supported}.`\n );\n }\n return fn(config);\n}\n\nexport function registerProvider(name: string, factory: (config: LLMConfig) => ILLMClient): void {\n PROVIDERS[name.toLowerCase()] = factory;\n}\n","/**\n * Create LLM registry from agent.yaml llm section.\n */\n\nimport { parseLlmSection } from \"./config.js\";\nimport { createClient } from \"./providers/index.js\";\nimport type { AgentConfigLlmSection, ILLMClient, ILLMRegistry } from \"./types.js\";\n\nexport interface CreateLLMRegistryOptions {\n /** 已解析的 llm section(来自 loadAgentConfig 的 config.llm) */\n llmSection: AgentConfigLlmSection | null | undefined;\n}\n\n/**\n * 从 agent config 的 llm 段创建 LLM 注册表;支持多 provider、多模型、每 LLM 有 id 与 type。\n */\nexport function createLLMRegistry(options: CreateLLMRegistryOptions): ILLMRegistry {\n const { defaultId, configs } = parseLlmSection(options.llmSection);\n const map = new Map<string, ILLMClient>();\n\n for (const config of configs) {\n try {\n const client = createClient(config);\n map.set(config.id, client);\n } catch (err) {\n console.warn(`[agent-llm] Skip LLM \"${config.id}\": ${err instanceof Error ? err.message : String(err)}`);\n }\n }\n\n return {\n get(id: string): ILLMClient | undefined {\n return map.get(id);\n },\n defaultId(): string | undefined {\n if (map.has(defaultId)) return defaultId;\n return map.size > 0 ? [...map.keys()][0] : undefined;\n },\n ids(): string[] {\n return [...map.keys()];\n },\n };\n}\n","/**\n * Registry for LangChain ChatModel by provider name.\n * Extensions register via registerChatModelProvider; llmAdapter uses getChatModelFactory.\n */\n\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { LLMConfig } from \"./types.js\";\n\nexport type ChatModelFactory = (config: LLMConfig) => BaseChatModel;\n\nconst CHAT_MODEL_FACTORIES = new Map<string, ChatModelFactory>();\n\n/**\n * Register a ChatModel factory for a provider name.\n * Called by extensions (e.g. 
wallee-llm) on load.\n */\nexport function registerChatModelProvider(providerName: string, factory: ChatModelFactory): void {\n CHAT_MODEL_FACTORIES.set(providerName.toLowerCase(), factory);\n}\n\n/**\n * Get the ChatModel factory for a provider name, if registered.\n */\nexport function getChatModelFactory(providerName: string): ChatModelFactory | undefined {\n return CHAT_MODEL_FACTORIES.get(providerName.toLowerCase());\n}\n","/**\n * Build LangChain ChatModel from agent.yaml llm section.\n * Supports single object, default + instances, and flat keyed configs.\n * When provider is registered by an extension, uses that extension's ChatModel;\n * otherwise uses ChatOpenAI.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { parseLlmSection } from \"./config.js\";\nimport { getChatModelFactory } from \"./chatModelRegistry.js\";\n\nconst DEFAULT_MODEL = \"gpt-4o-mini\";\n\nexport interface CreateChatModelFromLlmConfigOptions {\n /** agent.yaml llm section (raw or parsed); compatible with AgentConfigLlmSection / AgentConfigLlm */\n llmSection?: unknown;\n /** Override model from env */\n modelEnv?: string;\n /** Override API key from env */\n apiKeyEnv?: string;\n}\n\n/**\n * Create a LangChain ChatModel from agent config llm section.\n * Uses extension-registered ChatModel when available; otherwise ChatOpenAI.\n */\nexport function createChatModelFromLlmConfig(\n options: CreateChatModelFromLlmConfigOptions\n): BaseChatModel {\n const { llmSection, modelEnv, apiKeyEnv } = options;\n const { defaultId, configs } = parseLlmSection(llmSection ?? null);\n const defaultConfig = configs.find((c) => c.id === defaultId) ?? configs[0];\n\n if (!defaultConfig) {\n const model =\n modelEnv ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;\n const apiKey = apiKeyEnv ?? process.env.OPENAI_API_KEY;\n return new ChatOpenAI({\n model,\n temperature: 0,\n ...(apiKey ? { apiKey } : {}),\n });\n }\n\n const provider = (defaultConfig as { provider?: string }).provider ?? \"openai\";\n const chatModelFactory = getChatModelFactory(provider);\n if (chatModelFactory) {\n const config = {\n ...defaultConfig,\n model:\n modelEnv ??\n defaultConfig.model ??\n (provider === \"cis\" ? process.env.CIS_MODEL ?? \"gcp/gemini-2.5-pro\" : defaultConfig.model),\n temperature:\n typeof defaultConfig.temperature === \"number\"\n ? defaultConfig.temperature\n : 0,\n };\n return chatModelFactory(config);\n }\n\n const model =\n modelEnv ??\n defaultConfig?.model ??\n process.env.OPENAI_MODEL ??\n DEFAULT_MODEL;\n\n const apiKey =\n apiKeyEnv ?? defaultConfig?.apiKey ?? process.env.OPENAI_API_KEY;\n\n const temperature =\n typeof defaultConfig?.temperature === \"number\" ? defaultConfig.temperature : 0;\n\n const baseURL = defaultConfig?.baseURL;\n\n const constructorOptions: ConstructorParameters<typeof ChatOpenAI>[0] = {\n model,\n temperature,\n ...(apiKey ? { apiKey } : {}),\n ...(baseURL ? { configuration: { baseURL } } : {}),\n };\n\n return new ChatOpenAI(constructorOptions);\n}\n","/**\n * Load optional LLM extensions by npm package name (e.g. wallee-llm).\n * Call before createChatModelFromLlmConfig when using extension providers.\n * Config llm.type = npm package name(s); we dynamic load those packages. 
No extensions field.\n */\n\nconst loadedPackages = new Set<string>();\n\nconst DEFAULT_EXTENSIONS = [\"wallee-llm\"];\n\n/**\n * Resolve llm.type to a list of npm package names to load.\n * type is the npm package name or array of package names; we load them directly (no mapping).\n */\nexport function resolveLLMExtensionPackages(types?: string | string[]): string[] {\n const typeList = types == null ? [] : Array.isArray(types) ? types : [types];\n const packages = typeList.filter(\n (t): t is string => typeof t === \"string\" && t.length > 0\n );\n return packages.length > 0 ? packages : DEFAULT_EXTENSIONS;\n}\n\n/**\n * Dynamically load LLM extensions by npm package name.\n * Each package must export registerLLMExtension() and will register its provider(s) and ChatModel factory.\n * Safe to call multiple times; each package is loaded at most once.\n * @param extensionPackages npm package names; default [\"wallee-llm\"] when omitted\n */\nexport async function loadLLMExtensions(\n extensionPackages?: string[]\n): Promise<void> {\n const packages = extensionPackages ?? DEFAULT_EXTENSIONS;\n for (const pkg of packages) {\n if (loadedPackages.has(pkg)) continue;\n loadedPackages.add(pkg);\n try {\n const m = await import(/* @vite-ignore */ pkg);\n if (\n typeof (m as { registerLLMExtension?: () => void })\n .registerLLMExtension === \"function\"\n ) {\n (m as { registerLLMExtension: () => void }).registerLLMExtension();\n }\n } catch {\n // extension not installed or load failed\n }\n }\n}\n"],"mappings":";AAOA,IAAM,iBAAiB;AAEvB,IAAM,gBAAgB,oBAAI,IAAI;AAAA,EAC5B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAKM,SAAS,gBAAgB,SAA+D;AAC7F,MAAI,WAAW,QAAQ,OAAO,YAAY,UAAU;AAClD,WAAO,EAAE,WAAW,gBAAgB,SAAS,CAAC,EAAE;AAAA,EAClD;AAEA,MAAI,MAAM,QAAQ,OAAO,GAAG;AAC1B,UAAM,UAAU,QACb,OAAO,CAAC,MAAoC,KAAK,QAAQ,OAAO,MAAM,QAAQ,EAC9E,IAAI,CAAC,MAAM,MAAM,mBAAmB,EAAE,GAAG,MAAM,IAAI,KAAK,MAAM,KAAK,QAAQ,OAAO,CAAC,EAAE,CAAC,CAAC,EACvF,OAAO,CAAC,MAAsB,KAAK,IAAI;AAC1C,UAAM,YAAY,QAAQ,SAAS,IAAI,QAAQ,CAAC,EAAG,KAAK;AACxD,WAAO,EAAE,WAAW,QAAQ;AAAA,EAC9B;AAEA,QAAM,IAAI;AAEV,QAAM,cAAc,OAAO,QAAQ,CAAC,EAAE;AAAA,IACpC,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,cAAc,IAAI,CAAC,KAAK,KAAK,QAAQ,OAAO,MAAM,YAAY,CAAC,MAAM,QAAQ,CAAC;AAAA,EAC7F;AACA,MAAI,YAAY,SAAS,GAAG;AAC1B,UAAM,UAAuB,CAAC;AAC9B,eAAW,CAAC,IAAI,KAAK,KAAK,aAAa;AACrC,YAAM,IAAI,iBAAiB,IAAI,KAAgC;AAC/D,UAAI,EAAG,SAAQ,KAAK,CAAC;AAAA,IACvB;AACA,UAAM,YACJ,OAAO,EAAE,YAAY,YAAY,EAAE,WAAW,YAAY,KAAK,CAAC,CAAC,CAAC,MAAM,MAAM,EAAE,OAAO,IACnF,EAAE,UACF,QAAQ,SAAS,IACf,QAAQ,CAAC,EAAG,KACZ;AACR,WAAO,EAAE,WAAW,QAAQ;AAAA,EAC9B;AAEA,MAAI,MAAM,QAAQ,EAAE,SAAS,GAAG;AAC9B,UAAM,UAAW,EAAE,UAChB,OAAO,CAAC,MAAoC,KAAK,QAAQ,OAAO,MAAM,QAAQ,EAC9E,IAAI,CAAC,MAAM,mBAAmB,CAAC,CAAC,EAChC,OAAO,CAAC,MAAsB,KAAK,IAAI;AAC1C,UAAM,YACJ,OAAO,EAAE,YAAY,YAAY,EAAE,UAC/B,EAAE,UACF,QAAQ,SAAS,IACf,QAAQ,CAAC,EAAG,KACZ;AACR,WAAO,EAAE,WAAW,QAAQ;AAAA,EAC9B;AAEA,MAAI,OAAO,EAAE,aAAa,YAAY,OAAO,EAAE,UAAU,YAAY,OAAQ,EAAwB,SAAS,UAAU;AACtH,UAAM,MAAM,wBAAwB,CAAC;AACrC,WAAO,EAAE,WAAW,IAAI,IAAI,SAAS,CAAC,GAAG,EAAE;AAAA,EAC7C;AAEA,SAAO,EAAE,WAAW,gBAAgB,SAAS,CAAC,EAAE;AAClD;AAEA,IAAM,wBAAwB,CAAC,cAAc,UAAU,aAAa,aAAa,cAAc,QAAQ,eAAe,aAAa,SAAS;AAE5I,SAAS,iBAAiB,IAAY,OAAkD;AACtF,QAAM,OAAO,MAAM;AACnB,QAAM,UACJ,OAAO,MAAM,aAAa,WACtB,MAAM,WACN,OAAO,MAAM,YAAY,WACvB,MAAM,UACN;AACR,QAAM,QAAQ,OAAO,MAAM,SAAS,WAAW,MAAM,OAAO,OAAO,MAAM,UAAU,WAAW,MAAM,QAAQ;AAC5G,QAAM,WAAW,OAAO,MAAM,aAAa,YAAY,MAAM,WAAW,MAAM,WAAW;AACzF,QAAM,SAAoB;AAAA,IACxB;AAAA,IACA,MAAM;AAAA,IACN;AAAA,IACA;AAAA,IACA,aAAa,OAAO,MAAM,gBAAgB,
WAAW,KAAK,cAAc,OAAO,MAAM,gBAAgB,WAAW,MAAM,cAAc;AAAA,IACpI,QAAQ,OAAO,MAAM,WAAW,WAAW,KAAK,SAAS,OAAO,MAAM,WAAW,WAAW,MAAM,SAAS;AAAA,IAC3G;AAAA,EACF;AACA,MAAI,OAAO,MAAM,SAAS,YAAY,MAAM,SAAS,QAAS,QAAO,OAAO;AAC5E,MAAI,QAAQ,OAAO,SAAS,SAAU,CAAC,OAAmC,UAAU;AACpF,aAAW,KAAK,uBAAuB;AACrC,QAAI,MAAM,CAAC,MAAM,OAAW,CAAC,OAAmC,CAAC,IAAI,MAAM,CAAC;AAAA,aACnE,QAAQ,KAAK,CAAC,MAAM,OAAW,CAAC,OAAmC,CAAC,IAAI,KAAK,CAAC;AAAA,EACzF;AACA,SAAO;AACT;AAEA,SAAS,wBAAwB,GAAuC;AACtE,QAAM,MAAiB;AAAA,IACrB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW;AAAA,IACxD,OAAO,OAAO,EAAE,UAAU,WAAW,EAAE,QAAS,OAAQ,EAAwB,SAAS,WAAY,EAAuB,OAAO;AAAA,IACnI,aAAa,OAAO,EAAE,gBAAgB,WAAW,EAAE,cAAc;AAAA,IACjE,QAAQ,OAAO,EAAE,WAAW,WAAW,EAAE,SAAS;AAAA,IAClD,SACE,OAAO,EAAE,YAAY,WAAW,EAAE,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW;AAAA,EAC9F;AACA,SAAO,KAAK,CAAC,EAAE,QAAQ,CAAC,MAAM;AAC5B,QAAI,CAAC,CAAC,MAAM,QAAQ,YAAY,SAAS,QAAQ,eAAe,UAAU,WAAW,YAAY,WAAW,WAAW,EAAE,SAAS,CAAC,GAAG;AACpI,MAAC,IAAgC,CAAC,IAAI,EAAE,CAAC;AAAA,IAC3C;AAAA,EACF,CAAC;AACD,SAAO;AACT;AAEA,SAAS,mBAAmB,GAA8C;AACxE,QAAM,KAAK,OAAO,EAAE,OAAO,YAAY,EAAE,KAAK,EAAE,KAAK;AACrD,QAAM,OAAO,EAAE,SAAS,UAAU,UAAU;AAC5C,QAAM,WAAW,OAAO,EAAE,aAAa,YAAY,EAAE,WAAW,EAAE,WAAW;AAC7E,QAAM,SAAoB;AAAA,IACxB;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAO,OAAO,EAAE,UAAU,WAAW,EAAE,QAAS,OAAO,EAAE,SAAS,WAAW,EAAE,OAAO;AAAA,IACtF,aAAa,OAAO,EAAE,gBAAgB,WAAW,EAAE,cAAc;AAAA,IACjE,QAAQ,OAAO,EAAE,WAAW,WAAW,EAAE,SAAS;AAAA,IAClD,SAAS,OAAO,EAAE,YAAY,WAAW,EAAE,UAAW,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW;AAAA,EACtG;AACA,SAAO,KAAK,CAAC,EAAE,QAAQ,CAAC,MAAM;AAC5B,QAAI,CAAC,CAAC,MAAM,QAAQ,YAAY,SAAS,QAAQ,eAAe,UAAU,WAAW,UAAU,EAAE,SAAS,CAAC,GAAG;AAC5G,MAAC,OAAmC,CAAC,IAAI,EAAE,CAAC;AAAA,IAC9C;AAAA,EACF,CAAC;AACD,SAAO;AACT;;;ACjJA,OAAO,YAAY;AAYnB,SAAS,UAAU,QAA2B;AAC5C,QAAM,MAAM,OAAO,UAAU,QAAQ,IAAI,kBAAkB;AAC3D,MAAI,CAAC,IAAK,OAAM,IAAI,MAAM,qEAAqE;AAC/F,SAAO;AACT;AAEA,SAAS,0BAA0B,QAAyD;AAC1F,QAAM,OAA6C,EAAE,QAAQ,UAAU,MAAM,EAAE;AAC/E,MAAI,OAAO,OAAO,YAAY,YAAY,OAAO,QAAS,MAAK,UAAU,OAAO;AAChF,SAAO;AACT;AAEA,SAAS,iBACP,GACoD;AACpD,MAAI,EAAE,SAAS;AACb,WAAO,EAAE,MAAM,QAAQ,SAAS,EAAE,SAAS,cAAc,EAAE,aAAa;AAC1E,MAAI,EAAE,SAAS,eAAe,gBAAgB,KAAK,EAAE,YAAY,QAAQ;AACvE,WAAO;AAAA,MACL,MAAM;AAAA,MACN,SAAS,EAAE,WAAW;AAAA,MACtB,YAAY,EAAE,WAAW,IAAI,CAAC,QAAQ;AAAA,QACpC,IAAI,GAAG;AAAA,QACP,MAAM;AAAA,QACN,UAAU,EAAE,MAAM,GAAG,SAAS,MAAM,WAAW,GAAG,SAAS,UAAU;AAAA,MACvE,EAAE;AAAA,IACJ;AAAA,EACF;AACA,SAAO,EAAE,MAAM,EAAE,MAAM,SAAU,EAAkB,QAAQ;AAC7D;AAEO,SAAS,uBAAuB,QAA+B;AACpE,QAAM,SAAS,IAAI,OAAO,0BAA0B,MAAM,CAAC;AAC3D,QAAM,QAAQ,OAAO,SAAS,QAAQ,IAAI,gBAAgB;AAC1D,QAAM,cAAc,OAAO,eAAe;AAE1C,SAAO;AAAA,IACL,IAAI,OAAO;AAAA,IACX,MAAM;AAAA,IACN,MAAM,KAAK,UAA8C;AACvD,YAAM,OAAO,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,QAChD;AAAA,QACA;AAAA,QACA,UAAU,SAAS,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,SAAS,EAAE,QAAQ,EAAE;AAAA,MACtE,CAAC;AACD,YAAM,UAAU,KAAK,QAAQ,CAAC,GAAG,SAAS,WAAW;AACrD,YAAM,QAAQ,KAAK,QACf,EAAE,cAAc,KAAK,MAAM,eAAe,kBAAkB,KAAK,MAAM,kBAAkB,IACzF;AACJ,aAAO,EAAE,SAAS,MAAM;AAAA,IAC1B;AAAA,IACA,MAAM,cACJ,UACA,OACA,UAC8B;AAC9B,YAAM,OAAO,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,QAChD;AAAA,QACA;AAAA,QACA,UAAU,SAAS,IAAI,gBAAgB;AAAA,QACvC,OAAO,MAAM,IAAI,CAAC,OAAO;AAAA,UACvB,MAAM;AAAA,UACN,UAAU;AAAA,YACR,MAAM,EAAE,SAAS;AAAA,YACjB,aAAa,EAAE,SAAS;AAAA,YACxB,YAAa,EAAE,SAAS,cAAc;AAAA,UACxC;AAAA,QACF,EAAE;AAAA,MACJ,CAAC;AACD,YAAM,MAAM,KAAK,QAAQ,CAAC,GAAG;AAC7B,YAAM,QAAQ,KAAK,QACf,EAAE,cAAc,KAAK,MAAM,eAAe,kBAAkB,KAAK,MAAM,kBAAkB,IACzF;AACJ,aAAO;AAAA,QACL,SAAS;AAAA,UACP,MAAM;AAAA,UACN,SAAS,KAAK,WAAW;AAAA,UACzB,YAAY,KAAK,YAAY,IAAI,CAAC,QAAQ;AAAA,YACxC,IAAI,GAAG;AAAA,YACP,MAAM;AAAA,YACN,UAAU;AAAA,cACR,MAAM,GAAG,UAAU,QAAQ;AAAA,cAC3B,W
AAW,GAAG,UAAU,aAAa;AAAA,YACvC;AAAA,UACF,EAAE;AAAA,QACJ;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,SAAS,wBAAwB,QAA+B;AACrE,QAAM,SAAS,IAAI,OAAO,0BAA0B,MAAM,CAAC;AAC3D,QAAM,QAAS,OAAO,SAAoB;AAE1C,SAAO;AAAA,IACL,IAAI,OAAO;AAAA,IACX,MAAM;AAAA,IACN,MAAM,OAA4B;AAChC,YAAM,IAAI,MAAM,+DAA+D;AAAA,IACjF;AAAA,IACA,MAAM,cAAc,SAA8E;AAChG,YAAM,OAAO,MAAM,OAAO,OAAO,SAAS;AAAA,QACxC;AAAA,QACA,QAAQ,QAAQ;AAAA,QAChB,MAAO,QAAQ,QAAoD;AAAA,QACnE,GAAG,QAAQ,KAAK;AAAA,QAChB,iBAAiB;AAAA,MACnB,CAAC;AACD,YAAM,MAAM,KAAK,OAAO,CAAC,GAAG,OAAO;AACnC,aAAO,EAAE,IAAI;AAAA,IACf;AAAA,EACF;AACF;AAEO,SAAS,mBAAmB,QAA+B;AAChE,MAAI,OAAO,SAAS,QAAS,QAAO,wBAAwB,MAAM;AAClE,SAAO,uBAAuB,MAAM;AACtC;;;ACjIA,IAAM,oBAAoB;AAE1B,SAAS,mBAAmB,QAA+B;AACzD,SAAO,mBAAmB,MAAM;AAClC;AAEA,IAAM,YAA+D;AAAA,EACnE,QAAQ;AAAA,EACR,CAAC,iBAAiB,GAAG;AACvB;AAEO,SAAS,aAAa,QAA+B;AAC1D,QAAM,KAAK,OAAO,YAAY,IAAI,YAAY;AAC9C,QAAM,KAAK,UAAU,CAAC;AACtB,MAAI,CAAC,IAAI;AACP,UAAM,YAAY,CAAC,GAAG,oBAAI,IAAI,CAAC,GAAG,OAAO,KAAK,SAAS,GAAG,qBAAqB,CAAC,CAAC,EAAE,KAAK,EAAE,KAAK,IAAI;AACnG,UAAM,IAAI;AAAA,MACR,6BAA6B,OAAO,QAAQ,gBAAgB,SAAS;AAAA,IACvE;AAAA,EACF;AACA,SAAO,GAAG,MAAM;AAClB;AAEO,SAAS,iBAAiB,MAAc,SAAkD;AAC/F,YAAU,KAAK,YAAY,CAAC,IAAI;AAClC;;;AChBO,SAAS,kBAAkB,SAAiD;AACjF,QAAM,EAAE,WAAW,QAAQ,IAAI,gBAAgB,QAAQ,UAAU;AACjE,QAAM,MAAM,oBAAI,IAAwB;AAExC,aAAW,UAAU,SAAS;AAC5B,QAAI;AACF,YAAM,SAAS,aAAa,MAAM;AAClC,UAAI,IAAI,OAAO,IAAI,MAAM;AAAA,IAC3B,SAAS,KAAK;AACZ,cAAQ,KAAK,yBAAyB,OAAO,EAAE,MAAM,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG,CAAC,EAAE;AAAA,IACzG;AAAA,EACF;AAEA,SAAO;AAAA,IACL,IAAI,IAAoC;AACtC,aAAO,IAAI,IAAI,EAAE;AAAA,IACnB;AAAA,IACA,YAAgC;AAC9B,UAAI,IAAI,IAAI,SAAS,EAAG,QAAO;AAC/B,aAAO,IAAI,OAAO,IAAI,CAAC,GAAG,IAAI,KAAK,CAAC,EAAE,CAAC,IAAI;AAAA,IAC7C;AAAA,IACA,MAAgB;AACd,aAAO,CAAC,GAAG,IAAI,KAAK,CAAC;AAAA,IACvB;AAAA,EACF;AACF;;;AC/BA,IAAM,uBAAuB,oBAAI,IAA8B;AAMxD,SAAS,0BAA0B,cAAsB,SAAiC;AAC/F,uBAAqB,IAAI,aAAa,YAAY,GAAG,OAAO;AAC9D;AAKO,SAAS,oBAAoB,cAAoD;AACtF,SAAO,qBAAqB,IAAI,aAAa,YAAY,CAAC;AAC5D;;;AClBA,SAAS,kBAAkB;AAK3B,IAAM,gBAAgB;AAef,SAAS,6BACd,SACe;AACf,QAAM,EAAE,YAAY,UAAU,UAAU,IAAI;AAC5C,QAAM,EAAE,WAAW,QAAQ,IAAI,gBAAgB,cAAc,IAAI;AACjE,QAAM,gBAAgB,QAAQ,KAAK,CAAC,MAAM,EAAE,OAAO,SAAS,KAAK,QAAQ,CAAC;AAE1E,MAAI,CAAC,eAAe;AAClB,UAAMA,SACJ,YAAY,QAAQ,IAAI,gBAAgB;AAC1C,UAAMC,UAAS,aAAa,QAAQ,IAAI;AACxC,WAAO,IAAI,WAAW;AAAA,MACpB,OAAAD;AAAA,MACA,aAAa;AAAA,MACb,GAAIC,UAAS,EAAE,QAAAA,QAAO,IAAI,CAAC;AAAA,IAC7B,CAAC;AAAA,EACH;AAEA,QAAM,WAAY,cAAwC,YAAY;AACtE,QAAM,mBAAmB,oBAAoB,QAAQ;AACrD,MAAI,kBAAkB;AACpB,UAAM,SAAS;AAAA,MACb,GAAG;AAAA,MACH,OACE,YACA,cAAc,UACb,aAAa,QAAQ,QAAQ,IAAI,aAAa,uBAAuB,cAAc;AAAA,MACtF,aACE,OAAO,cAAc,gBAAgB,WACjC,cAAc,cACd;AAAA,IACR;AACA,WAAO,iBAAiB,MAAM;AAAA,EAChC;AAEA,QAAM,QACJ,YACA,eAAe,SACf,QAAQ,IAAI,gBACZ;AAEF,QAAM,SACJ,aAAa,eAAe,UAAU,QAAQ,IAAI;AAEpD,QAAM,cACJ,OAAO,eAAe,gBAAgB,WAAW,cAAc,cAAc;AAE/E,QAAM,UAAU,eAAe;AAE/B,QAAM,qBAAkE;AAAA,IACtE;AAAA,IACA;AAAA,IACA,GAAI,SAAS,EAAE,OAAO,IAAI,CAAC;AAAA,IAC3B,GAAI,UAAU,EAAE,eAAe,EAAE,QAAQ,EAAE,IAAI,CAAC;AAAA,EAClD;AAEA,SAAO,IAAI,WAAW,kBAAkB;AAC1C;;;AC9EA,IAAM,iBAAiB,oBAAI,IAAY;AAEvC,IAAM,qBAAqB,CAAC,YAAY;AAMjC,SAAS,4BAA4B,OAAqC;AAC/E,QAAM,WAAW,SAAS,OAAO,CAAC,IAAI,MAAM,QAAQ,KAAK,IAAI,QAAQ,CAAC,KAAK;AAC3E,QAAM,WAAW,SAAS;AAAA,IACxB,CAAC,MAAmB,OAAO,MAAM,YAAY,EAAE,SAAS;AAAA,EAC1D;AACA,SAAO,SAAS,SAAS,IAAI,WAAW;AAC1C;AAQA,eAAsB,kBACpB,mBACe;AACf,QAAM,WAAW,qBAAqB;AACtC,aAAW,OAAO,UAAU;AAC1B,QAAI,eAAe,IAAI,GAAG,EAAG;AAC7B,mBAAe,IAAI,GAAG;AACtB,QAAI;AACF,YAAM,IAAI,MAAM;AAAA;AAAA,QAA0B;AAAA;AAC1C,UACE,OAAQ,EACL,yBAAyB,YAC5B;AACA,QAAC,EAA2C,qBAAqB;AAAA,MACnE;AAAA,IACF,QAAQ;AAAA,IAER;AAAA,EACF;AACF;","names":["model","apiKey"]}

package/dist/llmAdapter.d.ts
ADDED

@@ -0,0 +1,21 @@
/**
 * Build LangChain ChatModel from agent.yaml llm section.
 * Supports single object, default + instances, and flat keyed configs.
 * When provider is registered by an extension, uses that extension's ChatModel;
 * otherwise uses ChatOpenAI.
 */
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
export interface CreateChatModelFromLlmConfigOptions {
    /** agent.yaml llm section (raw or parsed); compatible with AgentConfigLlmSection / AgentConfigLlm */
    llmSection?: unknown;
    /** Override model from env */
    modelEnv?: string;
    /** Override API key from env */
    apiKeyEnv?: string;
}
/**
 * Create a LangChain ChatModel from agent config llm section.
 * Uses extension-registered ChatModel when available; otherwise ChatOpenAI.
 */
export declare function createChatModelFromLlmConfig(options: CreateChatModelFromLlmConfigOptions): BaseChatModel;
//# sourceMappingURL=llmAdapter.d.ts.map

package/dist/llmAdapter.d.ts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"llmAdapter.d.ts","sourceRoot":"","sources":["../src/llmAdapter.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAGH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,6CAA6C,CAAC;AAMjF,MAAM,WAAW,mCAAmC;IAClD,qGAAqG;IACrG,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB,8BAA8B;IAC9B,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,gCAAgC;IAChC,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;;GAGG;AACH,wBAAgB,4BAA4B,CAC1C,OAAO,EAAE,mCAAmC,GAC3C,aAAa,CAuDf"}

package/dist/loadLLMExtensions.d.ts
ADDED

@@ -0,0 +1,18 @@
/**
 * Load optional LLM extensions by npm package name (e.g. wallee-llm).
 * Call before createChatModelFromLlmConfig when using extension providers.
 * Config llm.type = npm package name(s); those packages are loaded dynamically. No extensions field.
 */
/**
 * Resolve llm.type to a list of npm package names to load.
 * type is the npm package name or an array of package names; they are loaded directly (no mapping).
 */
export declare function resolveLLMExtensionPackages(types?: string | string[]): string[];
/**
 * Dynamically load LLM extensions by npm package name.
 * Each package must export registerLLMExtension() and will register its provider(s) and ChatModel factory.
 * Safe to call multiple times; each package is loaded at most once.
 * @param extensionPackages npm package names; defaults to ["wallee-llm"] when omitted
 */
export declare function loadLLMExtensions(extensionPackages?: string[]): Promise<void>;
//# sourceMappingURL=loadLLMExtensions.d.ts.map
|
|
package/dist/loadLLMExtensions.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"loadLLMExtensions.d.ts","sourceRoot":"","sources":["../src/loadLLMExtensions.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAMH;;;GAGG;AACH,wBAAgB,2BAA2B,CAAC,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,GAAG,MAAM,EAAE,CAM/E;AAED;;;;;GAKG;AACH,wBAAsB,iBAAiB,CACrC,iBAAiB,CAAC,EAAE,MAAM,EAAE,GAC3B,OAAO,CAAC,IAAI,CAAC,CAiBf"}
package/dist/providers/index.d.ts
ADDED
@@ -0,0 +1,7 @@
/**
 * Supports OpenAI-compatible and extension providers.
 */
import type { LLMConfig, ILLMClient } from "../types.js";
export declare function createClient(config: LLMConfig): ILLMClient;
export declare function registerProvider(name: string, factory: (config: LLMConfig) => ILLMClient): void;
//# sourceMappingURL=index.d.ts.map
package/dist/providers/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/providers/index.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,SAAS,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AAczD,wBAAgB,YAAY,CAAC,MAAM,EAAE,SAAS,GAAG,UAAU,CAU1D;AAED,wBAAgB,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC,MAAM,EAAE,SAAS,KAAK,UAAU,GAAG,IAAI,CAE/F"}
package/dist/providers/openai.d.ts
ADDED
@@ -0,0 +1,9 @@
/**
 * OpenAI-compatible format: chat (/v1/chat/completions) and image.
 * Supports baseURL for Azure, local proxies, and other compatible endpoints.
 */
import type { LLMConfig, ILLMClient } from "../types.js";
export declare function createOpenAIChatClient(config: LLMConfig): ILLMClient;
export declare function createOpenAIImageClient(config: LLMConfig): ILLMClient;
export declare function createOpenAIClient(config: LLMConfig): ILLMClient;
//# sourceMappingURL=openai.d.ts.map
package/dist/providers/openai.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../src/providers/openai.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAGH,OAAO,KAAK,EACV,SAAS,EAIT,UAAU,EAIX,MAAM,aAAa,CAAC;AAiCrB,wBAAgB,sBAAsB,CAAC,MAAM,EAAE,SAAS,GAAG,UAAU,CA2DpE;AAED,wBAAgB,uBAAuB,CAAC,MAAM,EAAE,SAAS,GAAG,UAAU,CAsBrE;AAED,wBAAgB,kBAAkB,CAAC,MAAM,EAAE,SAAS,GAAG,UAAU,CAGhE"}
package/dist/types.d.ts
ADDED
@@ -0,0 +1,140 @@
/**
 * Agent LLM: currently only supports the OpenAI-compatible format (/v1/chat/completions, etc.).
 * Multi-instance; each LLM has an id and type. Optional baseURL targets other compatible endpoints.
 */
/** LLM type: chat = conversation, image = image generation (OpenAI-compatible format) */
export type LLMType = "chat" | "image";
/** Single LLM config: id, type, model; only the OpenAI-compatible API is supported */
export interface LLMConfig {
    /** Unique id, used to fetch the instance from the registry */
    id: string;
    /** chat | image */
    type: LLMType;
    /** Fixed to openai or openai-compatible; currently the only supported format */
    provider: string;
    /** Model name, e.g. gpt-4o-mini, dall-e-3 */
    model?: string;
    /** Temperature etc.; mostly used for chat */
    temperature?: number;
    /** API key; may also come from an environment variable */
    apiKey?: string;
    /** baseURL of an OpenAI-compatible endpoint (e.g. Azure, local proxies, other /v1-compatible vendors) */
    baseURL?: string;
    /** Other options (passed through) */
    [key: string]: unknown;
}
/** llm section in agent.yaml: flat (one key per model name), default + instances, or a single object */
export interface AgentConfigLlmSection {
    /** name (id) of the model used by default */
    default?: string;
    /** npm package name or array of names, loaded dynamically; e.g. "wallee-llm" or ["wallee-llm"] */
    type?: string | string[];
    /** Multiple LLM configs (array form) */
    instances?: LLMConfig[];
    /** Single-object compatibility: provider, model, etc., parsed as a chat instance with id=default */
    provider?: string;
    model?: string;
    name?: string;
    temperature?: number;
    apiKey?: string;
    baseURL?: string;
    base_url?: string;
    /** Flat: a name such as strong/medium/fast → config (provider, base_url, name, options) */
    [key: string]: unknown;
}
/** Single chat message */
export interface ChatMessage {
    role: "system" | "user" | "assistant";
    content: string;
}
/** Minimal chat result */
export interface ChatResult {
    content: string;
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
    };
}
/** OpenAI-compatible tool definition (function) */
export interface ToolDefinition {
    type: "function";
    function: {
        name: string;
        description?: string;
        parameters?: object;
    };
}
/** Message with tool calls (assistant may carry tool_calls; tool carries a tool result) */
export type ChatWithToolsMessage = ChatMessage | {
    role: "tool";
    content: string;
    tool_call_id: string;
} | {
    role: "assistant";
    content?: string | null;
    tool_calls?: Array<{
        id: string;
        type: "function";
        function: {
            name: string;
            arguments: string;
        };
    }>;
};
/** Chat result with tool calls */
export interface ChatWithToolsResult {
    message: {
        role: "assistant";
        content?: string | null;
        tool_calls?: Array<{
            id: string;
            type: "function";
            function: {
                name: string;
                arguments: string;
            };
        }>;
    };
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
    };
}
/** Image generation result */
export interface ImageResult {
    url?: string;
    b64?: string;
}
/**
 * Minimal call interface for a single LLM instance.
 * chat is provided when type=chat; generateImage when type=image.
 * Chat instances may optionally provide chatWithTools to support ReAct/Agent tool calling.
 */
export interface ILLMClient {
    readonly id: string;
    readonly type: LLMType;
    /** Chat (available when type=chat) */
    chat(messages: ChatMessage[]): Promise<ChatResult>;
    /**
     * Chat with tool calls (optional when type=chat; used for ReAct/Agent).
     * If not implemented, callers can fall back to looping plain chat or use another client.
     */
    chatWithTools?(messages: ChatWithToolsMessage[], tools: ToolDefinition[], options?: {
        timeoutMs?: number;
    }): Promise<ChatWithToolsResult>;
    /** Image generation (available when type=image); otherwise it may throw or be ignored */
    generateImage?(options: {
        prompt: string;
        size?: string;
        n?: number;
    }): Promise<ImageResult>;
}
/**
 * LLM registry created from the llm section: fetch instances by id.
 */
export interface ILLMRegistry {
    get(id: string): ILLMClient | undefined;
    defaultId(): string | undefined;
    ids(): string[];
}
//# sourceMappingURL=types.d.ts.map
package/dist/types.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,6CAA6C;AAC7C,MAAM,MAAM,OAAO,GAAG,MAAM,GAAG,OAAO,CAAC;AAEvC,gDAAgD;AAChD,MAAM,WAAW,SAAS;IACxB,8BAA8B;IAC9B,EAAE,EAAE,MAAM,CAAC;IACX,mBAAmB;IACnB,IAAI,EAAE,OAAO,CAAC;IACd,8CAA8C;IAC9C,QAAQ,EAAE,MAAM,CAAC;IACjB,iCAAiC;IACjC,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,kBAAkB;IAClB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,sBAAsB;IACtB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,qDAAqD;IACrD,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,eAAe;IACf,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CACxB;AAED,uEAAuE;AACvE,MAAM,WAAW,qBAAqB;IACpC,uBAAuB;IACvB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,uDAAuD;IACvD,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;IACzB,sBAAsB;IACtB,SAAS,CAAC,EAAE,SAAS,EAAE,CAAC;IACxB,mDAAmD;IACnD,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,wEAAwE;IACxE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CACxB;AAED,aAAa;AACb,MAAM,WAAW,WAAW;IAC1B,IAAI,EAAE,QAAQ,GAAG,MAAM,GAAG,WAAW,CAAC;IACtC,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,aAAa;AACb,MAAM,WAAW,UAAU;IACzB,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,CAAC,EAAE;QAAE,YAAY,CAAC,EAAE,MAAM,CAAC;QAAC,gBAAgB,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC;CAC9D;AAED,+BAA+B;AAC/B,MAAM,WAAW,cAAc;IAC7B,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,UAAU,CAAC,EAAE,MAAM,CAAC;KACrB,CAAC;CACH;AAED,wDAAwD;AACxD,MAAM,MAAM,oBAAoB,GAC5B,WAAW,GACX;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,YAAY,EAAE,MAAM,CAAA;CAAE,GACvD;IACE,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;IACxB,UAAU,CAAC,EAAE,KAAK,CAAC;QACjB,EAAE,EAAE,MAAM,CAAC;QACX,IAAI,EAAE,UAAU,CAAC;QACjB,QAAQ,EAAE;YAAE,IAAI,EAAE,MAAM,CAAC;YAAC,SAAS,EAAE,MAAM,CAAA;SAAE,CAAC;KAC/C,CAAC,CAAC;CACJ,CAAC;AAEN,iBAAiB;AACjB,MAAM,WAAW,mBAAmB;IAClC,OAAO,EAAE;QACP,IAAI,EAAE,WAAW,CAAC;QAClB,OAAO,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;QACxB,UAAU,CAAC,EAAE,KAAK,CAAC;YACjB,EAAE,EAAE,MAAM,CAAC;YACX,IAAI,EAAE,UAAU,CAAC;YACjB,QAAQ,EAAE;gBAAE,IAAI,EAAE,MAAM,CAAC;gBAAC,SAAS,EAAE,MAAM,CAAA;aAAE,CAAC;SAC/C,CAAC,CAAC;KACJ,CAAC;IACF,KAAK,CAAC,EAAE;QAAE,YAAY,CAAC,EAAE,MAAM,CAAC;QAAC,gBAAgB,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC;CAC9D;AAED,aAAa;AACb,MAAM,WAAW,WAAW;IAC1B,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,GAAG,CAAC,EAAE,MAAM,CAAC;CACd;AAED;;;;GAIG;AACH,MAAM,WAAW,UAAU;IACzB,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IACpB,QAAQ,CAAC,IAAI,EAAE,OAAO,CAAC;IACvB,wBAAwB;IACxB,IAAI,CAAC,QAAQ,EAAE,WAAW,EAAE,GAAG,OAAO,CAAC,UAAU,CAAC,CAAC;IACnD;;;OAGG;IACH,aAAa,CAAC,CACZ,QAAQ,EAAE,oBAAoB,EAAE,EAChC,KAAK,EAAE,cAAc,EAAE,EACvB,OAAO,CAAC,EAAE;QAAE,SAAS,CAAC,EAAE,MAAM,CAAA;KAAE,GAC/B,OAAO,CAAC,mBAAmB,CAAC,CAAC;IAChC,oCAAoC;IACpC,aAAa,CAAC,CAAC,OAAO,EAAE;QAAE,MAAM,EAAE,MAAM,CAAC;QAAC,IAAI,CAAC,EAAE,MAAM,CAAC;QAAC,CAAC,CAAC,EAAE,MAAM,CAAA;KAAE,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC;CAC9F;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B,GAAG,CAAC,EAAE,EAAE,MAAM,GAAG,UAAU,GAAG,SAAS,CAAC;IACxC,SAAS,IAAI,MAAM,GAAG,SAAS,CAAC;IAChC,GAAG,IAAI,MAAM,EAAE,CAAC;CACjB"}
package/package.json
ADDED
@@ -0,0 +1,45 @@
{
  "name": "@easynet/agent-llm",
  "version": "1.0.1",
  "description": "Agent LLM: multi-provider, multi-model, simple chat/image API. Consumes agent.yaml llm section.",
  "type": "module",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "scripts": {
    "build": "tsup && tsc -p tsconfig.dts.json",
    "dev": "tsup --watch",
    "test": "vitest run",
    "test:watch": "vitest",
    "test:llm-live": "vitest run llm-live.test.ts",
    "test:link": "vitest run -t 'connects to CIS'",
    "typecheck": "tsc --noEmit"
  },
  "dependencies": {
    "@langchain/core": ">=0.3.0",
    "@langchain/openai": "^1.2.3",
    "axios": "^1.7.0",
    "openai": "^4.77.0"
  },
  "devDependencies": {
    "@semantic-release/git": "^10.0.1",
    "@types/node": "^22.10.0",
    "semantic-release": "^24.2.0",
    "tsup": "^8.3.5",
    "typescript": "^5.7.2",
    "vitest": "^2.1.8"
  },
  "files": [
    "dist"
  ],
  "publishConfig": {
    "access": "public"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/easynet-world/agent-llm.git"
  },
  "engines": {
    "node": ">=18.0.0"
  },
  "license": "MIT"
}
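Given `"type": "module"` and the single `./dist/index.js` entry, the package ships as ESM for Node >= 18; a consumer would import it as below, assuming the index barrel re-exports the names declared above:

```ts
// ESM import; from CommonJS you would need a dynamic import() instead.
import {
  createChatModelFromLlmConfig,
  loadLLMExtensions,
} from "@easynet/agent-llm";
```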