@easynet/agent-model 1.0.53
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +72 -0
- package/dist/api/create-agent-llm.d.ts +15 -0
- package/dist/api/create-agent-llm.d.ts.map +1 -0
- package/dist/api/create-embed-fn.d.ts +10 -0
- package/dist/api/create-embed-fn.d.ts.map +1 -0
- package/dist/api/get-default-llm-config.d.ts +16 -0
- package/dist/api/get-default-llm-config.d.ts.map +1 -0
- package/dist/api/get-default-vlm-config.d.ts +15 -0
- package/dist/api/get-default-vlm-config.d.ts.map +1 -0
- package/dist/chunk-4OLU43SH.js +165 -0
- package/dist/chunk-4OLU43SH.js.map +1 -0
- package/dist/chunk-5YPJ43HW.js +205 -0
- package/dist/chunk-5YPJ43HW.js.map +1 -0
- package/dist/chunk-EPVJLBGC.js +118 -0
- package/dist/chunk-EPVJLBGC.js.map +1 -0
- package/dist/chunk-FZKECZUY.js +148 -0
- package/dist/chunk-FZKECZUY.js.map +1 -0
- package/dist/chunk-G7MKWPEI.js +14 -0
- package/dist/chunk-G7MKWPEI.js.map +1 -0
- package/dist/chunk-HCU4AWIV.js +19 -0
- package/dist/chunk-HCU4AWIV.js.map +1 -0
- package/dist/chunk-HSU6XZOI.js +354 -0
- package/dist/chunk-HSU6XZOI.js.map +1 -0
- package/dist/chunk-K3JR2N4E.js +250 -0
- package/dist/chunk-K3JR2N4E.js.map +1 -0
- package/dist/chunk-PZ5AY32C.js +10 -0
- package/dist/chunk-PZ5AY32C.js.map +1 -0
- package/dist/chunk-SPDXNDDD.js +114 -0
- package/dist/chunk-SPDXNDDD.js.map +1 -0
- package/dist/chunk-TKIZELZQ.js +255 -0
- package/dist/chunk-TKIZELZQ.js.map +1 -0
- package/dist/chunk-VBXTOU4S.js +50 -0
- package/dist/chunk-VBXTOU4S.js.map +1 -0
- package/dist/chunk-YOOYQBGK.js +43 -0
- package/dist/chunk-YOOYQBGK.js.map +1 -0
- package/dist/cli/index.d.ts +3 -0
- package/dist/cli/index.d.ts.map +1 -0
- package/dist/cli/index.js +11 -0
- package/dist/cli/index.js.map +1 -0
- package/dist/cli/utils.d.ts +12 -0
- package/dist/cli/utils.d.ts.map +1 -0
- package/dist/config/index.d.ts +6 -0
- package/dist/config/index.d.ts.map +1 -0
- package/dist/config/index.js +28 -0
- package/dist/config/index.js.map +1 -0
- package/dist/config/loader.d.ts +23 -0
- package/dist/config/loader.d.ts.map +1 -0
- package/dist/config/parser.d.ts +5 -0
- package/dist/config/parser.d.ts.map +1 -0
- package/dist/config/yaml-utils.d.ts +13 -0
- package/dist/config/yaml-utils.d.ts.map +1 -0
- package/dist/config/yaml.d.ts +9 -0
- package/dist/config/yaml.d.ts.map +1 -0
- package/dist/connectivity/check.d.ts +18 -0
- package/dist/connectivity/check.d.ts.map +1 -0
- package/dist/connectivity/index.d.ts +3 -0
- package/dist/connectivity/index.d.ts.map +1 -0
- package/dist/connectivity/index.js +12 -0
- package/dist/connectivity/index.js.map +1 -0
- package/dist/connectivity/types.d.ts +13 -0
- package/dist/connectivity/types.d.ts.map +1 -0
- package/dist/extensions/index.d.ts +8 -0
- package/dist/extensions/index.d.ts.map +1 -0
- package/dist/extensions/index.js +38 -0
- package/dist/extensions/index.js.map +1 -0
- package/dist/extensions/loader.d.ts +12 -0
- package/dist/extensions/loader.d.ts.map +1 -0
- package/dist/extensions/npm-protocol.d.ts +36 -0
- package/dist/extensions/npm-protocol.d.ts.map +1 -0
- package/dist/index.d.ts +58 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +133 -0
- package/dist/index.js.map +1 -0
- package/dist/langchain/index.d.ts +17 -0
- package/dist/langchain/index.d.ts.map +1 -0
- package/dist/langchain/index.js +12 -0
- package/dist/langchain/index.js.map +1 -0
- package/dist/langchain/openai-compatible.d.ts +21 -0
- package/dist/langchain/openai-compatible.d.ts.map +1 -0
- package/dist/langchain/tool-choice.d.ts +9 -0
- package/dist/langchain/tool-choice.d.ts.map +1 -0
- package/dist/model/chat.d.ts +30 -0
- package/dist/model/chat.d.ts.map +1 -0
- package/dist/model/embed-parser.d.ts +25 -0
- package/dist/model/embed-parser.d.ts.map +1 -0
- package/dist/model/embedding.d.ts +25 -0
- package/dist/model/embedding.d.ts.map +1 -0
- package/dist/model/hub.d.ts +29 -0
- package/dist/model/hub.d.ts.map +1 -0
- package/dist/model/index.d.ts +13 -0
- package/dist/model/index.d.ts.map +1 -0
- package/dist/model/index.js +18 -0
- package/dist/model/index.js.map +1 -0
- package/dist/model/llm-parser.d.ts +10 -0
- package/dist/model/llm-parser.d.ts.map +1 -0
- package/dist/model/types.d.ts +31 -0
- package/dist/model/types.d.ts.map +1 -0
- package/dist/npm/command.d.ts +37 -0
- package/dist/npm/command.d.ts.map +1 -0
- package/dist/npm/index.d.ts +5 -0
- package/dist/npm/index.d.ts.map +1 -0
- package/dist/npm/index.js +40 -0
- package/dist/npm/index.js.map +1 -0
- package/dist/npm/install.d.ts +9 -0
- package/dist/npm/install.d.ts.map +1 -0
- package/dist/npm/provider.d.ts +15 -0
- package/dist/npm/provider.d.ts.map +1 -0
- package/dist/npm/version.d.ts +12 -0
- package/dist/npm/version.d.ts.map +1 -0
- package/dist/registry/chat-model.d.ts +10 -0
- package/dist/registry/chat-model.d.ts.map +1 -0
- package/dist/registry/index.d.ts +3 -0
- package/dist/registry/index.d.ts.map +1 -0
- package/dist/registry/index.js +11 -0
- package/dist/registry/index.js.map +1 -0
- package/dist/types.d.ts +20 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +98 -0
package/README.md
ADDED
@@ -0,0 +1,72 @@
# @easynet/agent-model

A unified Model Hub: create LangChain ChatModels, Embeddings, and other models from `models.yaml`.

## Minimal interface

```ts
import { createAgentLlm } from "@easynet/agent-model";

const llm = await createAgentLlm();
const result = await llm.invoke("hello");
console.log(result.content);
```

## Minimal YAML (use this first)

`models.yaml`

```yaml
llm:
  # Use the small instance by default
  default: small

  # Keep strong if you need it; otherwise delete it
  strong:
    provider: openai
    base_url: ${LLM_BASE_URL}
    model: ${LLM_MODEL}

  # The most common local Ollama setup
  small:
    provider: openai
    base_url: http://localhost:11434/v1
    model: qwen3:0.6b

embed:
  default: gemma
  gemma:
    provider: openai
    base_url: https://ollama-nvidia-8g-2.easynet.world/v1
    model: embeddinggemma:latest
    apiKey: ollama

runtime:
  check_connectivity: false
```

## Simplest example (annotated)

```ts
import { createAgentLlm } from "@easynet/agent-model";

async function main() {
  // 1) Read config from models.yaml in the current directory and create the model
  const llm = await createAgentLlm();

  // 2) Call the model directly
  const response = await llm.invoke("Introduce yourself in one sentence.");

  // 3) Print the text
  console.log(response.content);
}

main().catch(console.error);
```

## Optional extensions

- Register a custom provider: `registerChatModelProvider(...)`
- Get only the default model config: `getDefaultLlmConfig(...)`
- Build a model directly from a config object: `createChatModelFromLlmConfig(...)`
- Create an EmbedFn from models.yaml: `createEmbedFnFromModelsConfig(...)`
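A minimal sketch of how the optional APIs above compose, based on the type declarations later in this diff. It assumes the flat-entry `llmSection` shape from the YAML example, an ESM context (top-level await), and that these names are re-exported from the package root; `registerChatModelProvider` is omitted because its signature does not appear in this diff.

```ts
import {
  createChatModelFromLlmConfig,
  createEmbedFnFromModelsConfig,
  getDefaultLlmConfig,
} from "@easynet/agent-model";

// Build a ChatModel from a plain object instead of models.yaml.
const llm = createChatModelFromLlmConfig({
  llmSection: {
    default: "small",
    small: { provider: "openai", base_url: "http://localhost:11434/v1", model: "qwen3:0.6b" },
  },
});

// Read only the resolved default config, without creating a model.
const config = await getDefaultLlmConfig({ configPath: "models.yaml" });

// Build an embedding function from the embed: section, if one exists.
const embed = createEmbedFnFromModelsConfig("models.yaml");
if (embed) {
  const [vector] = await embed("hello");
  console.log(vector.length);
}
```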
package/dist/api/create-agent-llm.d.ts
ADDED
@@ -0,0 +1,15 @@
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { type ConnectionStatus } from "../connectivity/index.js";
export interface CreateAgentLlmOptions {
    configPath?: string;
    installNpmIfMissing?: boolean;
    checkConnectivity?: boolean;
    onConnectionStatus?: (status: ConnectionStatus) => void;
    connectivityTimeoutMs?: number;
}
/**
 * Create a LangChain ChatModel from models.yaml config.
 * Returns BaseChatModel compatible with LangChain's createAgent and other tools.
 */
export declare function createAgentLlm(configPathOrOptions?: string | CreateAgentLlmOptions): Promise<BaseChatModel>;
//# sourceMappingURL=create-agent-llm.d.ts.map
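A usage sketch for the options form of `createAgentLlm`; every field comes straight from `CreateAgentLlmOptions`, and the values are illustrative only.

```ts
import { createAgentLlm } from "@easynet/agent-model";

// All options are optional; defaults come from models.yaml and its runtime section.
const llm = await createAgentLlm({
  configPath: "./models.yaml",
  installNpmIfMissing: true,
  checkConnectivity: true,
  connectivityTimeoutMs: 5000,
  onConnectionStatus: (status) => console.log("connectivity:", status),
});

console.log((await llm.invoke("ping")).content);
```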
package/dist/api/create-agent-llm.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"create-agent-llm.d.ts","sourceRoot":"","sources":["../../src/api/create-agent-llm.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,6CAA6C,CAAC;AACjF,OAAO,EAIL,KAAK,gBAAgB,EACtB,MAAM,0BAA0B,CAAC;AA4DlC,MAAM,WAAW,qBAAqB;IACpC,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,mBAAmB,CAAC,EAAE,OAAO,CAAC;IAC9B,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,kBAAkB,CAAC,EAAE,CAAC,MAAM,EAAE,gBAAgB,KAAK,IAAI,CAAC;IACxD,qBAAqB,CAAC,EAAE,MAAM,CAAC;CAChC;AAgFD;;;GAGG;AACH,wBAAsB,cAAc,CAClC,mBAAmB,CAAC,EAAE,MAAM,GAAG,qBAAqB,GACnD,OAAO,CAAC,aAAa,CAAC,CAmCxB"}
package/dist/api/create-embed-fn.d.ts
ADDED
@@ -0,0 +1,10 @@
export type EmbedFn = (input: string | string[]) => Promise<number[][]>;
/**
 * Load models.yaml and build an EmbedFn from the `embed:` section.
 * Returns undefined if no embed section is configured.
 *
 * @param configPath - Path to models.yaml
 * @param embedId - Which embed instance to use (defaults to the `default:` key)
 */
export declare function createEmbedFnFromModelsConfig(configPath: string, embedId?: string): EmbedFn | undefined;
//# sourceMappingURL=create-embed-fn.d.ts.map
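Because `createEmbedFnFromModelsConfig` returns `undefined` when `models.yaml` has no `embed:` section, callers should guard the result; a minimal sketch:

```ts
import { createEmbedFnFromModelsConfig } from "@easynet/agent-model";

const embed = createEmbedFnFromModelsConfig("models.yaml");
if (!embed) throw new Error("models.yaml has no embed: section");

// EmbedFn accepts a string or string[]; one vector comes back per input.
const vectors = await embed(["first text", "second text"]);
console.log(vectors.length, vectors[0]?.length);
```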
package/dist/api/create-embed-fn.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"create-embed-fn.d.ts","sourceRoot":"","sources":["../../src/api/create-embed-fn.ts"],"names":[],"mappings":"AAOA,MAAM,MAAM,OAAO,GAAG,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,EAAE,KAAK,OAAO,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC;AAExE;;;;;;GAMG;AACH,wBAAgB,6BAA6B,CAC3C,UAAU,EAAE,MAAM,EAClB,OAAO,CAAC,EAAE,MAAM,GACf,OAAO,GAAG,SAAS,CAsBrB"}
package/dist/api/get-default-llm-config.d.ts
ADDED
@@ -0,0 +1,16 @@
/**
 * Load models.yaml, resolve npm providers, and return the default LLMConfig.
 * For use by CLIs (e.g. wallee-llm) that need config without creating a model.
 */
import type { LLMConfig } from "../types.js";
export interface GetDefaultLlmConfigOptions {
    /** Path to models.yaml. Default: process.cwd() + "/models.yaml" */
    configPath?: string;
    /** Install npm provider packages if missing. Default true. */
    installNpmIfMissing?: boolean;
}
/**
 * Returns the default LLM config from models.yaml (after resolving npm: providers), or null if no config file.
 */
export declare function getDefaultLlmConfig(options?: GetDefaultLlmConfigOptions): Promise<LLMConfig | null>;
//# sourceMappingURL=get-default-llm-config.d.ts.map
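A sketch of the CLI-style use the doc comment describes; the `provider`, `model`, and `baseURL` fields on `LLMConfig` are inferred from how they are read elsewhere in this diff.

```ts
import { getDefaultLlmConfig } from "@easynet/agent-model";

const config = await getDefaultLlmConfig({ installNpmIfMissing: false });
if (!config) {
  console.error("no models.yaml found");
  process.exit(1);
}
console.log(`${config.provider ?? "openai"} / ${config.model} @ ${config.baseURL ?? "default"}`);
```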
package/dist/api/get-default-llm-config.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"get-default-llm-config.d.ts","sourceRoot":"","sources":["../../src/api/get-default-llm-config.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAMH,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,aAAa,CAAC;AAE7C,MAAM,WAAW,0BAA0B;IACzC,mEAAmE;IACnE,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,8DAA8D;IAC9D,mBAAmB,CAAC,EAAE,OAAO,CAAC;CAC/B;AAED;;GAEG;AACH,wBAAsB,mBAAmB,CACvC,OAAO,GAAE,0BAA+B,GACvC,OAAO,CAAC,SAAS,GAAG,IAAI,CAAC,CAa3B"}
package/dist/api/get-default-vlm-config.d.ts
ADDED
@@ -0,0 +1,15 @@
/**
 * Load models.yaml, resolve npm providers, and return a default VLM (type=image) config.
 */
import type { LLMConfig } from "../types.js";
export interface GetDefaultVlmConfigOptions {
    /** Path to models.yaml. Default: process.cwd() + "/models.yaml" */
    configPath?: string;
    /** Install npm provider packages if missing. Default true. */
    installNpmIfMissing?: boolean;
}
/**
 * Returns the default VLM config (type=image) from models.yaml, or null if not configured.
 */
export declare function getDefaultVlmConfig(options?: GetDefaultVlmConfigOptions): Promise<LLMConfig | null>;
//# sourceMappingURL=get-default-vlm-config.d.ts.map
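The VLM variant mirrors the LLM one but selects the default `type: image` entry; a hedged sketch (that `type` is the models.yaml key is an assumption based on the doc comment):

```ts
import { getDefaultVlmConfig } from "@easynet/agent-model";

const vlm = await getDefaultVlmConfig({ configPath: "./models.yaml" });
console.log(vlm ? `VLM: ${vlm.model}` : "no type=image entry configured");
```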
package/dist/api/get-default-vlm-config.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"get-default-vlm-config.d.ts","sourceRoot":"","sources":["../../src/api/get-default-vlm-config.ts"],"names":[],"mappings":"AAAA;;GAEG;AAMH,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,aAAa,CAAC;AAE7C,MAAM,WAAW,0BAA0B;IACzC,mEAAmE;IACnE,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,8DAA8D;IAC9D,mBAAmB,CAAC,EAAE,OAAO,CAAC;CAC/B;AAED;;GAEG;AACH,wBAAsB,mBAAmB,CACvC,OAAO,GAAE,0BAA+B,GACvC,OAAO,CAAC,SAAS,GAAG,IAAI,CAAC,CAuB3B"}
package/dist/chunk-4OLU43SH.js
ADDED
@@ -0,0 +1,165 @@
import {
  getChatModelFactory
} from "./chunk-G7MKWPEI.js";
import {
  parseLlmSection
} from "./chunk-SPDXNDDD.js";
import {
  __export
} from "./chunk-PZ5AY32C.js";

// src/langchain/index.ts
var langchain_exports = {};
__export(langchain_exports, {
  applyToolChoiceAuto: () => applyToolChoiceAuto,
  createChatModelFromLlmConfig: () => createChatModelFromLlmConfig
});

// src/langchain/openai-compatible.ts
import { ChatOpenAI } from "@langchain/openai";
function createChatOpenAI(options) {
  const {
    baseURL,
    model,
    temperature = 0,
    apiKey,
    defaultHeaders,
    defaultQuery,
    httpAgent
  } = options;
  const config = {};
  if (baseURL) config.baseURL = baseURL;
  if (defaultHeaders) config.defaultHeaders = defaultHeaders;
  if (defaultQuery) config.defaultQuery = defaultQuery;
  if (httpAgent) config.httpAgent = httpAgent;
  return new ChatOpenAI({
    model,
    temperature,
    ...apiKey ? { apiKey } : {},
    ...Object.keys(config).length > 0 ? { configuration: config } : {}
  });
}

// src/langchain/tool-choice.ts
function hasTools(model, options) {
  const optTools = options?.tools;
  if (Array.isArray(optTools) && optTools.length > 0) return true;
  const defaultTools = model.defaultOptions?.tools;
  return Array.isArray(defaultTools) && defaultTools.length > 0;
}
function sanitizeOptions(model, options) {
  if (!options) return options;
  if (options.tool_choice !== "none") return options;
  if (!hasTools(model, options)) return options;
  return { ...options, tool_choice: "auto" };
}
function applyToolChoiceAuto(model) {
  const m = model;
  if (m.__agentLlmToolChoicePatched) return;
  m.__agentLlmToolChoicePatched = true;
  const origBindTools = m.bindTools?.bind(model);
  if (origBindTools) {
    m.bindTools = function(tools, opts) {
      const bound = origBindTools(tools, { ...opts, tool_choice: "auto" });
      applyToolChoiceAuto(
        bound
      );
      return bound;
    };
  }
  const origWithConfig = m.withConfig?.bind(model);
  if (origWithConfig) {
    m.withConfig = function(config) {
      const sanitized = sanitizeOptions(this, config) ?? config;
      const next = origWithConfig(sanitized);
      applyToolChoiceAuto(
        next
      );
      return next;
    };
  }
  const origInvoke = m.invoke?.bind(model);
  if (origInvoke) {
    m.invoke = function(input, options) {
      return origInvoke(input, sanitizeOptions(this, options));
    };
  }
  const origStream = m.stream?.bind(model);
  if (origStream) {
    m.stream = function(input, options) {
      return origStream(input, sanitizeOptions(this, options));
    };
  }
}

// src/langchain/index.ts
var DEFAULT_MODEL = "gpt-4o-mini";
function normalizeError(e, context) {
  if (e instanceof Error) return new Error(`${context}: ${e.message}`, { cause: e });
  return new Error(`${context}: ${String(e)}`);
}
function createChatModelFromLlmConfig(options = {}) {
  const { llmSection, modelEnv, apiKeyEnv } = options;
  let defaultId;
  let configs;
  try {
    const parsed = parseLlmSection(llmSection ?? null);
    defaultId = parsed.defaultId;
    configs = parsed.configs;
  } catch (e) {
    throw normalizeError(e, "Failed to parse llm section");
  }
  const config = configs.find((c) => c.id === defaultId) ?? configs[0];
  if (!config) {
    const model2 = modelEnv ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;
    const apiKey2 = apiKeyEnv ?? process.env.OPENAI_API_KEY;
    return createChatOpenAI({
      model: model2,
      temperature: 0,
      ...apiKey2 ? { apiKey: apiKey2 } : {}
    });
  }
  const provider = config.provider ?? "openai";
  const factory = getChatModelFactory(provider);
  if (factory) {
    try {
      return factory({
        ...config,
        model: modelEnv ?? config.model ?? DEFAULT_MODEL,
        temperature: typeof config.temperature === "number" ? config.temperature : 0
      });
    } catch (e) {
      throw normalizeError(e, `Failed to create ChatModel for provider "${provider}"`);
    }
  }
  const model = modelEnv ?? config.model ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;
  let apiKey = apiKeyEnv ?? config.apiKey ?? process.env.OPENAI_API_KEY;
  let baseURL = config.baseURL;
  if (baseURL && !baseURL.replace(/\/$/, "").endsWith("/v1")) {
    baseURL = baseURL.replace(/\/$/, "") + "/v1";
  }
  if (baseURL && !apiKey) {
    apiKey = "not-needed";
  }
  const temperature = typeof config.temperature === "number" ? config.temperature : 0;
  const opts = config.options;
  const defaultHeaders = opts?.defaultHeaders;
  const defaultQuery = opts?.defaultQuery;
  const httpAgent = opts?.httpAgent;
  return createChatOpenAI({
    model,
    temperature,
    baseURL,
    apiKey,
    defaultHeaders,
    defaultQuery,
    httpAgent
  });
}

export {
  applyToolChoiceAuto,
  createChatModelFromLlmConfig,
  langchain_exports
};
//# sourceMappingURL=chunk-4OLU43SH.js.map
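To make the tool_choice patch above concrete, a sketch of a caller applying it. Whether these two names are re-exported from the package root, and whether `parseLlmSection` accepts `base_url` spelled this way, are assumptions based on the README and the code above.

```ts
import { applyToolChoiceAuto, createChatModelFromLlmConfig } from "@easynet/agent-model";

// base_url lacks /v1 on purpose: the OpenAI-compatible fallback path appends it.
const llm = createChatModelFromLlmConfig({
  llmSection: {
    default: "local",
    local: { provider: "openai", base_url: "http://localhost:11434", model: "qwen3:0.6b" },
  },
});

// Patches bindTools/withConfig/invoke/stream in place so providers that
// default tool_choice to "none" still call tools when tools are bound.
applyToolChoiceAuto(llm);
```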
package/dist/chunk-4OLU43SH.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/langchain/index.ts","../src/langchain/openai-compatible.ts","../src/langchain/tool-choice.ts"],"sourcesContent":["/**\n * Simple LangChain module: create ChatOpenAI from llm config.\n * Extensions can register custom ChatModel factories via the registry.\n */\n\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { parseLlmSection } from \"../model/llm-parser.js\";\nimport type { LLMConfig } from \"../model/types.js\";\nimport { getChatModelFactory } from \"../registry/chat-model.js\";\nimport { createChatOpenAI } from \"./openai-compatible.js\";\nimport type { Agent } from \"node:http\";\n\nconst DEFAULT_MODEL = \"gpt-4o-mini\";\n\nexport interface CreateChatModelOptions {\n llmSection?: unknown;\n modelEnv?: string;\n apiKeyEnv?: string;\n}\n\nexport { applyToolChoiceAuto } from \"./tool-choice.js\";\n\nfunction normalizeError(e: unknown, context: string): Error {\n if (e instanceof Error) return new Error(`${context}: ${e.message}`, { cause: e });\n return new Error(`${context}: ${String(e)}`);\n}\n\n/**\n * Create a LangChain ChatModel from agent config llm section.\n * Uses extension-registered factory when available; otherwise creates ChatOpenAI.\n */\nexport function createChatModelFromLlmConfig(\n options: CreateChatModelOptions = {}\n): BaseChatModel {\n const { llmSection, modelEnv, apiKeyEnv } = options;\n\n let defaultId: string;\n let configs: LLMConfig[];\n\n try {\n const parsed = parseLlmSection(llmSection ?? null);\n defaultId = parsed.defaultId;\n configs = parsed.configs;\n } catch (e) {\n throw normalizeError(e, \"Failed to parse llm section\");\n }\n\n const config = configs.find((c) => c.id === defaultId) ?? configs[0];\n\n // No config? Use default OpenAI\n if (!config) {\n const model = modelEnv ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;\n const apiKey = apiKeyEnv ?? process.env.OPENAI_API_KEY;\n\n return createChatOpenAI({\n model,\n temperature: 0,\n ...(apiKey ? { apiKey } : {}),\n });\n }\n\n // Check for registered custom factory\n const provider = config.provider ?? \"openai\";\n const factory = getChatModelFactory(provider);\n\n if (factory) {\n try {\n return factory({\n ...config,\n model: modelEnv ?? config.model ?? DEFAULT_MODEL,\n temperature: typeof config.temperature === \"number\" ? config.temperature : 0,\n });\n } catch (e) {\n throw normalizeError(e, `Failed to create ChatModel for provider \"${provider}\"`);\n }\n }\n\n // Create standard ChatOpenAI for OpenAI-compatible provider\n const model = modelEnv ?? config.model ?? process.env.OPENAI_MODEL ?? DEFAULT_MODEL;\n let apiKey = apiKeyEnv ?? config.apiKey ?? process.env.OPENAI_API_KEY;\n let baseURL = config.baseURL;\n\n // Ensure baseURL ends with /v1\n if (baseURL && !baseURL.replace(/\\/$/, \"\").endsWith(\"/v1\")) {\n baseURL = baseURL.replace(/\\/$/, \"\") + \"/v1\";\n }\n\n // For local providers without API keys, use a placeholder\n if (baseURL && !apiKey) {\n apiKey = \"not-needed\";\n }\n\n const temperature = typeof config.temperature === \"number\" ? 
config.temperature : 0;\n\n // Extract options\n const opts = config.options as Record<string, unknown> | undefined;\n const defaultHeaders = opts?.defaultHeaders as Record<string, string> | undefined;\n const defaultQuery = opts?.defaultQuery as Record<string, string> | undefined;\n const httpAgent = opts?.httpAgent as Agent | undefined;\n\n return createChatOpenAI({\n model,\n temperature,\n baseURL,\n apiKey,\n defaultHeaders,\n defaultQuery,\n httpAgent,\n });\n}\n","/**\n * Create ChatOpenAI from config - works with any OpenAI-compatible provider.\n * This is the ONLY place we create ChatOpenAI instances.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { Agent } from \"node:http\";\n\nexport interface CreateChatOpenAIOptions {\n baseURL?: string;\n model: string;\n temperature?: number;\n apiKey?: string;\n defaultHeaders?: Record<string, string>;\n defaultQuery?: Record<string, string>;\n httpAgent?: Agent;\n}\n\n/**\n * Create ChatOpenAI from options.\n * Works with OpenAI and any OpenAI-compatible endpoint (CIS, Ollama, etc.).\n */\nexport function createChatOpenAI(options: CreateChatOpenAIOptions): BaseChatModel {\n const {\n baseURL,\n model,\n temperature = 0,\n apiKey,\n defaultHeaders,\n defaultQuery,\n httpAgent,\n } = options;\n\n const config: {\n baseURL?: string;\n defaultHeaders?: Record<string, string>;\n defaultQuery?: Record<string, string>;\n httpAgent?: Agent;\n } = {};\n\n if (baseURL) config.baseURL = baseURL;\n if (defaultHeaders) config.defaultHeaders = defaultHeaders;\n if (defaultQuery) config.defaultQuery = defaultQuery;\n if (httpAgent) config.httpAgent = httpAgent;\n\n return new ChatOpenAI({\n model,\n temperature,\n ...(apiKey ? { apiKey } : {}),\n ...(Object.keys(config).length > 0 ? 
{ configuration: config } : {}),\n });\n}\n","interface ToolChoicePatchTarget {\n __agentLlmToolChoicePatched?: boolean;\n defaultOptions?: { tools?: unknown[] };\n bindTools?: (tools: unknown, opts?: Record<string, unknown>) => unknown;\n withConfig?: (config: Record<string, unknown>) => unknown;\n invoke?: (input: unknown, options?: Record<string, unknown>) => unknown;\n stream?: (input: unknown, options?: Record<string, unknown>) => unknown;\n}\n\nfunction hasTools(model: ToolChoicePatchTarget, options?: Record<string, unknown>): boolean {\n const optTools = options?.tools;\n if (Array.isArray(optTools) && optTools.length > 0) return true;\n const defaultTools = model.defaultOptions?.tools;\n return Array.isArray(defaultTools) && defaultTools.length > 0;\n}\n\nfunction sanitizeOptions(\n model: ToolChoicePatchTarget,\n options?: Record<string, unknown>\n): Record<string, unknown> | undefined {\n if (!options) return options;\n if (options.tool_choice !== \"none\") return options;\n if (!hasTools(model, options)) return options;\n return { ...options, tool_choice: \"auto\" };\n}\n\n/**\n * Force tool_choice to \"auto\" when tools are present.\n * Patches bindTools/withConfig/invoke/stream in-place for compatibility with providers\n * that default to tool_choice: \"none\".\n */\nexport function applyToolChoiceAuto(\n model: { bindTools?: (tools: unknown, opts?: Record<string, unknown>) => unknown }\n): void {\n const m = model as ToolChoicePatchTarget;\n if (m.__agentLlmToolChoicePatched) return;\n m.__agentLlmToolChoicePatched = true;\n\n const origBindTools = m.bindTools?.bind(model);\n if (origBindTools) {\n m.bindTools = function (tools: unknown, opts?: Record<string, unknown>) {\n const bound = origBindTools(tools, { ...opts, tool_choice: \"auto\" });\n applyToolChoiceAuto(\n bound as { bindTools?: (tools: unknown, opts?: Record<string, unknown>) => unknown }\n );\n return bound;\n };\n }\n\n const origWithConfig = m.withConfig?.bind(model);\n if (origWithConfig) {\n m.withConfig = function (config: Record<string, unknown>) {\n const sanitized = sanitizeOptions(this as ToolChoicePatchTarget, config) ?? 
config;\n const next = origWithConfig(sanitized);\n applyToolChoiceAuto(\n next as { bindTools?: (tools: unknown, opts?: Record<string, unknown>) => unknown }\n );\n return next;\n };\n }\n\n const origInvoke = m.invoke?.bind(model);\n if (origInvoke) {\n m.invoke = function (input: unknown, options?: Record<string, unknown>) {\n return origInvoke(input, sanitizeOptions(this as ToolChoicePatchTarget, options));\n };\n }\n\n const origStream = m.stream?.bind(model);\n if (origStream) {\n m.stream = function (input: unknown, options?: Record<string, unknown>) {\n return origStream(input, sanitizeOptions(this as ToolChoicePatchTarget, options));\n };\n }\n}\n"],"mappings":";;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACKA,SAAS,kBAAkB;AAkBpB,SAAS,iBAAiB,SAAiD;AAChF,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,cAAc;AAAA,IACd;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,QAAM,SAKF,CAAC;AAEL,MAAI,QAAS,QAAO,UAAU;AAC9B,MAAI,eAAgB,QAAO,iBAAiB;AAC5C,MAAI,aAAc,QAAO,eAAe;AACxC,MAAI,UAAW,QAAO,YAAY;AAElC,SAAO,IAAI,WAAW;AAAA,IACpB;AAAA,IACA;AAAA,IACA,GAAI,SAAS,EAAE,OAAO,IAAI,CAAC;AAAA,IAC3B,GAAI,OAAO,KAAK,MAAM,EAAE,SAAS,IAAI,EAAE,eAAe,OAAO,IAAI,CAAC;AAAA,EACpE,CAAC;AACH;;;AC3CA,SAAS,SAAS,OAA8B,SAA4C;AAC1F,QAAM,WAAW,SAAS;AAC1B,MAAI,MAAM,QAAQ,QAAQ,KAAK,SAAS,SAAS,EAAG,QAAO;AAC3D,QAAM,eAAe,MAAM,gBAAgB;AAC3C,SAAO,MAAM,QAAQ,YAAY,KAAK,aAAa,SAAS;AAC9D;AAEA,SAAS,gBACP,OACA,SACqC;AACrC,MAAI,CAAC,QAAS,QAAO;AACrB,MAAI,QAAQ,gBAAgB,OAAQ,QAAO;AAC3C,MAAI,CAAC,SAAS,OAAO,OAAO,EAAG,QAAO;AACtC,SAAO,EAAE,GAAG,SAAS,aAAa,OAAO;AAC3C;AAOO,SAAS,oBACd,OACM;AACN,QAAM,IAAI;AACV,MAAI,EAAE,4BAA6B;AACnC,IAAE,8BAA8B;AAEhC,QAAM,gBAAgB,EAAE,WAAW,KAAK,KAAK;AAC7C,MAAI,eAAe;AACjB,MAAE,YAAY,SAAU,OAAgB,MAAgC;AACtE,YAAM,QAAQ,cAAc,OAAO,EAAE,GAAG,MAAM,aAAa,OAAO,CAAC;AACnE;AAAA,QACE;AAAA,MACF;AACA,aAAO;AAAA,IACT;AAAA,EACF;AAEA,QAAM,iBAAiB,EAAE,YAAY,KAAK,KAAK;AAC/C,MAAI,gBAAgB;AAClB,MAAE,aAAa,SAAU,QAAiC;AACxD,YAAM,YAAY,gBAAgB,MAA+B,MAAM,KAAK;AAC5E,YAAM,OAAO,eAAe,SAAS;AACrC;AAAA,QACE;AAAA,MACF;AACA,aAAO;AAAA,IACT;AAAA,EACF;AAEA,QAAM,aAAa,EAAE,QAAQ,KAAK,KAAK;AACvC,MAAI,YAAY;AACd,MAAE,SAAS,SAAU,OAAgB,SAAmC;AACtE,aAAO,WAAW,OAAO,gBAAgB,MAA+B,OAAO,CAAC;AAAA,IAClF;AAAA,EACF;AAEA,QAAM,aAAa,EAAE,QAAQ,KAAK,KAAK;AACvC,MAAI,YAAY;AACd,MAAE,SAAS,SAAU,OAAgB,SAAmC;AACtE,aAAO,WAAW,OAAO,gBAAgB,MAA+B,OAAO,CAAC;AAAA,IAClF;AAAA,EACF;AACF;;;AF9DA,IAAM,gBAAgB;AAUtB,SAAS,eAAe,GAAY,SAAwB;AAC1D,MAAI,aAAa,MAAO,QAAO,IAAI,MAAM,GAAG,OAAO,KAAK,EAAE,OAAO,IAAI,EAAE,OAAO,EAAE,CAAC;AACjF,SAAO,IAAI,MAAM,GAAG,OAAO,KAAK,OAAO,CAAC,CAAC,EAAE;AAC7C;AAMO,SAAS,6BACd,UAAkC,CAAC,GACpB;AACf,QAAM,EAAE,YAAY,UAAU,UAAU,IAAI;AAE5C,MAAI;AACJ,MAAI;AAEJ,MAAI;AACF,UAAM,SAAS,gBAAgB,cAAc,IAAI;AACjD,gBAAY,OAAO;AACnB,cAAU,OAAO;AAAA,EACnB,SAAS,GAAG;AACV,UAAM,eAAe,GAAG,6BAA6B;AAAA,EACvD;AAEA,QAAM,SAAS,QAAQ,KAAK,CAAC,MAAM,EAAE,OAAO,SAAS,KAAK,QAAQ,CAAC;AAGnE,MAAI,CAAC,QAAQ;AACX,UAAMA,SAAQ,YAAY,QAAQ,IAAI,gBAAgB;AACtD,UAAMC,UAAS,aAAa,QAAQ,IAAI;AAExC,WAAO,iBAAiB;AAAA,MACtB,OAAAD;AAAA,MACA,aAAa;AAAA,MACb,GAAIC,UAAS,EAAE,QAAAA,QAAO,IAAI,CAAC;AAAA,IAC7B,CAAC;AAAA,EACH;AAGA,QAAM,WAAW,OAAO,YAAY;AACpC,QAAM,UAAU,oBAAoB,QAAQ;AAE5C,MAAI,SAAS;AACX,QAAI;AACF,aAAO,QAAQ;AAAA,QACb,GAAG;AAAA,QACH,OAAO,YAAY,OAAO,SAAS;AAAA,QACnC,aAAa,OAAO,OAAO,gBAAgB,WAAW,OAAO,cAAc;AAAA,MAC7E,CAAC;AAAA,IACH,SAAS,GAAG;AACV,YAAM,eAAe,GAAG,4CAA4C,QAAQ,GAAG;AAAA,IACjF;AAAA,EACF;AAGA,QAAM,QAAQ,YAAY,OAAO,SAAS,QAAQ,IAAI,gBAAgB;AACtE,MAAI,SAAS,aAAa,OAAO,UAAU,QAAQ,IAAI;AACvD,MAAI,UAAU,OAAO;AAGrB,MAAI,WAAW,CAAC,QAAQ,QAAQ,OAAO,EAAE,EAAE,SAAS,KAAK,GAAG;AAC1D,cAAU,QAAQ,QAAQ,OAAO,EAAE,IAAI;AAAA,EACzC;AAGA,MAAI,WAAW,CAAC,QAAQ;AACtB,aAAS;AAAA,EACX;AAEA,QAAM,cAAc,OAAO,OAAO,gBAA
gB,WAAW,OAAO,cAAc;AAGlF,QAAM,OAAO,OAAO;AACpB,QAAM,iBAAiB,MAAM;AAC7B,QAAM,eAAe,MAAM;AAC3B,QAAM,YAAY,MAAM;AAExB,SAAO,iBAAiB;AAAA,IACtB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AACH;","names":["model","apiKey"]}
package/dist/chunk-5YPJ43HW.js
ADDED
@@ -0,0 +1,205 @@
import {
  parseLlmSection
} from "./chunk-SPDXNDDD.js";
import {
  __export
} from "./chunk-PZ5AY32C.js";

// src/model/index.ts
var model_exports = {};
__export(model_exports, {
  chatCompletionViaOpenAICompatibleApi: () => chatCompletionViaOpenAICompatibleApi,
  createModelHub: () => createModelHub,
  embedViaOpenAICompatibleApi: () => embedViaOpenAICompatibleApi,
  parseEmbedSection: () => parseEmbedSection,
  parseLlmSection: () => parseLlmSection
});

// src/model/embed-parser.ts
function parseEmbedSection(section) {
  const result = parseLlmSection(section);
  for (const config of result.configs) {
    config.type = "embed";
  }
  return result;
}

// src/model/chat.ts
var DEFAULT_CHAT_TIMEOUT_MS = 6e4;
function isLocalBaseUrl(url) {
  try {
    const u = new URL(url);
    const host = u.hostname.toLowerCase();
    return host === "localhost" || host === "127.0.0.1" || host === "::1";
  } catch {
    return false;
  }
}
function normalizeContent(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return String(content ?? "");
  const parts = [];
  for (const p of content) {
    if (typeof p === "string") parts.push(p);
    else if (p && typeof p === "object" && typeof p.text === "string")
      parts.push(p.text);
  }
  return parts.join("\n").trim();
}
async function chatCompletionViaOpenAICompatibleApi(options, request) {
  const baseUrl = options.baseURL.replace(/\/$/, "");
  const apiKey = options.apiKey?.trim();
  const timeoutMs = options.timeoutMs ?? DEFAULT_CHAT_TIMEOUT_MS;
  const modelName = typeof request.model === "string" && request.model.trim() !== "" ? request.model.trim() : options.model ?? "gpt-4o-mini";
  if (!apiKey && !isLocalBaseUrl(baseUrl)) {
    throw new Error("Chat completion API key is required for non-local baseURL");
  }
  const body = {
    model: modelName,
    messages: request.messages.map((m) => ({ role: m.role, content: m.content })),
    temperature: typeof request.temperature === "number" ? request.temperature : 0
  };
  if (typeof request.maxTokens === "number") body.max_tokens = request.maxTokens;
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch(`${baseUrl}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        ...apiKey ? { Authorization: `Bearer ${apiKey}` } : {}
      },
      body: JSON.stringify(body),
      signal: controller.signal
    });
    if (!response.ok) {
      const text = await response.text();
      throw new Error(`Chat completion API error ${response.status}: ${text.slice(0, 500)}`);
    }
    const data = await response.json();
    const raw = data.choices?.[0]?.message?.content ?? data.choices?.[0]?.text ?? "";
    return {
      text: normalizeContent(raw),
      model: data.model ?? modelName
    };
  } finally {
    clearTimeout(timer);
  }
}

// src/model/embedding.ts
var DEFAULT_EMBEDDING_TIMEOUT_MS = 3e4;
function isLocalBaseUrl2(url) {
  try {
    const u = new URL(url);
    const host = u.hostname.toLowerCase();
    return host === "localhost" || host === "127.0.0.1" || host === "::1";
  } catch {
    return false;
  }
}
async function embedViaOpenAICompatibleApi(options, input) {
  const baseUrl = options.baseURL.replace(/\/$/, "");
  const apiKey = options.apiKey?.trim();
  const timeoutMs = options.timeoutMs ?? DEFAULT_EMBEDDING_TIMEOUT_MS;
  const modelName = typeof input.model === "string" && input.model.trim() !== "" ? input.model.trim() : options.model ?? "text-embedding-3-small";
  if (!apiKey && !isLocalBaseUrl2(baseUrl)) {
    throw new Error("Embedding API key is required for non-local baseURL");
  }
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch(`${baseUrl}/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        ...apiKey ? { Authorization: `Bearer ${apiKey}` } : {}
      },
      body: JSON.stringify({ model: modelName, input: input.input }),
      signal: controller.signal
    });
    if (!response.ok) {
      const body = await response.text();
      throw new Error(`Embedding API error ${response.status}: ${body.slice(0, 500)}`);
    }
    const data = await response.json();
    const vectors = (data.data ?? []).slice().sort((a, b) => (a.index ?? 0) - (b.index ?? 0)).map((v) => v.embedding).filter((v) => Array.isArray(v));
    return {
      vectors,
      dimensions: vectors[0]?.length,
      model: data.model ?? modelName
    };
  } finally {
    clearTimeout(timer);
  }
}

// src/model/hub.ts
var DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small";
var DEFAULT_CHAT_MODEL = "gpt-4o-mini";
var DEFAULT_BASE_URL = "https://api.openai.com/v1";
function getDefaultLlmConfig(llmSection) {
  const parsed = parseLlmSection(llmSection ?? null);
  const config = parsed.configs.find((c) => c.id === parsed.defaultId) ?? parsed.configs[0] ?? null;
  return config;
}
function createModelHub(options = {}) {
  const baseCfg = getDefaultLlmConfig(options.llmSection);
  const defaultBaseUrl = (options.embeddingBaseURL ?? baseCfg?.baseURL ?? DEFAULT_BASE_URL).replace(/\/$/, "");
  const defaultApiKey = options.embeddingApiKey ?? baseCfg?.apiKey ?? process.env.OPENAI_API_KEY;
  const defaultEmbeddingModel = options.embeddingModel ?? (baseCfg?.options && typeof baseCfg.options.embeddingModel === "string" ? baseCfg.options.embeddingModel : void 0) ?? baseCfg?.model ?? process.env.OPENAI_EMBEDDING_MODEL ?? DEFAULT_EMBEDDING_MODEL;
  const defaultChatModel = baseCfg?.model ?? process.env.OPENAI_MODEL ?? DEFAULT_CHAT_MODEL;
  const embeddingTimeoutMs = options.embeddingTimeoutMs;
  const chatTimeoutMs = options.chatTimeoutMs;
  return {
    async generate(input) {
      const messages = [];
      if (typeof input.systemPrompt === "string" && input.systemPrompt.trim() !== "") {
        messages.push({ role: "system", content: input.systemPrompt.trim() });
      }
      messages.push({ role: "user", content: input.input });
      const result = await chatCompletionViaOpenAICompatibleApi(
        {
          baseURL: defaultBaseUrl,
          apiKey: defaultApiKey,
          model: defaultChatModel,
          timeoutMs: chatTimeoutMs
        },
        {
          messages,
          model: input.model,
          temperature: input.temperature
        }
      );
      return {
        text: result.text,
        model: typeof input.model === "string" && input.model.trim() !== "" ? input.model : result.model ?? baseCfg?.model
      };
    },
    async embed(input) {
      const result = await embedViaOpenAICompatibleApi(
        {
          baseURL: defaultBaseUrl,
          apiKey: defaultApiKey,
          model: defaultEmbeddingModel,
          timeoutMs: embeddingTimeoutMs
        },
        { input: input.input, model: input.model }
      );
      return {
        vectors: result.vectors,
        dimensions: result.dimensions,
        model: result.model
      };
    }
  };
}

export {
  chatCompletionViaOpenAICompatibleApi,
  embedViaOpenAICompatibleApi,
  createModelHub,
  parseEmbedSection,
  model_exports
};
//# sourceMappingURL=chunk-5YPJ43HW.js.map
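A usage sketch for the model hub above, which speaks plain OpenAI-compatible HTTP with no LangChain involved; the root re-export and the flat `llmSection` shape are assumptions.

```ts
import { createModelHub } from "@easynet/agent-model";

const hub = createModelHub({
  llmSection: {
    default: "local",
    local: { provider: "openai", base_url: "http://localhost:11434/v1", model: "qwen3:0.6b" },
  },
  embeddingModel: "embeddinggemma:latest",
});

// generate() posts to /chat/completions; embed() posts to /embeddings.
// localhost endpoints are exempt from the API-key requirement.
const { text } = await hub.generate({ input: "Say hi", systemPrompt: "Be brief." });
const { vectors, dimensions } = await hub.embed({ input: ["hello", "world"] });
console.log(text, vectors.length, dimensions);
```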
package/dist/chunk-5YPJ43HW.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/model/index.ts","../src/model/embed-parser.ts","../src/model/chat.ts","../src/model/embedding.ts","../src/model/hub.ts"],"sourcesContent":["/**\n * Model APIs (no LangChain dependency):\n * - LLM types and config parsing\n * - Chat completion and embedding HTTP APIs\n * - ModelHub (generate + embed)\n */\n\nexport type { LLMType, LLMConfig, AgentConfigLlmSection } from \"./types.js\";\nexport { parseLlmSection } from \"./llm-parser.js\";\nexport { parseEmbedSection } from \"./embed-parser.js\";\nexport {\n chatCompletionViaOpenAICompatibleApi,\n type ChatCompletionOptions,\n type ChatCompletionMessage,\n type ChatCompletionRequest,\n type ChatCompletionResult,\n} from \"./chat.js\";\nexport {\n embedViaOpenAICompatibleApi,\n type EmbeddingOptions,\n type EmbedRequest,\n type EmbedResult,\n} from \"./embedding.js\";\nexport {\n createModelHub,\n type CreateModelHubOptions,\n type ModelHub,\n type ChatGenerateRequest,\n type ChatGenerateResult,\n} from \"./hub.js\";\n","/**\n * Parse embed section from models.yaml into normalized LLMConfig[] with type \"embed\".\n * Reuses parseLlmSection internally since the config shape is identical.\n */\n\nimport { parseLlmSection } from \"./llm-parser.js\";\nimport type { LLMConfig } from \"./types.js\";\n\n/**\n * Parse an `embed:` section from models.yaml.\n * Accepts the same flat-entry, instances[], or single-object formats as the llm section.\n *\n * @example\n * ```yaml\n * embed:\n * default: gemma\n * gemma:\n * provider: openai\n * base_url: https://ollama.example.com/v1\n * model: embeddinggemma:latest\n * apiKey: ollama\n * ```\n */\nexport function parseEmbedSection(\n section: unknown,\n): { defaultId: string; configs: LLMConfig[] } {\n const result = parseLlmSection(section);\n // Override type to \"embed\" for all parsed configs\n for (const config of result.configs) {\n config.type = \"embed\";\n }\n return result;\n}\n","/**\n * OpenAI-compatible chat completion API: POST /chat/completions.\n * No LangChain dependency; for use by any module that needs LLM text generation.\n */\n\nconst DEFAULT_CHAT_TIMEOUT_MS = 60_000;\n\nexport interface ChatCompletionOptions {\n baseURL: string;\n apiKey?: string;\n model?: string;\n timeoutMs?: number;\n}\n\nexport interface ChatCompletionMessage {\n role: \"system\" | \"user\" | \"assistant\";\n content: string;\n}\n\nexport interface ChatCompletionRequest {\n messages: ChatCompletionMessage[];\n model?: string;\n temperature?: number;\n maxTokens?: number;\n}\n\nexport interface ChatCompletionResult {\n text: string;\n model?: string;\n}\n\nfunction isLocalBaseUrl(url: string): boolean {\n try {\n const u = new URL(url);\n const host = u.hostname.toLowerCase();\n return host === \"localhost\" || host === \"127.0.0.1\" || host === \"::1\";\n } catch {\n return false;\n }\n}\n\nfunction normalizeContent(content: unknown): string {\n if (typeof content === \"string\") return content;\n if (!Array.isArray(content)) return String(content ?? \"\");\n const parts: string[] = [];\n for (const p of content) {\n if (typeof p === \"string\") parts.push(p);\n else if (p && typeof p === \"object\" && typeof (p as { text?: unknown }).text === \"string\")\n parts.push((p as { text: string }).text);\n }\n return parts.join(\"\\n\").trim();\n}\n\n/**\n * Call OpenAI-compatible /chat/completions endpoint.\n * baseURL should be the API root (e.g. 
https://api.openai.com/v1); trailing slash is stripped.\n */\nexport async function chatCompletionViaOpenAICompatibleApi(\n options: ChatCompletionOptions,\n request: ChatCompletionRequest\n): Promise<ChatCompletionResult> {\n const baseUrl = options.baseURL.replace(/\\/$/, \"\");\n const apiKey = options.apiKey?.trim();\n const timeoutMs = options.timeoutMs ?? DEFAULT_CHAT_TIMEOUT_MS;\n const modelName =\n typeof request.model === \"string\" && request.model.trim() !== \"\"\n ? request.model.trim()\n : options.model ?? \"gpt-4o-mini\";\n\n if (!apiKey && !isLocalBaseUrl(baseUrl)) {\n throw new Error(\"Chat completion API key is required for non-local baseURL\");\n }\n\n const body: Record<string, unknown> = {\n model: modelName,\n messages: request.messages.map((m) => ({ role: m.role, content: m.content })),\n temperature: typeof request.temperature === \"number\" ? request.temperature : 0,\n };\n if (typeof request.maxTokens === \"number\") body.max_tokens = request.maxTokens;\n\n const controller = new AbortController();\n const timer = setTimeout(() => controller.abort(), timeoutMs);\n try {\n const response = await fetch(`${baseUrl}/chat/completions`, {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),\n },\n body: JSON.stringify(body),\n signal: controller.signal,\n });\n if (!response.ok) {\n const text = await response.text();\n throw new Error(`Chat completion API error ${response.status}: ${text.slice(0, 500)}`);\n }\n const data = (await response.json()) as {\n choices?: Array<{ message?: { content?: unknown }; text?: unknown }>;\n model?: string;\n };\n const raw =\n data.choices?.[0]?.message?.content ?? data.choices?.[0]?.text ?? \"\";\n return {\n text: normalizeContent(raw),\n model: data.model ?? modelName,\n };\n } finally {\n clearTimeout(timer);\n }\n}\n","/**\n * OpenAI-compatible embedding API: POST /embeddings.\n * No LangChain dependency; for use by any module that needs embeddings.\n */\n\nconst DEFAULT_EMBEDDING_TIMEOUT_MS = 30_000;\n\nexport interface EmbeddingOptions {\n baseURL: string;\n apiKey?: string;\n model?: string;\n timeoutMs?: number;\n}\n\nexport interface EmbedRequest {\n input: string | string[];\n model?: string;\n}\n\nexport interface EmbedResult {\n vectors: number[][];\n dimensions?: number;\n model?: string;\n}\n\nfunction isLocalBaseUrl(url: string): boolean {\n try {\n const u = new URL(url);\n const host = u.hostname.toLowerCase();\n return host === \"localhost\" || host === \"127.0.0.1\" || host === \"::1\";\n } catch {\n return false;\n }\n}\n\n/**\n * Call OpenAI-compatible /embeddings endpoint.\n * baseURL should be the API root (e.g. https://api.openai.com/v1); trailing slash is stripped.\n */\nexport async function embedViaOpenAICompatibleApi(\n options: EmbeddingOptions,\n input: EmbedRequest\n): Promise<EmbedResult> {\n const baseUrl = options.baseURL.replace(/\\/$/, \"\");\n const apiKey = options.apiKey?.trim();\n const timeoutMs = options.timeoutMs ?? DEFAULT_EMBEDDING_TIMEOUT_MS;\n const modelName =\n typeof input.model === \"string\" && input.model.trim() !== \"\"\n ? input.model.trim()\n : options.model ?? 
\"text-embedding-3-small\";\n\n if (!apiKey && !isLocalBaseUrl(baseUrl)) {\n throw new Error(\"Embedding API key is required for non-local baseURL\");\n }\n\n const controller = new AbortController();\n const timer = setTimeout(() => controller.abort(), timeoutMs);\n try {\n const response = await fetch(`${baseUrl}/embeddings`, {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),\n },\n body: JSON.stringify({ model: modelName, input: input.input }),\n signal: controller.signal,\n });\n if (!response.ok) {\n const body = await response.text();\n throw new Error(`Embedding API error ${response.status}: ${body.slice(0, 500)}`);\n }\n const data = (await response.json()) as {\n data?: Array<{ embedding?: number[]; index?: number }>;\n model?: string;\n };\n const vectors = (data.data ?? [])\n .slice()\n .sort((a, b) => (a.index ?? 0) - (b.index ?? 0))\n .map((v) => v.embedding)\n .filter((v): v is number[] => Array.isArray(v));\n return {\n vectors,\n dimensions: vectors[0]?.length,\n model: data.model ?? modelName,\n };\n } finally {\n clearTimeout(timer);\n }\n}\n","/**\n * Model hub: generate (chat completion) + embed, driven by llm section config.\n * No LangChain; uses OpenAI-compatible HTTP APIs so any package can use it via agent-model.\n */\n\nimport { parseLlmSection } from \"./llm-parser.js\";\nimport type { LLMConfig } from \"./types.js\";\nimport { chatCompletionViaOpenAICompatibleApi } from \"./chat.js\";\nimport { embedViaOpenAICompatibleApi } from \"./embedding.js\";\nimport type { EmbedRequest, EmbedResult } from \"./embedding.js\";\n\nconst DEFAULT_EMBEDDING_MODEL = \"text-embedding-3-small\";\nconst DEFAULT_CHAT_MODEL = \"gpt-4o-mini\";\nconst DEFAULT_BASE_URL = \"https://api.openai.com/v1\";\n\nexport interface CreateModelHubOptions {\n llmSection?: unknown;\n embeddingBaseURL?: string;\n embeddingApiKey?: string;\n embeddingModel?: string;\n embeddingTimeoutMs?: number;\n chatTimeoutMs?: number;\n}\n\nexport interface ChatGenerateRequest {\n input: string;\n systemPrompt?: string;\n model?: string;\n temperature?: number;\n}\n\nexport interface ChatGenerateResult {\n text: string;\n model?: string;\n}\n\nexport interface ModelHub {\n generate(input: ChatGenerateRequest): Promise<ChatGenerateResult>;\n embed(input: EmbedRequest): Promise<EmbedResult>;\n}\n\nfunction getDefaultLlmConfig(llmSection: unknown): LLMConfig | null {\n const parsed = parseLlmSection(llmSection ?? null);\n const config =\n parsed.configs.find((c: LLMConfig) => c.id === parsed.defaultId) ??\n parsed.configs[0] ??\n null;\n return config;\n}\n\nexport function createModelHub(options: CreateModelHubOptions = {}): ModelHub {\n const baseCfg = getDefaultLlmConfig(options.llmSection);\n const defaultBaseUrl = (\n options.embeddingBaseURL ??\n baseCfg?.baseURL ??\n DEFAULT_BASE_URL\n ).replace(/\\/$/, \"\");\n const defaultApiKey =\n options.embeddingApiKey ??\n baseCfg?.apiKey ??\n process.env.OPENAI_API_KEY;\n const defaultEmbeddingModel =\n options.embeddingModel ??\n (baseCfg?.options && typeof baseCfg.options.embeddingModel === \"string\"\n ? 
baseCfg.options.embeddingModel\n : undefined) ??\n baseCfg?.model ??\n process.env.OPENAI_EMBEDDING_MODEL ??\n DEFAULT_EMBEDDING_MODEL;\n const defaultChatModel =\n baseCfg?.model ??\n process.env.OPENAI_MODEL ??\n DEFAULT_CHAT_MODEL;\n const embeddingTimeoutMs = options.embeddingTimeoutMs;\n const chatTimeoutMs = options.chatTimeoutMs;\n\n return {\n async generate(input: ChatGenerateRequest): Promise<ChatGenerateResult> {\n const messages: Array<{ role: \"system\" | \"user\" | \"assistant\"; content: string }> = [];\n if (typeof input.systemPrompt === \"string\" && input.systemPrompt.trim() !== \"\") {\n messages.push({ role: \"system\", content: input.systemPrompt.trim() });\n }\n messages.push({ role: \"user\", content: input.input });\n const result = await chatCompletionViaOpenAICompatibleApi(\n {\n baseURL: defaultBaseUrl,\n apiKey: defaultApiKey,\n model: defaultChatModel,\n timeoutMs: chatTimeoutMs,\n },\n {\n messages,\n model: input.model,\n temperature: input.temperature,\n }\n );\n return {\n text: result.text,\n model:\n typeof input.model === \"string\" && input.model.trim() !== \"\"\n ? input.model\n : result.model ?? baseCfg?.model,\n };\n },\n async embed(input: EmbedRequest): Promise<EmbedResult> {\n const result = await embedViaOpenAICompatibleApi(\n {\n baseURL: defaultBaseUrl,\n apiKey: defaultApiKey,\n model: defaultEmbeddingModel,\n timeoutMs: embeddingTimeoutMs,\n },\n { input: input.input, model: input.model }\n );\n return {\n vectors: result.vectors,\n dimensions: result.dimensions,\n model: result.model,\n };\n },\n };\n}\n"],"mappings":";;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACuBO,SAAS,kBACd,SAC6C;AAC7C,QAAM,SAAS,gBAAgB,OAAO;AAEtC,aAAW,UAAU,OAAO,SAAS;AACnC,WAAO,OAAO;AAAA,EAChB;AACA,SAAO;AACT;;;AC3BA,IAAM,0BAA0B;AA0BhC,SAAS,eAAe,KAAsB;AAC5C,MAAI;AACF,UAAM,IAAI,IAAI,IAAI,GAAG;AACrB,UAAM,OAAO,EAAE,SAAS,YAAY;AACpC,WAAO,SAAS,eAAe,SAAS,eAAe,SAAS;AAAA,EAClE,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAEA,SAAS,iBAAiB,SAA0B;AAClD,MAAI,OAAO,YAAY,SAAU,QAAO;AACxC,MAAI,CAAC,MAAM,QAAQ,OAAO,EAAG,QAAO,OAAO,WAAW,EAAE;AACxD,QAAM,QAAkB,CAAC;AACzB,aAAW,KAAK,SAAS;AACvB,QAAI,OAAO,MAAM,SAAU,OAAM,KAAK,CAAC;AAAA,aAC9B,KAAK,OAAO,MAAM,YAAY,OAAQ,EAAyB,SAAS;AAC/E,YAAM,KAAM,EAAuB,IAAI;AAAA,EAC3C;AACA,SAAO,MAAM,KAAK,IAAI,EAAE,KAAK;AAC/B;AAMA,eAAsB,qCACpB,SACA,SAC+B;AAC/B,QAAM,UAAU,QAAQ,QAAQ,QAAQ,OAAO,EAAE;AACjD,QAAM,SAAS,QAAQ,QAAQ,KAAK;AACpC,QAAM,YAAY,QAAQ,aAAa;AACvC,QAAM,YACJ,OAAO,QAAQ,UAAU,YAAY,QAAQ,MAAM,KAAK,MAAM,KAC1D,QAAQ,MAAM,KAAK,IACnB,QAAQ,SAAS;AAEvB,MAAI,CAAC,UAAU,CAAC,eAAe,OAAO,GAAG;AACvC,UAAM,IAAI,MAAM,2DAA2D;AAAA,EAC7E;AAEA,QAAM,OAAgC;AAAA,IACpC,OAAO;AAAA,IACP,UAAU,QAAQ,SAAS,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,SAAS,EAAE,QAAQ,EAAE;AAAA,IAC5E,aAAa,OAAO,QAAQ,gBAAgB,WAAW,QAAQ,cAAc;AAAA,EAC/E;AACA,MAAI,OAAO,QAAQ,cAAc,SAAU,MAAK,aAAa,QAAQ;AAErE,QAAM,aAAa,IAAI,gBAAgB;AACvC,QAAM,QAAQ,WAAW,MAAM,WAAW,MAAM,GAAG,SAAS;AAC5D,MAAI;AACF,UAAM,WAAW,MAAM,MAAM,GAAG,OAAO,qBAAqB;AAAA,MAC1D,QAAQ;AAAA,MACR,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,GAAI,SAAS,EAAE,eAAe,UAAU,MAAM,GAAG,IAAI,CAAC;AAAA,MACxD;AAAA,MACA,MAAM,KAAK,UAAU,IAAI;AAAA,MACzB,QAAQ,WAAW;AAAA,IACrB,CAAC;AACD,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,YAAM,IAAI,MAAM,6BAA6B,SAAS,MAAM,KAAK,KAAK,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,IACvF;AACA,UAAM,OAAQ,MAAM,SAAS,KAAK;AAIlC,UAAM,MACJ,KAAK,UAAU,CAAC,GAAG,SAAS,WAAW,KAAK,UAAU,CAAC,GAAG,QAAQ;AACpE,WAAO;AAAA,MACL,MAAM,iBAAiB,GAAG;AAAA,MAC1B,OAAO,KAAK,SAAS;AAAA,IACvB;AAAA,EACF,UAAE;AACA,iBAAa,KAAK;AAAA,EACpB;AACF;;;ACxGA,IAAM,+BAA+B;AAoBrC,SAASA,gBAAe,KAAsB;AAC5C,MAAI;AACF,UAAM,IAAI,IAAI,IAAI,GAAG;AACrB,U
AAM,OAAO,EAAE,SAAS,YAAY;AACpC,WAAO,SAAS,eAAe,SAAS,eAAe,SAAS;AAAA,EAClE,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAMA,eAAsB,4BACpB,SACA,OACsB;AACtB,QAAM,UAAU,QAAQ,QAAQ,QAAQ,OAAO,EAAE;AACjD,QAAM,SAAS,QAAQ,QAAQ,KAAK;AACpC,QAAM,YAAY,QAAQ,aAAa;AACvC,QAAM,YACJ,OAAO,MAAM,UAAU,YAAY,MAAM,MAAM,KAAK,MAAM,KACtD,MAAM,MAAM,KAAK,IACjB,QAAQ,SAAS;AAEvB,MAAI,CAAC,UAAU,CAACA,gBAAe,OAAO,GAAG;AACvC,UAAM,IAAI,MAAM,qDAAqD;AAAA,EACvE;AAEA,QAAM,aAAa,IAAI,gBAAgB;AACvC,QAAM,QAAQ,WAAW,MAAM,WAAW,MAAM,GAAG,SAAS;AAC5D,MAAI;AACF,UAAM,WAAW,MAAM,MAAM,GAAG,OAAO,eAAe;AAAA,MACpD,QAAQ;AAAA,MACR,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,GAAI,SAAS,EAAE,eAAe,UAAU,MAAM,GAAG,IAAI,CAAC;AAAA,MACxD;AAAA,MACA,MAAM,KAAK,UAAU,EAAE,OAAO,WAAW,OAAO,MAAM,MAAM,CAAC;AAAA,MAC7D,QAAQ,WAAW;AAAA,IACrB,CAAC;AACD,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,YAAM,IAAI,MAAM,uBAAuB,SAAS,MAAM,KAAK,KAAK,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,IACjF;AACA,UAAM,OAAQ,MAAM,SAAS,KAAK;AAIlC,UAAM,WAAW,KAAK,QAAQ,CAAC,GAC5B,MAAM,EACN,KAAK,CAAC,GAAG,OAAO,EAAE,SAAS,MAAM,EAAE,SAAS,EAAE,EAC9C,IAAI,CAAC,MAAM,EAAE,SAAS,EACtB,OAAO,CAAC,MAAqB,MAAM,QAAQ,CAAC,CAAC;AAChD,WAAO;AAAA,MACL;AAAA,MACA,YAAY,QAAQ,CAAC,GAAG;AAAA,MACxB,OAAO,KAAK,SAAS;AAAA,IACvB;AAAA,EACF,UAAE;AACA,iBAAa,KAAK;AAAA,EACpB;AACF;;;AC7EA,IAAM,0BAA0B;AAChC,IAAM,qBAAqB;AAC3B,IAAM,mBAAmB;AA4BzB,SAAS,oBAAoB,YAAuC;AAClE,QAAM,SAAS,gBAAgB,cAAc,IAAI;AACjD,QAAM,SACJ,OAAO,QAAQ,KAAK,CAAC,MAAiB,EAAE,OAAO,OAAO,SAAS,KAC/D,OAAO,QAAQ,CAAC,KAChB;AACF,SAAO;AACT;AAEO,SAAS,eAAe,UAAiC,CAAC,GAAa;AAC5E,QAAM,UAAU,oBAAoB,QAAQ,UAAU;AACtD,QAAM,kBACJ,QAAQ,oBACR,SAAS,WACT,kBACA,QAAQ,OAAO,EAAE;AACnB,QAAM,gBACJ,QAAQ,mBACR,SAAS,UACT,QAAQ,IAAI;AACd,QAAM,wBACJ,QAAQ,mBACP,SAAS,WAAW,OAAO,QAAQ,QAAQ,mBAAmB,WAC3D,QAAQ,QAAQ,iBAChB,WACJ,SAAS,SACT,QAAQ,IAAI,0BACZ;AACF,QAAM,mBACJ,SAAS,SACT,QAAQ,IAAI,gBACZ;AACF,QAAM,qBAAqB,QAAQ;AACnC,QAAM,gBAAgB,QAAQ;AAE9B,SAAO;AAAA,IACL,MAAM,SAAS,OAAyD;AACtE,YAAM,WAA8E,CAAC;AACrF,UAAI,OAAO,MAAM,iBAAiB,YAAY,MAAM,aAAa,KAAK,MAAM,IAAI;AAC9E,iBAAS,KAAK,EAAE,MAAM,UAAU,SAAS,MAAM,aAAa,KAAK,EAAE,CAAC;AAAA,MACtE;AACA,eAAS,KAAK,EAAE,MAAM,QAAQ,SAAS,MAAM,MAAM,CAAC;AACpD,YAAM,SAAS,MAAM;AAAA,QACnB;AAAA,UACE,SAAS;AAAA,UACT,QAAQ;AAAA,UACR,OAAO;AAAA,UACP,WAAW;AAAA,QACb;AAAA,QACA;AAAA,UACE;AAAA,UACA,OAAO,MAAM;AAAA,UACb,aAAa,MAAM;AAAA,QACrB;AAAA,MACF;AACA,aAAO;AAAA,QACL,MAAM,OAAO;AAAA,QACb,OACE,OAAO,MAAM,UAAU,YAAY,MAAM,MAAM,KAAK,MAAM,KACtD,MAAM,QACN,OAAO,SAAS,SAAS;AAAA,MACjC;AAAA,IACF;AAAA,IACA,MAAM,MAAM,OAA2C;AACrD,YAAM,SAAS,MAAM;AAAA,QACnB;AAAA,UACE,SAAS;AAAA,UACT,QAAQ;AAAA,UACR,OAAO;AAAA,UACP,WAAW;AAAA,QACb;AAAA,QACA,EAAE,OAAO,MAAM,OAAO,OAAO,MAAM,MAAM;AAAA,MAC3C;AACA,aAAO;AAAA,QACL,SAAS,OAAO;AAAA,QAChB,YAAY,OAAO;AAAA,QACnB,OAAO,OAAO;AAAA,MAChB;AAAA,IACF;AAAA,EACF;AACF;","names":["isLocalBaseUrl"]}