@easynet/agent-llm 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +59 -73
- package/dist/chunk-UZOGGJK7.js +554 -0
- package/dist/chunk-UZOGGJK7.js.map +1 -0
- package/dist/cli.d.ts +9 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +120 -0
- package/dist/cli.js.map +1 -0
- package/dist/config.d.ts +2 -2
- package/dist/createAgentLlM.d.ts +20 -0
- package/dist/createAgentLlM.d.ts.map +1 -0
- package/dist/factory.d.ts +2 -2
- package/dist/factory.d.ts.map +1 -1
- package/dist/index.d.ts +6 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +39 -341
- package/dist/index.js.map +1 -1
- package/dist/llmAdapter.d.ts.map +1 -1
- package/dist/loadLlmConfig.d.ts +24 -0
- package/dist/loadLlmConfig.d.ts.map +1 -0
- package/dist/npmProviderProtocol.d.ts +61 -0
- package/dist/npmProviderProtocol.d.ts.map +1 -0
- package/dist/providers/openai.d.ts +2 -2
- package/dist/types.d.ts +31 -31
- package/dist/types.d.ts.map +1 -1
- package/package.json +12 -5
package/README.md
CHANGED
@@ -1,101 +1,87 @@
-#
+# @easynet/agent-llm
 
-
+Load an LLM from **llm.yaml** (or your config) and use it from the command line or in a LangChain agent. Supports OpenAI and OpenAI-compatible endpoints (Ollama, Groq, Azure, etc.).
 
-##
+## Command line
+
+Install and run with a question:
 
 ```bash
-
-
+npx @easynet/agent-llm "hi"
+npx @easynet/agent-llm "What is 10 + 20?"
 ```
 
-
+Config is read from **llm.yaml** or **config/llm.yaml** in the current directory (or parent). See [config/llm.yaml.example](config/llm.yaml.example). Placeholders like `${OPENAI_API_KEY}` are replaced from the environment.
 
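The `${OPENAI_API_KEY}`-style placeholders behave like a single substitution pass over the raw config text before it is parsed. A minimal sketch, assuming a regex-based expansion; `expandEnvPlaceholders` is an illustrative name, not a documented export of this package:

```ts
// Sketch only: expand ${VAR} placeholders from process.env in raw config text.
// The package's own loader (loadLlmConfig) may work differently.
function expandEnvPlaceholders(raw: string): string {
  return raw.replace(
    /\$\{([A-Za-z_][A-Za-z0-9_]*)\}/g,
    (match: string, name: string) => process.env[name] ?? match // leave unresolved placeholders untouched
  );
}

// With OPENAI_API_KEY set, "apiKey: ${OPENAI_API_KEY}" becomes "apiKey: <the key>".
```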
-
+## Use in a LangChain agent
 
-**
+**1.** Get the LLM from config with `createAgentLlM()`:
 
-```
-llm
-
-
-
-
-  name: openai/gpt-oss-120b
-  options:
-    apiKey: ${GROQ_API_KEY}
-    temperature: 0.7
-medium:
-  provider: openai
-  base_url: http://192.168.0.201:11434
-  name: qwen3:4b
-  options:
-    apiKey: ${API_KEY}
+```ts
+import { createAgentLlM } from "@easynet/agent-llm";
+import { createAgent, tool } from "langchain";
+import { z } from "zod";
+
+const llm = createAgentLlM();
 ```
 
-**
+**2.** Create your agent with LangChain’s `createAgent` and your tools:
 
-```
-
-
-
-
-
-
+```ts
+const add = tool(
+  (input: { a: number; b: number }) => String(input.a + input.b),
+  { name: "add", description: "Add two numbers.", schema: z.object({ a: z.number(), b: z.number() }) }
+);
+
+const agent = createAgent({ model: llm as unknown as Parameters<typeof createAgent>[0]["model"], tools: [add] });
 ```
 
-**
+**3.** Invoke with messages:
 
-
+```ts
+import { HumanMessage } from "@langchain/core/messages";
 
-
+const result = await agent.invoke({ messages: [new HumanMessage("What is 10 + 20?")] });
+console.log(result.messages?.slice(-1)[0]?.content);
+```
 
-
-- Extension packages can register ChatModel factories via `registerChatModelProvider`; call **loadLLMExtensions** before use.
+Full example: [examples/langchain-react-agent.ts](examples/langchain-react-agent.ts).
 
-
+## Config
 
-
+Put **llm.yaml** (or **config/llm.yaml**) in your project. Example:
 
-```
-
-
-
-
-
-const defaultId = registry.defaultId();
-if (defaultId) {
-  const llm = registry.get(defaultId)!;
-  const result = await llm.chat([
-    { role: "user", content: "Hello" },
-  ]);
-  console.log(result.content);
-}
-
-const imageLlm = registry.get("image");
-if (imageLlm?.generateImage) {
-  const img = await imageLlm.generateImage({ prompt: "A cat" });
-  console.log(img.url);
-}
+```yaml
+provider: openai
+model: gpt-4o-mini
+temperature: 0
+apiKey: ${OPENAI_API_KEY}
+# baseURL: https://api.openai.com/v1 # or Ollama, Groq, etc.
 ```
 
-
+Optional: pass a path when calling `createAgentLlM({ configPath: "/path/to/llm.yaml" })`.
+
+## npm: protocol in provider (install on demand)
 
-
-- **registry.get(id)** — get ILLMClient by id
-- **registry.defaultId()** — default LLM id
-- **registry.ids()** — all ids
-- **ILLMClient.chat(messages)** — chat (type=chat)
-- **ILLMClient.generateImage?(options)** — image generation (type=image)
+You can set the provider by **npm package name** (and optional version) in config. If the package is not installed, the framework will **install it** and then use it as the provider.
 
-
+**Recommended format:** `npm:<package>@<version>#<provider>` — e.g. **`provider: "npm:wallee-llm@0.1.0#cis"`**.
 
-- **
--
+- **`provider: "npm:wallee-llm@0.1.0#cis"`** – **(recommended)** install `wallee-llm@0.1.0` if missing, then use provider `cis`.
+- **`provider: "npm:wallee-llm@0.1.0"`** – use a specific version; default provider is used.
+- **`provider: "npm:wallee-llm#cis"`** – load wallee-llm and use the provider named `cis`.
+- **`provider: "npm:wallee-llm"`** – load wallee-llm and use its default provider (e.g. `cis`).
 
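The spec strings above decompose mechanically. The package exports `parseNpmProviderSpec` for this, but its signature is not visible in this diff, so the sketch below only illustrates how `npm:<package>@<version>#<provider>` might be split (return shape assumed):

```ts
// Sketch: split "npm:<package>@<version>#<provider>" into its parts.
// The shape below is assumed; the package's parseNpmProviderSpec may differ.
interface NpmProviderSpec {
  pkg: string;        // npm package name, e.g. "wallee-llm"
  version?: string;   // optional version, e.g. "0.1.0"
  provider?: string;  // optional provider name, e.g. "cis"
}

function parseSpec(spec: string): NpmProviderSpec | null {
  if (!spec.startsWith("npm:")) return null;      // not an npm: provider
  const [pkgAndVersion, provider] = spec.slice(4).split("#");
  const at = pkgAndVersion.indexOf("@", 1);       // index 1 skips a leading scope "@"
  const pkg = at === -1 ? pkgAndVersion : pkgAndVersion.slice(0, at);
  const version = at === -1 ? undefined : pkgAndVersion.slice(at + 1);
  return { pkg, version, provider };
}

// parseSpec("npm:wallee-llm@0.1.0#cis")
// → { pkg: "wallee-llm", version: "0.1.0", provider: "cis" }
```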
-
+Use **createChatModelFromLlmConfigWithNpm** or **createAgentLlMAsync** so npm: providers are resolved (and optionally installed) before creating the model:
 
-
+```ts
+import { createChatModelFromLlmConfigWithNpm, createAgentLlMAsync } from "@easynet/agent-llm";
+
+// From a raw llm section (e.g. from loadLlmConfig)
+const model = await createChatModelFromLlmConfigWithNpm({ llmSection });
+
+// From config file (llm.yaml / config/llm.yaml)
+const llm = await createAgentLlMAsync();
+```
 
-
-2. Push to **master** or run **Actions → Release → Run workflow** to trigger the release. The workflow runs tests, builds, then semantic-release to bump patch and publish to npm.
+Options: **installNpmIfMissing** (default `true`) and **cwd** (default `process.cwd()` for npm install). Exports: `parseNpmProviderSpec`, `ensureNpmPackageInstalled`, `resolveNpmProvider`, `resolveLlmSectionWithNpm`, `isNpmProviderSpec`, `createChatModelFromLlmConfigWithNpm`, `createAgentLlMAsync`.
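Going by the option names in that last line, opting out of auto-install might look roughly like this; whether `createAgentLlMAsync` accepts the options in exactly this shape is an assumption from the README text, not confirmed by the diff:

```ts
import { createAgentLlMAsync } from "@easynet/agent-llm";

// Assumed option shape; the README names the options but not the exact API.
const llm = await createAgentLlMAsync({
  installNpmIfMissing: false, // fail instead of running "npm install" for missing providers
  cwd: "/srv/my-app",         // directory an on-demand npm install would run in
});
```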