@node-llm/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +215 -0
- package/dist/chat/Chat.d.ts +35 -0
- package/dist/chat/Chat.d.ts.map +1 -0
- package/dist/chat/Chat.js +147 -0
- package/dist/chat/ChatOptions.d.ts +10 -0
- package/dist/chat/ChatOptions.d.ts.map +1 -0
- package/dist/chat/ChatOptions.js +1 -0
- package/dist/chat/Content.d.ts +22 -0
- package/dist/chat/Content.d.ts.map +1 -0
- package/dist/chat/Content.js +1 -0
- package/dist/chat/Message.d.ts +11 -0
- package/dist/chat/Message.d.ts.map +1 -0
- package/dist/chat/Message.js +1 -0
- package/dist/chat/Role.d.ts +2 -0
- package/dist/chat/Role.d.ts.map +1 -0
- package/dist/chat/Role.js +1 -0
- package/dist/chat/Tool.d.ts +18 -0
- package/dist/chat/Tool.d.ts.map +1 -0
- package/dist/chat/Tool.js +1 -0
- package/dist/executor/Executor.d.ts +11 -0
- package/dist/executor/Executor.d.ts.map +1 -0
- package/dist/executor/Executor.js +26 -0
- package/dist/index.d.ts +11 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +5 -0
- package/dist/llm.d.ts +25 -0
- package/dist/llm.d.ts.map +1 -0
- package/dist/llm.js +46 -0
- package/dist/providers/Provider.d.ts +45 -0
- package/dist/providers/Provider.d.ts.map +1 -0
- package/dist/providers/Provider.js +1 -0
- package/dist/providers/openai/Capabilities.d.ts +25 -0
- package/dist/providers/openai/Capabilities.d.ts.map +1 -0
- package/dist/providers/openai/Capabilities.js +322 -0
- package/dist/providers/openai/Chat.d.ts +8 -0
- package/dist/providers/openai/Chat.d.ts.map +1 -0
- package/dist/providers/openai/Chat.js +47 -0
- package/dist/providers/openai/Models.d.ts +8 -0
- package/dist/providers/openai/Models.d.ts.map +1 -0
- package/dist/providers/openai/Models.js +35 -0
- package/dist/providers/openai/OpenAIProvider.d.ts +23 -0
- package/dist/providers/openai/OpenAIProvider.d.ts.map +1 -0
- package/dist/providers/openai/OpenAIProvider.js +33 -0
- package/dist/providers/openai/Streaming.d.ts +8 -0
- package/dist/providers/openai/Streaming.d.ts.map +1 -0
- package/dist/providers/openai/Streaming.js +55 -0
- package/dist/providers/openai/index.d.ts +2 -0
- package/dist/providers/openai/index.d.ts.map +1 -0
- package/dist/providers/openai/index.js +11 -0
- package/dist/providers/openai/register.d.ts +2 -0
- package/dist/providers/openai/register.d.ts.map +1 -0
- package/dist/providers/openai/register.js +15 -0
- package/dist/providers/openai/types.d.ts +23 -0
- package/dist/providers/openai/types.d.ts.map +1 -0
- package/dist/providers/openai/types.js +1 -0
- package/dist/providers/registry.d.ts +20 -0
- package/dist/providers/registry.d.ts.map +1 -0
- package/dist/providers/registry.js +29 -0
- package/dist/tools/Tool.d.ts +8 -0
- package/dist/tools/Tool.d.ts.map +1 -0
- package/dist/tools/Tool.js +1 -0
- package/dist/tools/ToolSet.d.ts +15 -0
- package/dist/tools/ToolSet.d.ts.map +1 -0
- package/dist/tools/ToolSet.js +29 -0
- package/dist/tools/index.d.ts +2 -0
- package/dist/tools/index.d.ts.map +1 -0
- package/dist/tools/index.js +1 -0
- package/dist/tools/runCommandTool.d.ts +8 -0
- package/dist/tools/runCommandTool.d.ts.map +1 -0
- package/dist/tools/runCommandTool.js +19 -0
- package/dist/utils/FileLoader.d.ts +5 -0
- package/dist/utils/FileLoader.d.ts.map +1 -0
- package/dist/utils/FileLoader.js +71 -0
- package/package.json +29 -0
package/README.md
ADDED
@@ -0,0 +1,215 @@
# node-llm

A provider-agnostic LLM core for Node.js, inspired by ruby-llm.

node-llm focuses on:
- clean abstractions
- minimal magic
- streaming-first design
- no SDK lock-in

This is a core library, not a framework.

---

## Features (current)

- **Provider-agnostic chat API**: Switch between OpenAI, Anthropic, etc. with one line of config.
- **Ruby-LLM-style configuration**: Simple, global configuration.
- **Streaming responses**: Native AsyncIterator support for progressive token delivery.
- **Tool calling (Function calling)**: Automatic execution loop for model-requested tools.
- **Multi-modal Support**: Built-in support for Vision (images) and Audio.
- **Smart File Handling**: Pass local file paths or URLs; the library handles reading and encoding.
- **Fluent API**: Chainable methods like `.withTool()` for dynamic tool registration.
- **Retry support**: Configurable retry logic at the execution layer.
- **Strict ESM and TypeScript**: Modern, type-safe development.

---

## Installation

```bash
pnpm add @node-llm/core
```

---

## Configuration

### Environment variables

```text
OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxx
```

Load environment variables in your application:

```ts
import "dotenv/config";
```
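
`LLM.configure` takes no API-key option, so the OpenAI provider is expected to pick `OPENAI_API_KEY` up from the environment. A minimal startup guard (a sketch, not part of the library):

```ts
import "dotenv/config";

// Fail fast with a clear message instead of erroring on the first request.
if (!process.env.OPENAI_API_KEY) {
  throw new Error("OPENAI_API_KEY is not set; add it to your environment or .env file");
}
```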

---

## Basic Chat Usage

```ts
import { LLM } from "@node-llm/core";

LLM.configure({
  provider: "openai",
});

const chat = LLM.chat("gpt-4o-mini", {
  systemPrompt: "You are a concise assistant",
});

const reply = await chat.ask("Explain HTTP in one sentence");
console.log(reply);

// List available models with metadata (pricing, context window, etc.)
const models = await LLM.listModels();
console.log(models[0]);
```
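
A `Chat` instance keeps its message history (exposed read-only as `chat.history`), so follow-up questions carry the earlier context. A small sketch continuing the snippet above:

```ts
// "it" resolves against the previous exchange, which is still in the history.
const followUp = await chat.ask("Now contrast it with HTTPS");
console.log(followUp);

// Inspect the accumulated messages: system, user, assistant, ...
console.log(chat.history.map((m) => m.role));
```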

---

## Streaming Responses

Streaming uses the native AsyncIterator pattern.

```ts
import { LLM } from "@node-llm/core";

LLM.configure({ provider: "openai" });

const chat = LLM.chat("gpt-4o-mini");

let full = "";

for await (const token of chat.stream("Explain HTTP in one sentence")) {
  process.stdout.write(token);
  full += token;
}

console.log("\nFinal response:", full);
```
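
Once the generator finishes, the complete streamed reply is appended to the chat history (see `stream()` in `dist/chat/Chat.js` below), so `stream` and `ask` can be mixed on the same instance. A sketch:

```ts
for await (const token of chat.stream("List three HTTP methods")) {
  process.stdout.write(token);
}

// The streamed answer is now part of the history,
// so a plain ask() can refer back to it.
const detail = await chat.ask("Explain the second one in more detail");
console.log(detail);
```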

---

## Tool Calling

You can define tools and pass them to the chat instance. The model will decide when to call them, and the library handles the execution loop automatically.

```ts
import { LLM, Tool } from "@node-llm/core";

// 1. Define a tool
const weatherTool: Tool = {
  type: 'function',
  function: {
    name: 'get_weather',
    description: 'Get the current weather for a location',
    parameters: {
      type: 'object',
      properties: {
        location: { type: 'string', description: 'The city and state, e.g. San Francisco, CA' },
        unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
      },
      required: ['location']
    }
  },
  // 2. Implement the handler
  handler: async ({ location, unit = 'celsius' }) => {
    // Call your real API here
    return JSON.stringify({ location, temperature: 22, unit, condition: "Sunny" });
  }
};

// 3. Initialize chat (Option A: via constructor)
const chat = LLM.chat("gpt-4o-mini", {
  tools: [weatherTool]
});

// OR Option B: via fluent API (Ruby-LLM style)
const chat2 = LLM.chat("gpt-4o-mini")
  .withTool(weatherTool);

// 4. Ask a question
const reply = await chat.ask("What is the weather in London?");
console.log(reply);
// Output: "The weather in London is currently 22°C and sunny."
```
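
If a handler throws, the conversation is not aborted: the error message is passed back to the model as the tool result (see the `catch` branch in `dist/chat/Chat.js` below), and the model can react to it. A sketch with a hypothetical `flaky_lookup` tool:

```ts
const flakyTool: Tool = {
  type: 'function',
  function: {
    name: 'flaky_lookup', // hypothetical tool, for illustration only
    description: 'A lookup that can fail',
    parameters: { type: 'object', properties: {}, required: [] }
  },
  handler: async () => {
    throw new Error("upstream service unavailable");
  }
};

// The model receives "Error executing tool: upstream service unavailable"
// as the tool result and can apologize or answer without the tool.
const answer = await LLM.chat("gpt-4o-mini")
  .withTool(flakyTool)
  .ask("Look something up for me");
console.log(answer);
```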

---

## File & Multi-modal Support

You can send files (images, audio, text, etc.) to models that support them. The library automatically handles local file reading, MIME detection, and base64 encoding.

```ts
// Local files (automatically read & converted)
await chat.ask("Analyze this image", {
  files: ["./image.jpg"]
});

// Text files (content is automatically appended to prompt)
await chat.ask("Summarize this code", {
  files: ["./app.ts"]
});

// Remote URLs (passed through)
await chat.ask("Describe this", {
  files: ["https://example.com/photo.png"]
});

// Audio files (OpenAI input_audio support)
await chat.ask("Transcribe this meeting", {
  files: ["./meeting.mp3"]
});
```
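
Besides `files`, the per-call options declared in `dist/chat/Chat.d.ts` include an `images` array (merged with `files` before processing) and `temperature` / `maxTokens` overrides that take precedence over the chat-level defaults for that single call:

```ts
await chat.ask("Describe these photos briefly", {
  images: ["./a.jpg", "./b.jpg"], // merged with `files` and processed the same way
  temperature: 0.2,               // per-call override of the chat-level default
  maxTokens: 200
});
```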

---

## Retry Support

Retries are applied before chat execution, not inside providers.

```ts
LLM.configure({
  provider: "openai",
  retry: {
    attempts: 3,
    delayMs: 500,
  },
});
```

Retry behavior:
- Only transient failures are retried
- Chat and providers remain clean
- Designed for future timeouts and circuit breakers
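
Conceptually, the execution layer wraps each provider call in a small loop; a simplified sketch of what `dist/executor/Executor.js` (shown later in this diff) does:

```ts
// Simplified sketch of the execution-layer retry loop.
async function withRetry<T>(fn: () => Promise<T>, attempts: number, delayMs: number): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      return await fn();
    } catch (error) {
      lastError = error;
      if (attempt >= attempts) throw error; // out of attempts: surface the error
      if (delayMs > 0) await new Promise((r) => setTimeout(r, delayMs)); // back off
    }
  }
  throw lastError;
}
```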

---

## Development

```bash
pnpm install
pnpm --filter @node-llm/core build
node test-openai.mjs
```

---

## Design Philosophy

- **Explicit over implicit**: No hidden side effects or complex state management.
- **Provider-agnostic core**: The same code works across different LLM providers.
- **Ruby-LLM mental model**: Developer experience inspired by the best of Ruby, executed with Node-native patterns.
- **Production Ready**: Built with TypeScript, ESM, and comprehensive testing.

---

## License

MIT

package/dist/chat/Chat.d.ts
ADDED
@@ -0,0 +1,35 @@
import { Message } from "./Message.js";
import { ChatOptions } from "./ChatOptions.js";
import { Provider } from "../providers/Provider.js";
export declare class Chat {
    private readonly provider;
    private readonly model;
    private readonly options;
    private messages;
    private executor;
    constructor(provider: Provider, model: string, options?: ChatOptions);
    /**
     * Read-only access to message history
     */
    get history(): readonly Message[];
    /**
     * Add a tool to the chat session (fluent API)
     */
    withTool(tool: any): this;
    /**
     * Ask the model a question
     */
    ask(content: string, options?: {
        images?: string[];
        files?: string[];
        temperature?: number;
        maxTokens?: number;
    }): Promise<string>;
    /**
     * Streams the model's response to a user question.
     * @param content The user's question to send to the model.
     * @returns An async generator yielding chunks of the assistant's response as strings.
     */
    stream(content: string): AsyncGenerator<string, void, unknown>;
}
//# sourceMappingURL=Chat.d.ts.map

package/dist/chat/Chat.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,MAAM,0BAA0B,CAAC;AAIpD,qBAAa,IAAI;IAKb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,QAAQ,CAAC,KAAK;IACtB,OAAO,CAAC,QAAQ,CAAC,OAAO;IAN1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAClB,KAAK,EAAE,MAAM,EACb,OAAO,GAAE,WAAgB;IAoB5C;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED;;OAEG;IACH,QAAQ,CAAC,IAAI,EAAE,GAAG,GAAG,IAAI;IAQzB;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;QAAC,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;QAAC,WAAW,CAAC,EAAE,MAAM,CAAC;QAAC,SAAS,CAAC,EAAE,MAAM,CAAA;KAAE,GAAG,OAAO,CAAC,MAAM,CAAC;IA6FxI;;;;OAIG;IACI,MAAM,CAAC,OAAO,EAAE,MAAM;CAyB9B"}

package/dist/chat/Chat.js
ADDED
@@ -0,0 +1,147 @@
import { FileLoader } from "../utils/FileLoader.js";
import { Executor } from "../executor/Executor.js";
import { LLM } from "../llm.js";
export class Chat {
    provider;
    model;
    options;
    messages = [];
    executor;
    constructor(provider, model, options = {}) {
        this.provider = provider;
        this.model = model;
        this.options = options;
        this.executor = new Executor(provider, LLM.getRetryConfig());
        if (options.systemPrompt) {
            this.messages.push({
                role: "system",
                content: options.systemPrompt,
            });
        }
        if (options.messages) {
            this.messages.push(...options.messages);
        }
    }
    /**
     * Read-only access to message history
     */
    get history() {
        return this.messages;
    }
    /**
     * Add a tool to the chat session (fluent API)
     */
    withTool(tool) {
        if (!this.options.tools) {
            this.options.tools = [];
        }
        this.options.tools.push(tool);
        return this;
    }
    /**
     * Ask the model a question
     */
    async ask(content, options) {
        let messageContent = content;
        const files = [...(options?.images ?? []), ...(options?.files ?? [])];
        if (files.length > 0) {
            const processedFiles = await Promise.all(files.map(f => FileLoader.load(f)));
            const hasBinary = processedFiles.some(p => p.type === "image_url" || p.type === "input_audio" || p.type === "video_url");
            if (hasBinary && this.provider.capabilities && !this.provider.capabilities.supportsVision(this.model)) {
                throw new Error(`Model ${this.model} does not support vision/binary files.`);
            }
            messageContent = [
                { type: "text", text: content },
                ...processedFiles
            ];
        }
        if (this.options.tools && this.options.tools.length > 0) {
            if (this.provider.capabilities && !this.provider.capabilities.supportsTools(this.model)) {
                throw new Error(`Model ${this.model} does not support tool calling.`);
            }
        }
        this.messages.push({
            role: "user",
            content: messageContent,
        });
        const executeOptions = {
            model: this.model,
            messages: this.messages,
            tools: this.options.tools,
            temperature: options?.temperature ?? this.options.temperature,
            max_tokens: options?.maxTokens ?? this.options.maxTokens,
        };
        let response = await this.executor.executeChat(executeOptions);
        this.messages.push({
            role: "assistant",
            content: response.content,
            tool_calls: response.tool_calls,
        });
        while (response.tool_calls && response.tool_calls.length > 0) {
            for (const toolCall of response.tool_calls) {
                const tool = this.options.tools?.find((t) => t.function.name === toolCall.function.name);
                if (tool?.handler) {
                    try {
                        const args = JSON.parse(toolCall.function.arguments);
                        const result = await tool.handler(args);
                        this.messages.push({
                            role: "tool",
                            tool_call_id: toolCall.id,
                            content: result,
                        });
                    }
                    catch (error) {
                        this.messages.push({
                            role: "tool",
                            tool_call_id: toolCall.id,
                            content: `Error executing tool: ${error.message}`,
                        });
                    }
                }
                else {
                    this.messages.push({
                        role: "tool",
                        tool_call_id: toolCall.id,
                        content: "Error: Tool not found or no handler provided",
                    });
                }
            }
            response = await this.executor.executeChat({
                model: this.model,
                messages: this.messages,
                tools: this.options.tools,
            });
            this.messages.push({
                role: "assistant",
                content: response.content,
                tool_calls: response.tool_calls,
            });
        }
        return response.content ?? "";
    }
    /**
     * Streams the model's response to a user question.
     * @param content The user's question to send to the model.
     * @returns An async generator yielding chunks of the assistant's response as strings.
     */
    async *stream(content) {
        this.messages.push({ role: "user", content });
        if (!this.provider.stream) {
            throw new Error("Streaming not supported by provider");
        }
        let full = "";
        for await (const chunk of this.provider.stream({
            model: this.model,
            messages: this.messages,
        })) {
            if (chunk.content) {
                full += chunk.content;
                yield chunk.content;
            }
        }
        this.messages.push({
            role: "assistant",
            content: full,
        });
    }
}
package/dist/chat/ChatOptions.d.ts
ADDED
@@ -0,0 +1,10 @@
import { Message } from "./Message.js";
import { Tool } from "./Tool.js";
export interface ChatOptions {
    systemPrompt?: string;
    messages?: Message[];
    tools?: Tool[];
    temperature?: number;
    maxTokens?: number;
}
//# sourceMappingURL=ChatOptions.d.ts.map

package/dist/chat/ChatOptions.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"ChatOptions.d.ts","sourceRoot":"","sources":["../../src/chat/ChatOptions.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,IAAI,EAAE,MAAM,WAAW,CAAC;AAEjC,MAAM,WAAW,WAAW;IAC1B,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,QAAQ,CAAC,EAAE,OAAO,EAAE,CAAC;IACrB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;IACf,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB"}

package/dist/chat/ChatOptions.js
ADDED
@@ -0,0 +1 @@
export {};

package/dist/chat/Content.d.ts
ADDED
@@ -0,0 +1,22 @@
export type ContentPart = {
    type: "text";
    text: string;
} | {
    type: "image_url";
    image_url: {
        url: string;
    };
} | {
    type: "input_audio";
    input_audio: {
        data: string;
        format: string;
    };
} | {
    type: "video_url";
    video_url: {
        url: string;
    };
};
export type MessageContent = string | ContentPart[];
//# sourceMappingURL=Content.d.ts.map

package/dist/chat/Content.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"Content.d.ts","sourceRoot":"","sources":["../../src/chat/Content.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,WAAW,GACnB;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,MAAM,CAAA;CAAE,GAC9B;IAAE,IAAI,EAAE,WAAW,CAAC;IAAC,SAAS,EAAE;QAAE,GAAG,EAAE,MAAM,CAAA;KAAE,CAAA;CAAE,GACjD;IAAE,IAAI,EAAE,aAAa,CAAC;IAAC,WAAW,EAAE;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,MAAM,EAAE,MAAM,CAAA;KAAE,CAAA;CAAE,GACtE;IAAE,IAAI,EAAE,WAAW,CAAC;IAAC,SAAS,EAAE;QAAE,GAAG,EAAE,MAAM,CAAA;KAAE,CAAA;CAAE,CAAC;AAEtD,MAAM,MAAM,cAAc,GAAG,MAAM,GAAG,WAAW,EAAE,CAAC"}

package/dist/chat/Content.js
ADDED
@@ -0,0 +1 @@
export {};

package/dist/chat/Message.d.ts
ADDED
@@ -0,0 +1,11 @@
import { Role } from "./Role.js";
import { ToolCall } from "./Tool.js";
import { MessageContent } from "./Content.js";
export interface Message {
    role: Role;
    content: MessageContent | null;
    tool_calls?: ToolCall[];
    tool_call_id?: string;
    name?: string;
}
//# sourceMappingURL=Message.d.ts.map

package/dist/chat/Message.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"Message.d.ts","sourceRoot":"","sources":["../../src/chat/Message.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,IAAI,EAAE,MAAM,WAAW,CAAC;AACjC,OAAO,EAAE,QAAQ,EAAE,MAAM,WAAW,CAAC;AACrC,OAAO,EAAE,cAAc,EAAE,MAAM,cAAc,CAAC;AAE9C,MAAM,WAAW,OAAO;IACtB,IAAI,EAAE,IAAI,CAAC;IACX,OAAO,EAAE,cAAc,GAAG,IAAI,CAAC;IAC/B,UAAU,CAAC,EAAE,QAAQ,EAAE,CAAC;IACxB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,IAAI,CAAC,EAAE,MAAM,CAAC;CACf"}

package/dist/chat/Message.js
ADDED
@@ -0,0 +1 @@
export {};

package/dist/chat/Role.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"Role.d.ts","sourceRoot":"","sources":["../../src/chat/Role.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,IAAI,GACZ,QAAQ,GACR,MAAM,GACN,WAAW,GACX,MAAM,CAAC"}

package/dist/chat/Role.js
ADDED
@@ -0,0 +1 @@
export {};

package/dist/chat/Tool.d.ts
ADDED
@@ -0,0 +1,18 @@
export interface ToolCall {
    id: string;
    type: 'function';
    function: {
        name: string;
        arguments: string;
    };
}
export interface Tool {
    type: 'function';
    function: {
        name: string;
        description?: string;
        parameters: Record<string, any>;
    };
    handler?: (args: any) => Promise<string>;
}
//# sourceMappingURL=Tool.d.ts.map

package/dist/chat/Tool.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"Tool.d.ts","sourceRoot":"","sources":["../../src/chat/Tool.ts"],"names":[],"mappings":"AAAA,MAAM,WAAW,QAAQ;IACvB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;CACH;AAED,MAAM,WAAW,IAAI;IACnB,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;KACjC,CAAC;IACF,OAAO,CAAC,EAAE,CAAC,IAAI,EAAE,GAAG,KAAK,OAAO,CAAC,MAAM,CAAC,CAAC;CAC1C"}

package/dist/chat/Tool.js
ADDED
@@ -0,0 +1 @@
export {};

package/dist/executor/Executor.d.ts
ADDED
@@ -0,0 +1,11 @@
import { Provider, ChatRequest, ChatResponse } from "../providers/Provider.js";
export declare class Executor {
    private readonly provider;
    private readonly retry;
    constructor(provider: Provider, retry: {
        attempts: number;
        delayMs: number;
    });
    executeChat(request: ChatRequest): Promise<ChatResponse>;
}
//# sourceMappingURL=Executor.d.ts.map

package/dist/executor/Executor.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"Executor.d.ts","sourceRoot":"","sources":["../../src/executor/Executor.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,WAAW,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AAE/E,qBAAa,QAAQ;IAEjB,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,QAAQ,CAAC,KAAK;gBADL,QAAQ,EAAE,QAAQ,EAClB,KAAK,EAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAE;IAGzD,WAAW,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;CAqB/D"}

package/dist/executor/Executor.js
ADDED
@@ -0,0 +1,26 @@
export class Executor {
    provider;
    retry;
    constructor(provider, retry) {
        this.provider = provider;
        this.retry = retry;
    }
    async executeChat(request) {
        let lastError;
        for (let attempt = 1; attempt <= this.retry.attempts; attempt++) {
            try {
                return await this.provider.chat(request);
            }
            catch (error) {
                lastError = error;
                if (attempt >= this.retry.attempts) {
                    throw error;
                }
                if (this.retry.delayMs > 0) {
                    await new Promise((r) => setTimeout(r, this.retry.delayMs));
                }
            }
        }
        throw lastError;
    }
}

package/dist/index.d.ts
ADDED
@@ -0,0 +1,11 @@
export { Chat } from "./chat/Chat.js";
export type { Message } from "./chat/Message.js";
export type { Role } from "./chat/Role.js";
export type { ChatOptions } from "./chat/ChatOptions.js";
export type { Tool, ToolCall } from "./chat/Tool.js";
export type { MessageContent, ContentPart } from "./chat/Content.js";
export { LLM } from "./llm.js";
export { providerRegistry } from "./providers/registry.js";
export { OpenAIProvider } from "./providers/openai/OpenAIProvider.js";
export { registerOpenAIProvider } from "./providers/openai/index.js";
//# sourceMappingURL=index.d.ts.map

package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,IAAI,EAAE,MAAM,gBAAgB,CAAC;AACtC,YAAY,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AACjD,YAAY,EAAE,IAAI,EAAE,MAAM,gBAAgB,CAAC;AAC3C,YAAY,EAAE,WAAW,EAAE,MAAM,uBAAuB,CAAC;AACzD,YAAY,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,gBAAgB,CAAC;AACrD,YAAY,EAAE,cAAc,EAAE,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAErE,OAAO,EAAE,GAAG,EAAE,MAAM,UAAU,CAAC;AAC/B,OAAO,EAAE,gBAAgB,EAAE,MAAM,yBAAyB,CAAC;AAE3D,OAAO,EAAE,cAAc,EAAE,MAAM,sCAAsC,CAAC;AACtE,OAAO,EAAE,sBAAsB,EAAE,MAAM,6BAA6B,CAAC"}

package/dist/index.js
ADDED
@@ -0,0 +1,5 @@
export { Chat } from "./chat/Chat.js";
export { LLM } from "./llm.js";
export { providerRegistry } from "./providers/registry.js";
export { OpenAIProvider } from "./providers/openai/OpenAIProvider.js";
export { registerOpenAIProvider } from "./providers/openai/index.js";

package/dist/llm.d.ts
ADDED
@@ -0,0 +1,25 @@
import { Chat } from "./chat/Chat.js";
import { ChatOptions } from "./chat/ChatOptions.js";
import { Provider } from "./providers/Provider.js";
export interface RetryOptions {
    attempts?: number;
    delayMs?: number;
}
type LLMConfig = {
    provider: Provider;
    retry?: RetryOptions;
} | {
    provider: string;
    retry?: RetryOptions;
};
declare class LLMCore {
    private provider?;
    private retry;
    configure(config: LLMConfig): void;
    chat(model: string, options?: ChatOptions): Chat;
    listModels(): Promise<import("./providers/Provider.js").ModelInfo[]>;
    getRetryConfig(): Required<RetryOptions>;
}
export declare const LLM: LLMCore;
export {};
//# sourceMappingURL=llm.d.ts.map

package/dist/llm.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../src/llm.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,IAAI,EAAE,MAAM,gBAAgB,CAAC;AACtC,OAAO,EAAE,WAAW,EAAE,MAAM,uBAAuB,CAAC;AACpD,OAAO,EAAE,QAAQ,EAAE,MAAM,yBAAyB,CAAC;AAInD,MAAM,WAAW,YAAY;IAC3B,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC;CAClB;AAED,KAAK,SAAS,GACV;IAAE,QAAQ,EAAE,QAAQ,CAAC;IAAC,KAAK,CAAC,EAAE,YAAY,CAAA;CAAE,GAC5C;IAAE,QAAQ,EAAE,MAAM,CAAC;IAAC,KAAK,CAAC,EAAE,YAAY,CAAA;CAAE,CAAC;AAE/C,cAAM,OAAO;IACX,OAAO,CAAC,QAAQ,CAAC,CAAW;IAE5B,OAAO,CAAC,KAAK,CAGX;IAEF,SAAS,CAAC,MAAM,EAAE,SAAS;IAmB3B,IAAI,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,IAAI;IAQ1C,UAAU;IAUhB,cAAc;CAIf;AAED,eAAO,MAAM,GAAG,SAAgB,CAAC"}

package/dist/llm.js
ADDED
@@ -0,0 +1,46 @@
import { Chat } from "./chat/Chat.js";
import { providerRegistry } from "./providers/registry.js";
import { ensureOpenAIRegistered } from "./providers/openai/register.js";
class LLMCore {
    provider;
    retry = {
        attempts: 1,
        delayMs: 0,
    };
    configure(config) {
        if (config.retry) {
            this.retry = {
                attempts: config.retry.attempts ?? 1,
                delayMs: config.retry.delayMs ?? 0,
            };
        }
        if (typeof config.provider === "string") {
            if (config.provider === "openai") {
                ensureOpenAIRegistered();
            }
            this.provider = providerRegistry.resolve(config.provider);
        }
        else {
            this.provider = config.provider;
        }
    }
    chat(model, options) {
        if (!this.provider) {
            throw new Error("LLM provider not configured");
        }
        return new Chat(this.provider, model, options);
    }
    async listModels() {
        if (!this.provider) {
            throw new Error("LLM provider not configured");
        }
        if (!this.provider.listModels) {
            throw new Error(`Provider ${typeof this.provider} does not support listModels`);
        }
        return this.provider.listModels();
    }
    getRetryConfig() {
        return this.retry;
    }
}
export const LLM = new LLMCore();