@radaros/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +887 -0
- package/dist/index.js +3462 -0
- package/package.json +64 -0
- package/src/agent/agent.ts +314 -0
- package/src/agent/llm-loop.ts +263 -0
- package/src/agent/run-context.ts +35 -0
- package/src/agent/types.ts +77 -0
- package/src/events/event-bus.ts +45 -0
- package/src/events/types.ts +16 -0
- package/src/guardrails/types.ts +5 -0
- package/src/hooks/types.ts +6 -0
- package/src/index.ts +111 -0
- package/src/knowledge/knowledge-base.ts +146 -0
- package/src/logger/logger.ts +232 -0
- package/src/memory/memory.ts +87 -0
- package/src/memory/types.ts +13 -0
- package/src/models/provider.ts +22 -0
- package/src/models/providers/anthropic.ts +330 -0
- package/src/models/providers/google.ts +361 -0
- package/src/models/providers/ollama.ts +211 -0
- package/src/models/providers/openai.ts +323 -0
- package/src/models/registry.ts +90 -0
- package/src/models/types.ts +112 -0
- package/src/session/session-manager.ts +75 -0
- package/src/session/types.ts +10 -0
- package/src/storage/driver.ts +10 -0
- package/src/storage/in-memory.ts +44 -0
- package/src/storage/mongodb.ts +70 -0
- package/src/storage/postgres.ts +81 -0
- package/src/storage/sqlite.ts +81 -0
- package/src/team/modes.ts +1 -0
- package/src/team/team.ts +323 -0
- package/src/team/types.ts +26 -0
- package/src/tools/define-tool.ts +20 -0
- package/src/tools/tool-executor.ts +131 -0
- package/src/tools/types.ts +27 -0
- package/src/vector/base.ts +44 -0
- package/src/vector/embeddings/google.ts +64 -0
- package/src/vector/embeddings/openai.ts +66 -0
- package/src/vector/in-memory.ts +115 -0
- package/src/vector/mongodb.ts +241 -0
- package/src/vector/pgvector.ts +169 -0
- package/src/vector/qdrant.ts +203 -0
- package/src/vector/types.ts +55 -0
- package/src/workflow/step-runner.ts +303 -0
- package/src/workflow/types.ts +55 -0
- package/src/workflow/workflow.ts +68 -0
- package/tsconfig.json +8 -0
|
@@ -0,0 +1,323 @@
|
|
|
1
|
+
import { createRequire } from "node:module";
|
|
2
|
+
import type { ModelProvider } from "../provider.js";
|
|
3
|
+
import {
|
|
4
|
+
getTextContent,
|
|
5
|
+
isMultiModal,
|
|
6
|
+
type ChatMessage,
|
|
7
|
+
type ContentPart,
|
|
8
|
+
type ModelConfig,
|
|
9
|
+
type ModelResponse,
|
|
10
|
+
type StreamChunk,
|
|
11
|
+
type ToolDefinition,
|
|
12
|
+
type TokenUsage,
|
|
13
|
+
type ToolCall,
|
|
14
|
+
} from "../types.js";
|
|
15
|
+
|
|
16
|
+
// Lazy CommonJS require hook so the optional "openai" dependency is only
// loaded when this provider is actually constructed.
const _require = createRequire(import.meta.url);

/** Construction-time options for OpenAIProvider. */
interface OpenAIConfig {
  /** API key; when omitted, the OPENAI_API_KEY env var is consulted. */
  apiKey?: string;
  /** Override the API endpoint (e.g. proxies or OpenAI-compatible servers). */
  baseURL?: string;
}
|
|
22
|
+
|
|
23
|
+
export class OpenAIProvider implements ModelProvider {
|
|
24
|
+
readonly providerId = "openai";
|
|
25
|
+
readonly modelId: string;
|
|
26
|
+
private client: any;
|
|
27
|
+
private OpenAICtor: any;
|
|
28
|
+
private baseURL?: string;
|
|
29
|
+
private clientCache = new Map<string, any>();
|
|
30
|
+
|
|
31
|
+
constructor(modelId: string, config?: OpenAIConfig) {
|
|
32
|
+
this.modelId = modelId;
|
|
33
|
+
this.baseURL = config?.baseURL;
|
|
34
|
+
try {
|
|
35
|
+
const mod = _require("openai");
|
|
36
|
+
this.OpenAICtor = mod.default ?? mod;
|
|
37
|
+
const key = config?.apiKey ?? process.env.OPENAI_API_KEY;
|
|
38
|
+
if (key) {
|
|
39
|
+
this.client = new this.OpenAICtor({ apiKey: key, baseURL: config?.baseURL });
|
|
40
|
+
}
|
|
41
|
+
} catch {
|
|
42
|
+
throw new Error(
|
|
43
|
+
"openai package is required for OpenAIProvider. Install it: npm install openai"
|
|
44
|
+
);
|
|
45
|
+
}
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
private getClient(apiKey?: string): any {
|
|
49
|
+
if (apiKey) {
|
|
50
|
+
let cached = this.clientCache.get(apiKey);
|
|
51
|
+
if (!cached) {
|
|
52
|
+
cached = new this.OpenAICtor({ apiKey, baseURL: this.baseURL });
|
|
53
|
+
this.clientCache.set(apiKey, cached);
|
|
54
|
+
}
|
|
55
|
+
return cached;
|
|
56
|
+
}
|
|
57
|
+
if (this.client) return this.client;
|
|
58
|
+
const envKey = process.env.OPENAI_API_KEY;
|
|
59
|
+
if (envKey) {
|
|
60
|
+
this.client = new this.OpenAICtor({ apiKey: envKey, baseURL: this.baseURL });
|
|
61
|
+
return this.client;
|
|
62
|
+
}
|
|
63
|
+
throw new Error("No OpenAI API key provided. Pass it via the x-openai-api-key header, apiKey in request body, or set OPENAI_API_KEY env var.");
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
async generate(
|
|
67
|
+
messages: ChatMessage[],
|
|
68
|
+
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
69
|
+
): Promise<ModelResponse> {
|
|
70
|
+
const params: Record<string, unknown> = {
|
|
71
|
+
model: this.modelId,
|
|
72
|
+
messages: this.toOpenAIMessages(messages),
|
|
73
|
+
};
|
|
74
|
+
|
|
75
|
+
if (options?.temperature !== undefined)
|
|
76
|
+
params.temperature = options.temperature;
|
|
77
|
+
if (options?.maxTokens !== undefined)
|
|
78
|
+
params.max_tokens = options.maxTokens;
|
|
79
|
+
if (options?.topP !== undefined) params.top_p = options.topP;
|
|
80
|
+
if (options?.stop) params.stop = options.stop;
|
|
81
|
+
this.applyResponseFormat(params, options);
|
|
82
|
+
if (options?.tools?.length) {
|
|
83
|
+
params.tools = this.toOpenAITools(options.tools);
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
const client = this.getClient(options?.apiKey);
|
|
87
|
+
const response = await client.chat.completions.create(params);
|
|
88
|
+
return this.normalizeResponse(response);
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
async *stream(
|
|
92
|
+
messages: ChatMessage[],
|
|
93
|
+
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
94
|
+
): AsyncGenerator<StreamChunk> {
|
|
95
|
+
const params: Record<string, unknown> = {
|
|
96
|
+
model: this.modelId,
|
|
97
|
+
messages: this.toOpenAIMessages(messages),
|
|
98
|
+
stream: true,
|
|
99
|
+
};
|
|
100
|
+
|
|
101
|
+
if (options?.temperature !== undefined)
|
|
102
|
+
params.temperature = options.temperature;
|
|
103
|
+
if (options?.maxTokens !== undefined)
|
|
104
|
+
params.max_tokens = options.maxTokens;
|
|
105
|
+
if (options?.topP !== undefined) params.top_p = options.topP;
|
|
106
|
+
if (options?.stop) params.stop = options.stop;
|
|
107
|
+
this.applyResponseFormat(params, options);
|
|
108
|
+
if (options?.tools?.length) {
|
|
109
|
+
params.tools = this.toOpenAITools(options.tools);
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
const client = this.getClient(options?.apiKey);
|
|
113
|
+
const stream = await client.chat.completions.create(params);
|
|
114
|
+
|
|
115
|
+
const activeToolCalls = new Map<
|
|
116
|
+
number,
|
|
117
|
+
{ id: string; name: string; args: string }
|
|
118
|
+
>();
|
|
119
|
+
|
|
120
|
+
for await (const chunk of stream) {
|
|
121
|
+
const choice = chunk.choices?.[0];
|
|
122
|
+
if (!choice) continue;
|
|
123
|
+
|
|
124
|
+
const delta = choice.delta;
|
|
125
|
+
|
|
126
|
+
if (delta?.content) {
|
|
127
|
+
yield { type: "text", text: delta.content };
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
if (delta?.tool_calls) {
|
|
131
|
+
for (const tc of delta.tool_calls) {
|
|
132
|
+
const idx = tc.index ?? 0;
|
|
133
|
+
|
|
134
|
+
if (tc.id) {
|
|
135
|
+
activeToolCalls.set(idx, {
|
|
136
|
+
id: tc.id,
|
|
137
|
+
name: tc.function?.name ?? "",
|
|
138
|
+
args: tc.function?.arguments ?? "",
|
|
139
|
+
});
|
|
140
|
+
yield {
|
|
141
|
+
type: "tool_call_start",
|
|
142
|
+
toolCall: {
|
|
143
|
+
id: tc.id,
|
|
144
|
+
name: tc.function?.name ?? "",
|
|
145
|
+
},
|
|
146
|
+
};
|
|
147
|
+
} else if (tc.function?.arguments) {
|
|
148
|
+
const existing = activeToolCalls.get(idx);
|
|
149
|
+
if (existing) {
|
|
150
|
+
existing.args += tc.function.arguments;
|
|
151
|
+
yield {
|
|
152
|
+
type: "tool_call_delta",
|
|
153
|
+
toolCallId: existing.id,
|
|
154
|
+
argumentsDelta: tc.function.arguments,
|
|
155
|
+
};
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
if (tc.function?.name && !tc.id) {
|
|
160
|
+
const existing = activeToolCalls.get(idx);
|
|
161
|
+
if (existing) {
|
|
162
|
+
existing.name = tc.function.name;
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
if (choice.finish_reason) {
|
|
169
|
+
for (const [, tc] of activeToolCalls) {
|
|
170
|
+
yield { type: "tool_call_end", toolCallId: tc.id };
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
yield {
|
|
174
|
+
type: "finish",
|
|
175
|
+
finishReason:
|
|
176
|
+
choice.finish_reason === "tool_calls"
|
|
177
|
+
? "tool_calls"
|
|
178
|
+
: choice.finish_reason,
|
|
179
|
+
usage: chunk.usage
|
|
180
|
+
? {
|
|
181
|
+
promptTokens: chunk.usage.prompt_tokens ?? 0,
|
|
182
|
+
completionTokens: chunk.usage.completion_tokens ?? 0,
|
|
183
|
+
totalTokens: chunk.usage.total_tokens ?? 0,
|
|
184
|
+
}
|
|
185
|
+
: undefined,
|
|
186
|
+
};
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
private applyResponseFormat(params: Record<string, unknown>, options?: ModelConfig): void {
|
|
192
|
+
if (!options?.responseFormat) return;
|
|
193
|
+
if (options.responseFormat === "json") {
|
|
194
|
+
params.response_format = { type: "json_object" };
|
|
195
|
+
} else if (options.responseFormat === "text") {
|
|
196
|
+
// default
|
|
197
|
+
} else if (typeof options.responseFormat === "object") {
|
|
198
|
+
params.response_format = {
|
|
199
|
+
type: "json_schema",
|
|
200
|
+
json_schema: {
|
|
201
|
+
name: options.responseFormat.name ?? "response",
|
|
202
|
+
schema: options.responseFormat.schema,
|
|
203
|
+
strict: true,
|
|
204
|
+
},
|
|
205
|
+
};
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
private toOpenAIMessages(messages: ChatMessage[]): unknown[] {
|
|
210
|
+
return messages.map((msg) => {
|
|
211
|
+
if (msg.role === "assistant" && msg.toolCalls?.length) {
|
|
212
|
+
return {
|
|
213
|
+
role: "assistant",
|
|
214
|
+
content: getTextContent(msg.content),
|
|
215
|
+
tool_calls: msg.toolCalls.map((tc) => ({
|
|
216
|
+
id: tc.id,
|
|
217
|
+
type: "function",
|
|
218
|
+
function: {
|
|
219
|
+
name: tc.name,
|
|
220
|
+
arguments: JSON.stringify(tc.arguments),
|
|
221
|
+
},
|
|
222
|
+
})),
|
|
223
|
+
};
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
if (msg.role === "tool") {
|
|
227
|
+
return {
|
|
228
|
+
role: "tool",
|
|
229
|
+
tool_call_id: msg.toolCallId,
|
|
230
|
+
content: getTextContent(msg.content),
|
|
231
|
+
};
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
if (isMultiModal(msg.content)) {
|
|
235
|
+
return {
|
|
236
|
+
role: msg.role,
|
|
237
|
+
content: msg.content.map((part) => this.partToOpenAI(part)),
|
|
238
|
+
};
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
return {
|
|
242
|
+
role: msg.role,
|
|
243
|
+
content: msg.content ?? "",
|
|
244
|
+
};
|
|
245
|
+
});
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
private partToOpenAI(part: ContentPart): unknown {
|
|
249
|
+
switch (part.type) {
|
|
250
|
+
case "text":
|
|
251
|
+
return { type: "text", text: part.text };
|
|
252
|
+
case "image": {
|
|
253
|
+
const isUrl = part.data.startsWith("http://") || part.data.startsWith("https://");
|
|
254
|
+
return {
|
|
255
|
+
type: "image_url",
|
|
256
|
+
image_url: {
|
|
257
|
+
url: isUrl ? part.data : `data:${part.mimeType ?? "image/png"};base64,${part.data}`,
|
|
258
|
+
},
|
|
259
|
+
};
|
|
260
|
+
}
|
|
261
|
+
case "audio":
|
|
262
|
+
return {
|
|
263
|
+
type: "input_audio",
|
|
264
|
+
input_audio: {
|
|
265
|
+
data: part.data,
|
|
266
|
+
format: part.mimeType?.split("/")[1] ?? "mp3",
|
|
267
|
+
},
|
|
268
|
+
};
|
|
269
|
+
case "file":
|
|
270
|
+
return {
|
|
271
|
+
type: "text",
|
|
272
|
+
text: `[File: ${part.filename ?? "attachment"} (${part.mimeType})]`,
|
|
273
|
+
};
|
|
274
|
+
}
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
private toOpenAITools(
|
|
278
|
+
tools: ToolDefinition[]
|
|
279
|
+
): unknown[] {
|
|
280
|
+
return tools.map((t) => ({
|
|
281
|
+
type: "function",
|
|
282
|
+
function: {
|
|
283
|
+
name: t.name,
|
|
284
|
+
description: t.description,
|
|
285
|
+
parameters: t.parameters,
|
|
286
|
+
},
|
|
287
|
+
}));
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
private normalizeResponse(response: any): ModelResponse {
|
|
291
|
+
const choice = response.choices[0];
|
|
292
|
+
const msg = choice.message;
|
|
293
|
+
|
|
294
|
+
const toolCalls: ToolCall[] = (msg.tool_calls ?? []).map((tc: any) => ({
|
|
295
|
+
id: tc.id,
|
|
296
|
+
name: tc.function.name,
|
|
297
|
+
arguments: JSON.parse(tc.function.arguments || "{}"),
|
|
298
|
+
}));
|
|
299
|
+
|
|
300
|
+
const usage: TokenUsage = {
|
|
301
|
+
promptTokens: response.usage?.prompt_tokens ?? 0,
|
|
302
|
+
completionTokens: response.usage?.completion_tokens ?? 0,
|
|
303
|
+
totalTokens: response.usage?.total_tokens ?? 0,
|
|
304
|
+
};
|
|
305
|
+
|
|
306
|
+
let finishReason: ModelResponse["finishReason"] = "stop";
|
|
307
|
+
if (choice.finish_reason === "tool_calls") finishReason = "tool_calls";
|
|
308
|
+
else if (choice.finish_reason === "length") finishReason = "length";
|
|
309
|
+
else if (choice.finish_reason === "content_filter")
|
|
310
|
+
finishReason = "content_filter";
|
|
311
|
+
|
|
312
|
+
return {
|
|
313
|
+
message: {
|
|
314
|
+
role: "assistant",
|
|
315
|
+
content: msg.content ?? null,
|
|
316
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
317
|
+
},
|
|
318
|
+
usage,
|
|
319
|
+
finishReason,
|
|
320
|
+
raw: response,
|
|
321
|
+
};
|
|
322
|
+
}
|
|
323
|
+
}
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import type { ModelProvider } from "./provider.js";
|
|
2
|
+
import { OpenAIProvider } from "./providers/openai.js";
|
|
3
|
+
import { AnthropicProvider } from "./providers/anthropic.js";
|
|
4
|
+
import { GoogleProvider } from "./providers/google.js";
|
|
5
|
+
import { OllamaProvider } from "./providers/ollama.js";
|
|
6
|
+
|
|
7
|
+
type ProviderFactory = (
|
|
8
|
+
modelId: string,
|
|
9
|
+
config?: Record<string, unknown>
|
|
10
|
+
) => ModelProvider;
|
|
11
|
+
|
|
12
|
+
export class ModelRegistry {
|
|
13
|
+
private factories = new Map<string, ProviderFactory>();
|
|
14
|
+
|
|
15
|
+
register(providerId: string, factory: ProviderFactory): void {
|
|
16
|
+
this.factories.set(providerId, factory);
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
resolve(
|
|
20
|
+
providerId: string,
|
|
21
|
+
modelId: string,
|
|
22
|
+
config?: Record<string, unknown>
|
|
23
|
+
): ModelProvider {
|
|
24
|
+
const factory = this.factories.get(providerId);
|
|
25
|
+
if (!factory) {
|
|
26
|
+
throw new Error(
|
|
27
|
+
`Unknown provider "${providerId}". Register it first with registry.register().`
|
|
28
|
+
);
|
|
29
|
+
}
|
|
30
|
+
return factory(modelId, config);
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
has(providerId: string): boolean {
|
|
34
|
+
return this.factories.has(providerId);
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
export const registry = new ModelRegistry();
|
|
39
|
+
|
|
40
|
+
registry.register(
|
|
41
|
+
"openai",
|
|
42
|
+
(modelId, config) =>
|
|
43
|
+
new OpenAIProvider(modelId, config as { apiKey?: string; baseURL?: string })
|
|
44
|
+
);
|
|
45
|
+
|
|
46
|
+
registry.register(
|
|
47
|
+
"anthropic",
|
|
48
|
+
(modelId, config) =>
|
|
49
|
+
new AnthropicProvider(modelId, config as { apiKey?: string })
|
|
50
|
+
);
|
|
51
|
+
|
|
52
|
+
registry.register(
|
|
53
|
+
"google",
|
|
54
|
+
(modelId, config) =>
|
|
55
|
+
new GoogleProvider(modelId, config as { apiKey?: string })
|
|
56
|
+
);
|
|
57
|
+
|
|
58
|
+
registry.register(
|
|
59
|
+
"ollama",
|
|
60
|
+
(modelId, config) =>
|
|
61
|
+
new OllamaProvider(modelId, config as { host?: string })
|
|
62
|
+
);
|
|
63
|
+
|
|
64
|
+
export function openai(
|
|
65
|
+
modelId: string,
|
|
66
|
+
config?: { apiKey?: string; baseURL?: string }
|
|
67
|
+
): ModelProvider {
|
|
68
|
+
return registry.resolve("openai", modelId, config);
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
export function anthropic(
|
|
72
|
+
modelId: string,
|
|
73
|
+
config?: { apiKey?: string }
|
|
74
|
+
): ModelProvider {
|
|
75
|
+
return registry.resolve("anthropic", modelId, config);
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
export function google(
|
|
79
|
+
modelId: string,
|
|
80
|
+
config?: { apiKey?: string }
|
|
81
|
+
): ModelProvider {
|
|
82
|
+
return registry.resolve("google", modelId, config);
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
export function ollama(
|
|
86
|
+
modelId: string,
|
|
87
|
+
config?: { host?: string }
|
|
88
|
+
): ModelProvider {
|
|
89
|
+
return registry.resolve("ollama", modelId, config);
|
|
90
|
+
}
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
/** Roles accepted in a chat transcript. */
export type MessageRole = "system" | "user" | "assistant" | "tool";

// ── Multi-modal content parts ─────────────────────────────────────────────

/** Plain-text fragment of a message. */
export interface TextPart {
  type: "text";
  text: string;
}

/** Image attachment. */
export interface ImagePart {
  type: "image";
  /** Base64-encoded image data OR a URL. */
  data: string;
  mimeType?: "image/png" | "image/jpeg" | "image/gif" | "image/webp";
}

/** Audio attachment. */
export interface AudioPart {
  type: "audio";
  /** Base64-encoded audio data. */
  data: string;
  mimeType?: "audio/mp3" | "audio/wav" | "audio/ogg" | "audio/webm";
}

/** Arbitrary file attachment. */
export interface FilePart {
  type: "file";
  /** Base64-encoded file data OR a URL. */
  data: string;
  mimeType: string;
  filename?: string;
}

/** Discriminated union of all content parts; `type` is the tag. */
export type ContentPart = TextPart | ImagePart | AudioPart | FilePart;

/** Convenience: plain string, or an array of multi-modal content parts. */
export type MessageContent = string | ContentPart[];

// ── Chat message ──────────────────────────────────────────────────────────

/** One turn in a conversation. */
export interface ChatMessage {
  role: MessageRole;
  /** May be null, e.g. for assistant turns that only carry tool calls. */
  content: MessageContent | null;
  /** Tool invocations requested by an assistant turn. */
  toolCalls?: ToolCall[];
  /** For role "tool": id of the call this message responds to. */
  toolCallId?: string;
  name?: string;
}

// ── Tool definitions ──────────────────────────────────────────────────────

/** A concrete tool invocation requested by the model. */
export interface ToolCall {
  id: string;
  name: string;
  /** Parsed (JSON-decoded) arguments. */
  arguments: Record<string, unknown>;
}

/** A tool made available to the model. */
export interface ToolDefinition {
  name: string;
  description: string;
  /** JSON-schema-like parameter description — TODO confirm exact schema dialect. */
  parameters: Record<string, unknown>;
}

// ── Token usage ───────────────────────────────────────────────────────────

/** Token accounting for one request. */
export interface TokenUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}

// ── Model response ────────────────────────────────────────────────────────

/** Normalized result of a non-streaming model call. */
export interface ModelResponse {
  message: ChatMessage;
  usage: TokenUsage;
  finishReason: "stop" | "tool_calls" | "length" | "content_filter";
  /** The untouched provider-specific response object. */
  raw: unknown;
}

/** Incremental events emitted by a provider's stream() generator. */
export type StreamChunk =
  | { type: "text"; text: string }
  | { type: "tool_call_start"; toolCall: { id: string; name: string } }
  | { type: "tool_call_delta"; toolCallId: string; argumentsDelta: string }
  | { type: "tool_call_end"; toolCallId: string }
  | { type: "finish"; finishReason: string; usage?: TokenUsage };

// ── Model config ──────────────────────────────────────────────────────────

/** Per-request generation options shared across providers. */
export interface ModelConfig {
  temperature?: number;
  maxTokens?: number;
  topP?: number;
  stop?: string[];
  responseFormat?: "text" | "json" | { type: "json_schema"; schema: Record<string, unknown>; name?: string };
  /** Per-request API key override. When provided, the provider uses this key instead of the one set at construction. */
  apiKey?: string;
}
|
|
96
|
+
|
|
97
|
+
// ── Helpers ───────────────────────────────────────────────────────────────
|
|
98
|
+
|
|
99
|
+
/** Extract the text content from a MessageContent value. */
|
|
100
|
+
export function getTextContent(content: MessageContent | null): string {
|
|
101
|
+
if (content === null) return "";
|
|
102
|
+
if (typeof content === "string") return content;
|
|
103
|
+
return content
|
|
104
|
+
.filter((p): p is TextPart => p.type === "text")
|
|
105
|
+
.map((p) => p.text)
|
|
106
|
+
.join("");
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/** Check if content has multi-modal parts. */
|
|
110
|
+
export function isMultiModal(content: MessageContent | null): content is ContentPart[] {
|
|
111
|
+
return Array.isArray(content);
|
|
112
|
+
}
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import type { StorageDriver } from "../storage/driver.js";
|
|
2
|
+
import type { ChatMessage } from "../models/types.js";
|
|
3
|
+
import type { Session } from "./types.js";
|
|
4
|
+
|
|
5
|
+
const NAMESPACE = "sessions";
|
|
6
|
+
|
|
7
|
+
export class SessionManager {
|
|
8
|
+
constructor(private storage: StorageDriver) {}
|
|
9
|
+
|
|
10
|
+
async getOrCreate(sessionId: string, userId?: string): Promise<Session> {
|
|
11
|
+
const existing = await this.storage.get<Session>(NAMESPACE, sessionId);
|
|
12
|
+
if (existing) return existing;
|
|
13
|
+
|
|
14
|
+
const session: Session = {
|
|
15
|
+
sessionId,
|
|
16
|
+
userId,
|
|
17
|
+
messages: [],
|
|
18
|
+
state: {},
|
|
19
|
+
createdAt: new Date(),
|
|
20
|
+
updatedAt: new Date(),
|
|
21
|
+
};
|
|
22
|
+
|
|
23
|
+
await this.storage.set(NAMESPACE, sessionId, session);
|
|
24
|
+
return session;
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
async appendMessage(sessionId: string, msg: ChatMessage): Promise<void> {
|
|
28
|
+
const session = await this.getOrCreate(sessionId);
|
|
29
|
+
session.messages.push(msg);
|
|
30
|
+
session.updatedAt = new Date();
|
|
31
|
+
await this.storage.set(NAMESPACE, sessionId, session);
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
async appendMessages(
|
|
35
|
+
sessionId: string,
|
|
36
|
+
msgs: ChatMessage[]
|
|
37
|
+
): Promise<void> {
|
|
38
|
+
const session = await this.getOrCreate(sessionId);
|
|
39
|
+
session.messages.push(...msgs);
|
|
40
|
+
session.updatedAt = new Date();
|
|
41
|
+
await this.storage.set(NAMESPACE, sessionId, session);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
async getHistory(
|
|
45
|
+
sessionId: string,
|
|
46
|
+
limit?: number
|
|
47
|
+
): Promise<ChatMessage[]> {
|
|
48
|
+
const session = await this.storage.get<Session>(NAMESPACE, sessionId);
|
|
49
|
+
if (!session) return [];
|
|
50
|
+
|
|
51
|
+
if (limit && limit > 0) {
|
|
52
|
+
return session.messages.slice(-limit);
|
|
53
|
+
}
|
|
54
|
+
return session.messages;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
async updateState(
|
|
58
|
+
sessionId: string,
|
|
59
|
+
patch: Record<string, unknown>
|
|
60
|
+
): Promise<void> {
|
|
61
|
+
const session = await this.getOrCreate(sessionId);
|
|
62
|
+
Object.assign(session.state, patch);
|
|
63
|
+
session.updatedAt = new Date();
|
|
64
|
+
await this.storage.set(NAMESPACE, sessionId, session);
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
async getState(sessionId: string): Promise<Record<string, unknown>> {
|
|
68
|
+
const session = await this.storage.get<Session>(NAMESPACE, sessionId);
|
|
69
|
+
return session?.state ?? {};
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
async deleteSession(sessionId: string): Promise<void> {
|
|
73
|
+
await this.storage.delete(NAMESPACE, sessionId);
|
|
74
|
+
}
|
|
75
|
+
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
/** Minimal async key-value contract implemented by all storage backends. */
export interface StorageDriver {
  /** Fetch a value, or null when the key is absent. */
  get<T>(namespace: string, key: string): Promise<T | null>;
  /** Insert or overwrite a value under namespace/key. */
  set<T>(namespace: string, key: string, value: T): Promise<void>;
  /** Remove a key from the namespace. */
  delete(namespace: string, key: string): Promise<void>;
  /** List entries in a namespace, optionally filtered by a key prefix. */
  list<T>(
    namespace: string,
    prefix?: string
  ): Promise<Array<{ key: string; value: T }>>;
  /** Release any underlying resources or connections. */
  close(): Promise<void>;
}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import type { StorageDriver } from "./driver.js";
|
|
2
|
+
|
|
3
|
+
export class InMemoryStorage implements StorageDriver {
|
|
4
|
+
private store = new Map<string, string>();
|
|
5
|
+
|
|
6
|
+
private makeKey(namespace: string, key: string): string {
|
|
7
|
+
return `${namespace}:${key}`;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
async get<T>(namespace: string, key: string): Promise<T | null> {
|
|
11
|
+
const raw = this.store.get(this.makeKey(namespace, key));
|
|
12
|
+
if (raw === undefined) return null;
|
|
13
|
+
return JSON.parse(raw) as T;
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
async set<T>(namespace: string, key: string, value: T): Promise<void> {
|
|
17
|
+
this.store.set(this.makeKey(namespace, key), JSON.stringify(value));
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
async delete(namespace: string, key: string): Promise<void> {
|
|
21
|
+
this.store.delete(this.makeKey(namespace, key));
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
async list<T>(
|
|
25
|
+
namespace: string,
|
|
26
|
+
prefix?: string
|
|
27
|
+
): Promise<Array<{ key: string; value: T }>> {
|
|
28
|
+
const nsPrefix = prefix ? `${namespace}:${prefix}` : `${namespace}:`;
|
|
29
|
+
const results: Array<{ key: string; value: T }> = [];
|
|
30
|
+
|
|
31
|
+
for (const [fullKey, raw] of this.store.entries()) {
|
|
32
|
+
if (fullKey.startsWith(nsPrefix)) {
|
|
33
|
+
const key = fullKey.slice(namespace.length + 1);
|
|
34
|
+
results.push({ key, value: JSON.parse(raw) as T });
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
return results;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
async close(): Promise<void> {
|
|
42
|
+
this.store.clear();
|
|
43
|
+
}
|
|
44
|
+
}
|