@radaros/core 0.3.5 → 0.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +1407 -0
- package/dist/index.js +5269 -0
- package/package.json +6 -2
- package/src/a2a/a2a-remote-agent.ts +0 -270
- package/src/a2a/types.ts +0 -142
- package/src/agent/agent.ts +0 -417
- package/src/agent/llm-loop.ts +0 -290
- package/src/agent/run-context.ts +0 -35
- package/src/agent/types.ts +0 -89
- package/src/events/event-bus.ts +0 -45
- package/src/events/types.ts +0 -16
- package/src/guardrails/types.ts +0 -5
- package/src/hooks/types.ts +0 -6
- package/src/index.ts +0 -157
- package/src/knowledge/knowledge-base.ts +0 -146
- package/src/logger/logger.ts +0 -249
- package/src/mcp/mcp-client.ts +0 -264
- package/src/memory/memory.ts +0 -87
- package/src/memory/types.ts +0 -13
- package/src/memory/user-memory.ts +0 -211
- package/src/models/provider.ts +0 -22
- package/src/models/providers/anthropic.ts +0 -360
- package/src/models/providers/google.ts +0 -386
- package/src/models/providers/ollama.ts +0 -211
- package/src/models/providers/openai.ts +0 -345
- package/src/models/providers/vertex.ts +0 -427
- package/src/models/registry.ts +0 -107
- package/src/models/types.ts +0 -124
- package/src/session/session-manager.ts +0 -75
- package/src/session/types.ts +0 -10
- package/src/storage/driver.ts +0 -10
- package/src/storage/in-memory.ts +0 -44
- package/src/storage/mongodb.ts +0 -70
- package/src/storage/postgres.ts +0 -81
- package/src/storage/sqlite.ts +0 -81
- package/src/team/modes.ts +0 -1
- package/src/team/team.ts +0 -323
- package/src/team/types.ts +0 -26
- package/src/toolkits/base.ts +0 -15
- package/src/toolkits/duckduckgo.ts +0 -256
- package/src/toolkits/gmail.ts +0 -226
- package/src/toolkits/hackernews.ts +0 -121
- package/src/toolkits/websearch.ts +0 -158
- package/src/toolkits/whatsapp.ts +0 -209
- package/src/tools/define-tool.ts +0 -22
- package/src/tools/tool-executor.ts +0 -221
- package/src/tools/types.ts +0 -36
- package/src/utils/retry.ts +0 -56
- package/src/vector/base.ts +0 -44
- package/src/vector/embeddings/google.ts +0 -64
- package/src/vector/embeddings/openai.ts +0 -66
- package/src/vector/in-memory.ts +0 -115
- package/src/vector/mongodb.ts +0 -241
- package/src/vector/pgvector.ts +0 -169
- package/src/vector/qdrant.ts +0 -203
- package/src/vector/types.ts +0 -55
- package/src/workflow/step-runner.ts +0 -303
- package/src/workflow/types.ts +0 -55
- package/src/workflow/workflow.ts +0 -68
- package/tsconfig.json +0 -8
|
@@ -1,345 +0,0 @@
|
|
|
1
|
-
import { createRequire } from "node:module";
|
|
2
|
-
import type { ModelProvider } from "../provider.js";
|
|
3
|
-
import {
|
|
4
|
-
getTextContent,
|
|
5
|
-
isMultiModal,
|
|
6
|
-
type ChatMessage,
|
|
7
|
-
type ContentPart,
|
|
8
|
-
type ModelConfig,
|
|
9
|
-
type ModelResponse,
|
|
10
|
-
type StreamChunk,
|
|
11
|
-
type ToolDefinition,
|
|
12
|
-
type TokenUsage,
|
|
13
|
-
type ToolCall,
|
|
14
|
-
} from "../types.js";
|
|
15
|
-
|
|
16
|
-
// CommonJS require bridge so the optional "openai" dependency can be loaded
// lazily from this ESM module (see the OpenAIProvider constructor).
const _require = createRequire(import.meta.url);
|
|
17
|
-
|
|
18
|
-
/**
 * Optional constructor settings for {@link OpenAIProvider}.
 */
interface OpenAIConfig {
  /** API key; when omitted, falls back to the OPENAI_API_KEY environment variable. */
  apiKey?: string;
  /** Override the API endpoint (e.g. for OpenAI-compatible proxies). */
  baseURL?: string;
}
|
|
22
|
-
|
|
23
|
-
export class OpenAIProvider implements ModelProvider {
|
|
24
|
-
readonly providerId = "openai";
|
|
25
|
-
readonly modelId: string;
|
|
26
|
-
private client: any;
|
|
27
|
-
private OpenAICtor: any;
|
|
28
|
-
private baseURL?: string;
|
|
29
|
-
private clientCache = new Map<string, any>();
|
|
30
|
-
|
|
31
|
-
constructor(modelId: string, config?: OpenAIConfig) {
|
|
32
|
-
this.modelId = modelId;
|
|
33
|
-
this.baseURL = config?.baseURL;
|
|
34
|
-
try {
|
|
35
|
-
const mod = _require("openai");
|
|
36
|
-
this.OpenAICtor = mod.default ?? mod;
|
|
37
|
-
const key = config?.apiKey ?? process.env.OPENAI_API_KEY;
|
|
38
|
-
if (key) {
|
|
39
|
-
this.client = new this.OpenAICtor({ apiKey: key, baseURL: config?.baseURL });
|
|
40
|
-
}
|
|
41
|
-
} catch {
|
|
42
|
-
throw new Error(
|
|
43
|
-
"openai package is required for OpenAIProvider. Install it: npm install openai"
|
|
44
|
-
);
|
|
45
|
-
}
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
private getClient(apiKey?: string): any {
|
|
49
|
-
if (apiKey) {
|
|
50
|
-
let cached = this.clientCache.get(apiKey);
|
|
51
|
-
if (!cached) {
|
|
52
|
-
cached = new this.OpenAICtor({ apiKey, baseURL: this.baseURL });
|
|
53
|
-
this.clientCache.set(apiKey, cached);
|
|
54
|
-
}
|
|
55
|
-
return cached;
|
|
56
|
-
}
|
|
57
|
-
if (this.client) return this.client;
|
|
58
|
-
const envKey = process.env.OPENAI_API_KEY;
|
|
59
|
-
if (envKey) {
|
|
60
|
-
this.client = new this.OpenAICtor({ apiKey: envKey, baseURL: this.baseURL });
|
|
61
|
-
return this.client;
|
|
62
|
-
}
|
|
63
|
-
throw new Error("No OpenAI API key provided. Pass it via the x-openai-api-key header, apiKey in request body, or set OPENAI_API_KEY env var.");
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
async generate(
|
|
67
|
-
messages: ChatMessage[],
|
|
68
|
-
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
69
|
-
): Promise<ModelResponse> {
|
|
70
|
-
const params: Record<string, unknown> = {
|
|
71
|
-
model: this.modelId,
|
|
72
|
-
messages: this.toOpenAIMessages(messages),
|
|
73
|
-
};
|
|
74
|
-
|
|
75
|
-
if (options?.reasoning?.enabled) {
|
|
76
|
-
params.reasoning_effort = options.reasoning.effort ?? "medium";
|
|
77
|
-
} else {
|
|
78
|
-
if (options?.temperature !== undefined)
|
|
79
|
-
params.temperature = options.temperature;
|
|
80
|
-
}
|
|
81
|
-
if (options?.maxTokens !== undefined)
|
|
82
|
-
params.max_tokens = options.maxTokens;
|
|
83
|
-
if (options?.topP !== undefined) params.top_p = options.topP;
|
|
84
|
-
if (options?.stop) params.stop = options.stop;
|
|
85
|
-
this.applyResponseFormat(params, options);
|
|
86
|
-
if (options?.tools?.length) {
|
|
87
|
-
params.tools = this.toOpenAITools(options.tools);
|
|
88
|
-
}
|
|
89
|
-
|
|
90
|
-
const client = this.getClient(options?.apiKey);
|
|
91
|
-
const response = await client.chat.completions.create(params);
|
|
92
|
-
return this.normalizeResponse(response);
|
|
93
|
-
}
|
|
94
|
-
|
|
95
|
-
async *stream(
|
|
96
|
-
messages: ChatMessage[],
|
|
97
|
-
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
98
|
-
): AsyncGenerator<StreamChunk> {
|
|
99
|
-
const params: Record<string, unknown> = {
|
|
100
|
-
model: this.modelId,
|
|
101
|
-
messages: this.toOpenAIMessages(messages),
|
|
102
|
-
stream: true,
|
|
103
|
-
};
|
|
104
|
-
|
|
105
|
-
if (options?.reasoning?.enabled) {
|
|
106
|
-
params.reasoning_effort = options.reasoning.effort ?? "medium";
|
|
107
|
-
} else {
|
|
108
|
-
if (options?.temperature !== undefined)
|
|
109
|
-
params.temperature = options.temperature;
|
|
110
|
-
}
|
|
111
|
-
if (options?.maxTokens !== undefined)
|
|
112
|
-
params.max_tokens = options.maxTokens;
|
|
113
|
-
if (options?.topP !== undefined) params.top_p = options.topP;
|
|
114
|
-
if (options?.stop) params.stop = options.stop;
|
|
115
|
-
this.applyResponseFormat(params, options);
|
|
116
|
-
if (options?.tools?.length) {
|
|
117
|
-
params.tools = this.toOpenAITools(options.tools);
|
|
118
|
-
}
|
|
119
|
-
|
|
120
|
-
const client = this.getClient(options?.apiKey);
|
|
121
|
-
const stream = await client.chat.completions.create(params);
|
|
122
|
-
|
|
123
|
-
const activeToolCalls = new Map<
|
|
124
|
-
number,
|
|
125
|
-
{ id: string; name: string; args: string }
|
|
126
|
-
>();
|
|
127
|
-
|
|
128
|
-
for await (const chunk of stream) {
|
|
129
|
-
const choice = chunk.choices?.[0];
|
|
130
|
-
if (!choice) continue;
|
|
131
|
-
|
|
132
|
-
const delta = choice.delta;
|
|
133
|
-
|
|
134
|
-
if (delta?.content) {
|
|
135
|
-
yield { type: "text", text: delta.content };
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
if (delta?.tool_calls) {
|
|
139
|
-
for (const tc of delta.tool_calls) {
|
|
140
|
-
const idx = tc.index ?? 0;
|
|
141
|
-
|
|
142
|
-
if (tc.id) {
|
|
143
|
-
activeToolCalls.set(idx, {
|
|
144
|
-
id: tc.id,
|
|
145
|
-
name: tc.function?.name ?? "",
|
|
146
|
-
args: tc.function?.arguments ?? "",
|
|
147
|
-
});
|
|
148
|
-
yield {
|
|
149
|
-
type: "tool_call_start",
|
|
150
|
-
toolCall: {
|
|
151
|
-
id: tc.id,
|
|
152
|
-
name: tc.function?.name ?? "",
|
|
153
|
-
},
|
|
154
|
-
};
|
|
155
|
-
} else if (tc.function?.arguments) {
|
|
156
|
-
const existing = activeToolCalls.get(idx);
|
|
157
|
-
if (existing) {
|
|
158
|
-
existing.args += tc.function.arguments;
|
|
159
|
-
yield {
|
|
160
|
-
type: "tool_call_delta",
|
|
161
|
-
toolCallId: existing.id,
|
|
162
|
-
argumentsDelta: tc.function.arguments,
|
|
163
|
-
};
|
|
164
|
-
}
|
|
165
|
-
}
|
|
166
|
-
|
|
167
|
-
if (tc.function?.name && !tc.id) {
|
|
168
|
-
const existing = activeToolCalls.get(idx);
|
|
169
|
-
if (existing) {
|
|
170
|
-
existing.name = tc.function.name;
|
|
171
|
-
}
|
|
172
|
-
}
|
|
173
|
-
}
|
|
174
|
-
}
|
|
175
|
-
|
|
176
|
-
if (delta?.reasoning_content) {
|
|
177
|
-
yield { type: "thinking", text: delta.reasoning_content };
|
|
178
|
-
}
|
|
179
|
-
|
|
180
|
-
if (choice.finish_reason) {
|
|
181
|
-
for (const [, tc] of activeToolCalls) {
|
|
182
|
-
yield { type: "tool_call_end", toolCallId: tc.id };
|
|
183
|
-
}
|
|
184
|
-
|
|
185
|
-
const reasoningTkns = chunk.usage?.completion_tokens_details?.reasoning_tokens ?? 0;
|
|
186
|
-
yield {
|
|
187
|
-
type: "finish",
|
|
188
|
-
finishReason:
|
|
189
|
-
choice.finish_reason === "tool_calls"
|
|
190
|
-
? "tool_calls"
|
|
191
|
-
: choice.finish_reason,
|
|
192
|
-
usage: chunk.usage
|
|
193
|
-
? {
|
|
194
|
-
promptTokens: chunk.usage.prompt_tokens ?? 0,
|
|
195
|
-
completionTokens: chunk.usage.completion_tokens ?? 0,
|
|
196
|
-
totalTokens: chunk.usage.total_tokens ?? 0,
|
|
197
|
-
...(reasoningTkns > 0 ? { reasoningTokens: reasoningTkns } : {}),
|
|
198
|
-
}
|
|
199
|
-
: undefined,
|
|
200
|
-
};
|
|
201
|
-
}
|
|
202
|
-
}
|
|
203
|
-
}
|
|
204
|
-
|
|
205
|
-
private applyResponseFormat(params: Record<string, unknown>, options?: ModelConfig): void {
|
|
206
|
-
if (!options?.responseFormat) return;
|
|
207
|
-
if (options.responseFormat === "json") {
|
|
208
|
-
params.response_format = { type: "json_object" };
|
|
209
|
-
} else if (options.responseFormat === "text") {
|
|
210
|
-
// default
|
|
211
|
-
} else if (typeof options.responseFormat === "object") {
|
|
212
|
-
params.response_format = {
|
|
213
|
-
type: "json_schema",
|
|
214
|
-
json_schema: {
|
|
215
|
-
name: options.responseFormat.name ?? "response",
|
|
216
|
-
schema: options.responseFormat.schema,
|
|
217
|
-
strict: true,
|
|
218
|
-
},
|
|
219
|
-
};
|
|
220
|
-
}
|
|
221
|
-
}
|
|
222
|
-
|
|
223
|
-
private toOpenAIMessages(messages: ChatMessage[]): unknown[] {
|
|
224
|
-
return messages.map((msg) => {
|
|
225
|
-
if (msg.role === "assistant" && msg.toolCalls?.length) {
|
|
226
|
-
return {
|
|
227
|
-
role: "assistant",
|
|
228
|
-
content: getTextContent(msg.content),
|
|
229
|
-
tool_calls: msg.toolCalls.map((tc) => ({
|
|
230
|
-
id: tc.id,
|
|
231
|
-
type: "function",
|
|
232
|
-
function: {
|
|
233
|
-
name: tc.name,
|
|
234
|
-
arguments: JSON.stringify(tc.arguments),
|
|
235
|
-
},
|
|
236
|
-
})),
|
|
237
|
-
};
|
|
238
|
-
}
|
|
239
|
-
|
|
240
|
-
if (msg.role === "tool") {
|
|
241
|
-
return {
|
|
242
|
-
role: "tool",
|
|
243
|
-
tool_call_id: msg.toolCallId,
|
|
244
|
-
content: getTextContent(msg.content),
|
|
245
|
-
};
|
|
246
|
-
}
|
|
247
|
-
|
|
248
|
-
if (isMultiModal(msg.content)) {
|
|
249
|
-
return {
|
|
250
|
-
role: msg.role,
|
|
251
|
-
content: msg.content.map((part) => this.partToOpenAI(part)),
|
|
252
|
-
};
|
|
253
|
-
}
|
|
254
|
-
|
|
255
|
-
return {
|
|
256
|
-
role: msg.role,
|
|
257
|
-
content: msg.content ?? "",
|
|
258
|
-
};
|
|
259
|
-
});
|
|
260
|
-
}
|
|
261
|
-
|
|
262
|
-
private partToOpenAI(part: ContentPart): unknown {
|
|
263
|
-
switch (part.type) {
|
|
264
|
-
case "text":
|
|
265
|
-
return { type: "text", text: part.text };
|
|
266
|
-
case "image": {
|
|
267
|
-
const isUrl = part.data.startsWith("http://") || part.data.startsWith("https://");
|
|
268
|
-
return {
|
|
269
|
-
type: "image_url",
|
|
270
|
-
image_url: {
|
|
271
|
-
url: isUrl ? part.data : `data:${part.mimeType ?? "image/png"};base64,${part.data}`,
|
|
272
|
-
},
|
|
273
|
-
};
|
|
274
|
-
}
|
|
275
|
-
case "audio":
|
|
276
|
-
return {
|
|
277
|
-
type: "input_audio",
|
|
278
|
-
input_audio: {
|
|
279
|
-
data: part.data,
|
|
280
|
-
format: part.mimeType?.split("/")[1] ?? "mp3",
|
|
281
|
-
},
|
|
282
|
-
};
|
|
283
|
-
case "file":
|
|
284
|
-
return {
|
|
285
|
-
type: "text",
|
|
286
|
-
text: `[File: ${part.filename ?? "attachment"} (${part.mimeType})]`,
|
|
287
|
-
};
|
|
288
|
-
}
|
|
289
|
-
}
|
|
290
|
-
|
|
291
|
-
private toOpenAITools(
|
|
292
|
-
tools: ToolDefinition[]
|
|
293
|
-
): unknown[] {
|
|
294
|
-
return tools.map((t) => ({
|
|
295
|
-
type: "function",
|
|
296
|
-
function: {
|
|
297
|
-
name: t.name,
|
|
298
|
-
description: t.description,
|
|
299
|
-
parameters: t.parameters,
|
|
300
|
-
},
|
|
301
|
-
}));
|
|
302
|
-
}
|
|
303
|
-
|
|
304
|
-
private normalizeResponse(response: any): ModelResponse & { thinking?: string } {
|
|
305
|
-
const choice = response.choices[0];
|
|
306
|
-
const msg = choice.message;
|
|
307
|
-
|
|
308
|
-
const toolCalls: ToolCall[] = (msg.tool_calls ?? []).map((tc: any) => ({
|
|
309
|
-
id: tc.id,
|
|
310
|
-
name: tc.function.name,
|
|
311
|
-
arguments: JSON.parse(tc.function.arguments || "{}"),
|
|
312
|
-
}));
|
|
313
|
-
|
|
314
|
-
const reasoningTokens = response.usage?.completion_tokens_details?.reasoning_tokens ?? 0;
|
|
315
|
-
const usage: TokenUsage = {
|
|
316
|
-
promptTokens: response.usage?.prompt_tokens ?? 0,
|
|
317
|
-
completionTokens: response.usage?.completion_tokens ?? 0,
|
|
318
|
-
totalTokens: response.usage?.total_tokens ?? 0,
|
|
319
|
-
...(reasoningTokens > 0 ? { reasoningTokens } : {}),
|
|
320
|
-
};
|
|
321
|
-
|
|
322
|
-
let finishReason: ModelResponse["finishReason"] = "stop";
|
|
323
|
-
if (choice.finish_reason === "tool_calls") finishReason = "tool_calls";
|
|
324
|
-
else if (choice.finish_reason === "length") finishReason = "length";
|
|
325
|
-
else if (choice.finish_reason === "content_filter")
|
|
326
|
-
finishReason = "content_filter";
|
|
327
|
-
|
|
328
|
-
const result: ModelResponse & { thinking?: string } = {
|
|
329
|
-
message: {
|
|
330
|
-
role: "assistant",
|
|
331
|
-
content: msg.content ?? null,
|
|
332
|
-
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
333
|
-
},
|
|
334
|
-
usage,
|
|
335
|
-
finishReason,
|
|
336
|
-
raw: response,
|
|
337
|
-
};
|
|
338
|
-
|
|
339
|
-
if (msg.reasoning_content) {
|
|
340
|
-
result.thinking = msg.reasoning_content;
|
|
341
|
-
}
|
|
342
|
-
|
|
343
|
-
return result;
|
|
344
|
-
}
|
|
345
|
-
}
|