@radaros/core 0.3.5 → 0.3.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +1711 -0
- package/dist/index.js +6341 -0
- package/package.json +6 -2
- package/src/a2a/a2a-remote-agent.ts +0 -270
- package/src/a2a/types.ts +0 -142
- package/src/agent/agent.ts +0 -417
- package/src/agent/llm-loop.ts +0 -290
- package/src/agent/run-context.ts +0 -35
- package/src/agent/types.ts +0 -89
- package/src/events/event-bus.ts +0 -45
- package/src/events/types.ts +0 -16
- package/src/guardrails/types.ts +0 -5
- package/src/hooks/types.ts +0 -6
- package/src/index.ts +0 -157
- package/src/knowledge/knowledge-base.ts +0 -146
- package/src/logger/logger.ts +0 -249
- package/src/mcp/mcp-client.ts +0 -264
- package/src/memory/memory.ts +0 -87
- package/src/memory/types.ts +0 -13
- package/src/memory/user-memory.ts +0 -211
- package/src/models/provider.ts +0 -22
- package/src/models/providers/anthropic.ts +0 -360
- package/src/models/providers/google.ts +0 -386
- package/src/models/providers/ollama.ts +0 -211
- package/src/models/providers/openai.ts +0 -345
- package/src/models/providers/vertex.ts +0 -427
- package/src/models/registry.ts +0 -107
- package/src/models/types.ts +0 -124
- package/src/session/session-manager.ts +0 -75
- package/src/session/types.ts +0 -10
- package/src/storage/driver.ts +0 -10
- package/src/storage/in-memory.ts +0 -44
- package/src/storage/mongodb.ts +0 -70
- package/src/storage/postgres.ts +0 -81
- package/src/storage/sqlite.ts +0 -81
- package/src/team/modes.ts +0 -1
- package/src/team/team.ts +0 -323
- package/src/team/types.ts +0 -26
- package/src/toolkits/base.ts +0 -15
- package/src/toolkits/duckduckgo.ts +0 -256
- package/src/toolkits/gmail.ts +0 -226
- package/src/toolkits/hackernews.ts +0 -121
- package/src/toolkits/websearch.ts +0 -158
- package/src/toolkits/whatsapp.ts +0 -209
- package/src/tools/define-tool.ts +0 -22
- package/src/tools/tool-executor.ts +0 -221
- package/src/tools/types.ts +0 -36
- package/src/utils/retry.ts +0 -56
- package/src/vector/base.ts +0 -44
- package/src/vector/embeddings/google.ts +0 -64
- package/src/vector/embeddings/openai.ts +0 -66
- package/src/vector/in-memory.ts +0 -115
- package/src/vector/mongodb.ts +0 -241
- package/src/vector/pgvector.ts +0 -169
- package/src/vector/qdrant.ts +0 -203
- package/src/vector/types.ts +0 -55
- package/src/workflow/step-runner.ts +0 -303
- package/src/workflow/types.ts +0 -55
- package/src/workflow/workflow.ts +0 -68
- package/tsconfig.json +0 -8
|
@@ -1,211 +0,0 @@
|
|
|
1
|
-
import { v4 as uuidv4 } from "uuid";
import { z } from "zod";
import { InMemoryStorage } from "../storage/in-memory.js";
import type { StorageDriver } from "../storage/driver.js";
import type { ModelProvider } from "../models/provider.js";
import type { ChatMessage } from "../models/types.js";
import type { ToolDef } from "../tools/types.js";

// Storage namespace under which each user's fact list is keyed by userId.
const USER_MEMORY_NS = "memory:user";

/** Construction options for UserMemory; every field is optional. */
export interface UserMemoryConfig {
  // Persistence backend; when omitted an in-memory store is used.
  storage?: StorageDriver;
  /** LLM used for auto-extraction of facts from conversations. */
  model?: ModelProvider;
  /** Maximum number of facts stored per user (default 100). */
  maxFacts?: number;
  /** Whether auto-extraction is enabled (default true). */
  enabled?: boolean;
}

/** A single remembered statement about a user. */
export interface UserFact {
  // Unique identifier (UUID v4), used by removeFact().
  id: string;
  // The fact text itself, trimmed of surrounding whitespace.
  fact: string;
  // When the fact was first stored.
  createdAt: Date;
  // "auto" = extracted by the LLM; "manual" = added via addFacts().
  source: "auto" | "manual";
}

// Prompt template for LLM-based fact extraction. The {existingFacts} and
// {conversation} placeholders are substituted in extractAndStore().
const EXTRACTION_PROMPT = `You are a memory extraction assistant. Analyze the conversation below and extract important facts about the user that would be useful for future personalization.

Rules:
- Extract concrete facts like preferences, location, profession, interests, goals, communication style
- Each fact should be a short, self-contained statement (e.g., "Lives in Mumbai", "Prefers concise answers")
- Do NOT extract transient information (e.g., "asked about weather today")
- Do NOT extract information about the assistant
- If there are no new meaningful facts, return an empty array
- Return ONLY a valid JSON array of strings, nothing else

Existing facts about this user (avoid duplicates):
{existingFacts}

Conversation:
{conversation}

Return a JSON array of new fact strings:`;
|
|
45
|
-
|
|
46
|
-
export class UserMemory {
|
|
47
|
-
private storage: StorageDriver;
|
|
48
|
-
private model?: ModelProvider;
|
|
49
|
-
private maxFacts: number;
|
|
50
|
-
private enabled: boolean;
|
|
51
|
-
private initPromise: Promise<void> | null = null;
|
|
52
|
-
|
|
53
|
-
constructor(config?: UserMemoryConfig) {
|
|
54
|
-
this.storage = config?.storage ?? new InMemoryStorage();
|
|
55
|
-
this.model = config?.model;
|
|
56
|
-
this.maxFacts = config?.maxFacts ?? 100;
|
|
57
|
-
this.enabled = config?.enabled ?? true;
|
|
58
|
-
}
|
|
59
|
-
|
|
60
|
-
private ensureInitialized(): Promise<void> {
|
|
61
|
-
if (!this.initPromise) {
|
|
62
|
-
this.initPromise = (async () => {
|
|
63
|
-
if (typeof (this.storage as any).initialize === "function") {
|
|
64
|
-
await (this.storage as any).initialize();
|
|
65
|
-
}
|
|
66
|
-
})();
|
|
67
|
-
}
|
|
68
|
-
return this.initPromise;
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
async getFacts(userId: string): Promise<UserFact[]> {
|
|
72
|
-
await this.ensureInitialized();
|
|
73
|
-
return (await this.storage.get<UserFact[]>(USER_MEMORY_NS, userId)) ?? [];
|
|
74
|
-
}
|
|
75
|
-
|
|
76
|
-
async addFacts(userId: string, facts: string[], source: "auto" | "manual" = "manual"): Promise<void> {
|
|
77
|
-
await this.ensureInitialized();
|
|
78
|
-
const existing = await this.getFacts(userId);
|
|
79
|
-
const existingSet = new Set(existing.map((f) => f.fact.toLowerCase()));
|
|
80
|
-
|
|
81
|
-
const newFacts: UserFact[] = [];
|
|
82
|
-
for (const fact of facts) {
|
|
83
|
-
const normalized = fact.trim();
|
|
84
|
-
if (!normalized || existingSet.has(normalized.toLowerCase())) continue;
|
|
85
|
-
newFacts.push({
|
|
86
|
-
id: uuidv4(),
|
|
87
|
-
fact: normalized,
|
|
88
|
-
createdAt: new Date(),
|
|
89
|
-
source,
|
|
90
|
-
});
|
|
91
|
-
existingSet.add(normalized.toLowerCase());
|
|
92
|
-
}
|
|
93
|
-
|
|
94
|
-
if (newFacts.length === 0) return;
|
|
95
|
-
|
|
96
|
-
let updated = [...existing, ...newFacts];
|
|
97
|
-
if (updated.length > this.maxFacts) {
|
|
98
|
-
updated = updated.slice(updated.length - this.maxFacts);
|
|
99
|
-
}
|
|
100
|
-
|
|
101
|
-
await this.storage.set(USER_MEMORY_NS, userId, updated);
|
|
102
|
-
}
|
|
103
|
-
|
|
104
|
-
async removeFact(userId: string, factId: string): Promise<void> {
|
|
105
|
-
await this.ensureInitialized();
|
|
106
|
-
const existing = await this.getFacts(userId);
|
|
107
|
-
const updated = existing.filter((f) => f.id !== factId);
|
|
108
|
-
await this.storage.set(USER_MEMORY_NS, userId, updated);
|
|
109
|
-
}
|
|
110
|
-
|
|
111
|
-
async clear(userId: string): Promise<void> {
|
|
112
|
-
await this.ensureInitialized();
|
|
113
|
-
await this.storage.delete(USER_MEMORY_NS, userId);
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
async getContextString(userId: string): Promise<string> {
|
|
117
|
-
if (!this.enabled) return "";
|
|
118
|
-
const facts = await this.getFacts(userId);
|
|
119
|
-
if (facts.length === 0) return "";
|
|
120
|
-
const factList = facts.map((f) => `- ${f.fact}`).join("\n");
|
|
121
|
-
return `What you know about this user:\n${factList}`;
|
|
122
|
-
}
|
|
123
|
-
|
|
124
|
-
asTool(config?: { name?: string; description?: string }): ToolDef {
|
|
125
|
-
const mem = this;
|
|
126
|
-
return {
|
|
127
|
-
name: config?.name ?? "recall_user_facts",
|
|
128
|
-
description:
|
|
129
|
-
config?.description ??
|
|
130
|
-
"Retrieve stored facts about the current user — preferences, background, interests, and other personal details from past conversations. Call this when the user asks what you know or remember about them.",
|
|
131
|
-
parameters: z.object({}),
|
|
132
|
-
execute: async (_args, ctx) => {
|
|
133
|
-
const uid = ctx.userId;
|
|
134
|
-
if (!uid) return "No user identified for this session.";
|
|
135
|
-
const facts = await mem.getFacts(uid);
|
|
136
|
-
if (facts.length === 0) return "No stored facts about this user yet.";
|
|
137
|
-
return facts.map((f) => `- ${f.fact}`).join("\n");
|
|
138
|
-
},
|
|
139
|
-
};
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
async extractAndStore(
|
|
143
|
-
userId: string,
|
|
144
|
-
messages: ChatMessage[],
|
|
145
|
-
fallbackModel?: ModelProvider
|
|
146
|
-
): Promise<void> {
|
|
147
|
-
if (!this.enabled) return;
|
|
148
|
-
|
|
149
|
-
const model = this.model ?? fallbackModel;
|
|
150
|
-
if (!model) return;
|
|
151
|
-
|
|
152
|
-
try {
|
|
153
|
-
const existing = await this.getFacts(userId);
|
|
154
|
-
const existingStr =
|
|
155
|
-
existing.length > 0
|
|
156
|
-
? existing.map((f) => `- ${f.fact}`).join("\n")
|
|
157
|
-
: "(none)";
|
|
158
|
-
|
|
159
|
-
const conversationStr = messages
|
|
160
|
-
.filter((m) => m.role === "user" || m.role === "assistant")
|
|
161
|
-
.map((m) => {
|
|
162
|
-
const content = typeof m.content === "string" ? m.content : "(multimodal)";
|
|
163
|
-
return `${m.role}: ${content}`;
|
|
164
|
-
})
|
|
165
|
-
.join("\n");
|
|
166
|
-
|
|
167
|
-
const prompt = EXTRACTION_PROMPT
|
|
168
|
-
.replace("{existingFacts}", existingStr)
|
|
169
|
-
.replace("{conversation}", conversationStr);
|
|
170
|
-
|
|
171
|
-
const response = await model.generate(
|
|
172
|
-
[{ role: "user", content: prompt }],
|
|
173
|
-
{ temperature: 0, maxTokens: 500 }
|
|
174
|
-
);
|
|
175
|
-
|
|
176
|
-
const text =
|
|
177
|
-
typeof response.message.content === "string"
|
|
178
|
-
? response.message.content
|
|
179
|
-
: "";
|
|
180
|
-
|
|
181
|
-
if (!text) return;
|
|
182
|
-
|
|
183
|
-
const jsonStr = this.extractJsonArray(text);
|
|
184
|
-
const parsed = JSON.parse(jsonStr);
|
|
185
|
-
|
|
186
|
-
if (Array.isArray(parsed) && parsed.length > 0) {
|
|
187
|
-
const validFacts = parsed.filter(
|
|
188
|
-
(f: unknown) => typeof f === "string" && f.trim().length > 0
|
|
189
|
-
);
|
|
190
|
-
if (validFacts.length > 0) {
|
|
191
|
-
await this.addFacts(userId, validFacts, "auto");
|
|
192
|
-
}
|
|
193
|
-
}
|
|
194
|
-
} catch (err) {
|
|
195
|
-
console.warn("[UserMemory] extractAndStore failed:", (err as Error).message ?? err);
|
|
196
|
-
}
|
|
197
|
-
}
|
|
198
|
-
|
|
199
|
-
private extractJsonArray(text: string): string {
|
|
200
|
-
const fenceMatch = text.match(/```(?:json)?\s*\n?([\s\S]*?)```/);
|
|
201
|
-
if (fenceMatch) return fenceMatch[1].trim();
|
|
202
|
-
|
|
203
|
-
const bracketStart = text.indexOf("[");
|
|
204
|
-
const bracketEnd = text.lastIndexOf("]");
|
|
205
|
-
if (bracketStart !== -1 && bracketEnd > bracketStart) {
|
|
206
|
-
return text.slice(bracketStart, bracketEnd + 1);
|
|
207
|
-
}
|
|
208
|
-
|
|
209
|
-
return text.trim();
|
|
210
|
-
}
|
|
211
|
-
}
|
package/src/models/provider.ts
DELETED
|
@@ -1,22 +0,0 @@
|
|
|
1
|
-
import type {
  ChatMessage,
  ModelConfig,
  ModelResponse,
  StreamChunk,
  ToolDefinition,
} from "./types.js";

/**
 * Contract implemented by every LLM backend in this package (anthropic,
 * google, ollama, openai, vertex). Callers depend only on this interface.
 */
export interface ModelProvider {
  /** Stable backend identifier, e.g. "anthropic". */
  readonly providerId: string;
  /** Concrete model name sent to the backend. */
  readonly modelId: string;

  /**
   * Performs one non-streaming chat completion.
   * @param messages Conversation history to send.
   * @param options  Sampling configuration plus optional tool definitions.
   * @returns The normalized model response.
   */
  generate(
    messages: ChatMessage[],
    options?: ModelConfig & { tools?: ToolDefinition[] }
  ): Promise<ModelResponse>;

  /**
   * Streams a chat completion as incremental StreamChunk events.
   * @param messages Conversation history to send.
   * @param options  Sampling configuration plus optional tool definitions.
   */
  stream(
    messages: ChatMessage[],
    options?: ModelConfig & { tools?: ToolDefinition[] }
  ): AsyncGenerator<StreamChunk>;
}
|
|
@@ -1,360 +0,0 @@
|
|
|
1
|
-
import { createRequire } from "node:module";
import type { ModelProvider } from "../provider.js";
import {
  getTextContent,
  isMultiModal,
  type ChatMessage,
  type ContentPart,
  type ModelConfig,
  type ModelResponse,
  type StreamChunk,
  type ToolDefinition,
  type TokenUsage,
  type ToolCall,
} from "../types.js";

// CommonJS-style require created from this ESM module, used to load the
// optional @anthropic-ai/sdk peer dependency at construction time.
const _require = createRequire(import.meta.url);

/** Constructor options for AnthropicProvider. */
interface AnthropicConfig {
  // API key; when omitted the ANTHROPIC_API_KEY env var is consulted.
  apiKey?: string;
}
|
|
21
|
-
|
|
22
|
-
/**
 * ModelProvider backed by the Anthropic Messages API.
 *
 * The @anthropic-ai/sdk package is loaded lazily via require() in the
 * constructor; constructing this class without it installed throws.
 * Per-request API keys (options.apiKey) are supported and their clients
 * are cached so each distinct key builds a client only once.
 */
export class AnthropicProvider implements ModelProvider {
  readonly providerId = "anthropic";
  readonly modelId: string;
  // Default client, built from the constructor/env key (may stay unset).
  private client: any;
  // The SDK's exported constructor (default export or the module itself).
  private AnthropicCtor: any;
  // Clients keyed by per-request API key.
  private clientCache = new Map<string, any>();

  /**
   * @param modelId Anthropic model name to target.
   * @param config  Optional API key; falls back to ANTHROPIC_API_KEY.
   * @throws Error when @anthropic-ai/sdk cannot be loaded.
   */
  constructor(modelId: string, config?: AnthropicConfig) {
    this.modelId = modelId;
    try {
      const mod = _require("@anthropic-ai/sdk");
      this.AnthropicCtor = mod.default ?? mod;
      const key = config?.apiKey ?? process.env.ANTHROPIC_API_KEY;
      if (key) {
        this.client = new this.AnthropicCtor({ apiKey: key });
      }
      // NOTE(review): this catch also swallows failures from the client
      // constructor above and reports them as a missing SDK — confirm
      // whether the try should wrap only the require() call.
    } catch {
      throw new Error(
        "@anthropic-ai/sdk is required for AnthropicProvider. Install it: npm install @anthropic-ai/sdk"
      );
    }
  }

  /**
   * Resolves the SDK client to use for a call: a cached per-key client when
   * apiKey is given, otherwise the default client, otherwise one lazily
   * built from ANTHROPIC_API_KEY. Throws when no key is available at all.
   */
  private getClient(apiKey?: string): any {
    if (apiKey) {
      let cached = this.clientCache.get(apiKey);
      if (!cached) {
        cached = new this.AnthropicCtor({ apiKey });
        this.clientCache.set(apiKey, cached);
      }
      return cached;
    }
    if (this.client) return this.client;
    const envKey = process.env.ANTHROPIC_API_KEY;
    if (envKey) {
      this.client = new this.AnthropicCtor({ apiKey: envKey });
      return this.client;
    }
    throw new Error("No Anthropic API key provided. Pass it via the x-anthropic-api-key header, apiKey in request body, or set ANTHROPIC_API_KEY env var.");
  }

  /**
   * Non-streaming completion: converts messages/options to Anthropic
   * request params, calls messages.create, and normalizes the response.
   */
  async generate(
    messages: ChatMessage[],
    options?: ModelConfig & { tools?: ToolDefinition[] }
  ): Promise<ModelResponse> {
    const { systemMsg, anthropicMessages } =
      this.toAnthropicMessages(messages);

    const params: Record<string, unknown> = {
      model: this.modelId,
      messages: anthropicMessages,
      max_tokens: options?.maxTokens ?? 4096,
    };

    if (systemMsg) params.system = systemMsg;
    if (options?.temperature !== undefined)
      params.temperature = options.temperature;
    if (options?.topP !== undefined) params.top_p = options.topP;
    if (options?.stop) params.stop_sequences = options.stop;
    if (options?.tools?.length) {
      params.tools = this.toAnthropicTools(options.tools);
    }
    if (options?.reasoning?.enabled) {
      params.thinking = {
        type: "enabled",
        budget_tokens: options.reasoning.budgetTokens ?? 10000,
      };
      // Extended thinking is incompatible with a temperature override per
      // the Anthropic API, so any value set above is dropped.
      delete params.temperature;
    }

    const client = this.getClient(options?.apiKey);
    const response = await client.messages.create(params);
    return this.normalizeResponse(response);
  }

  /**
   * Streaming completion: same param construction as generate() plus
   * stream: true, then translates SDK events into StreamChunk values
   * (text, thinking, tool-call lifecycle, and a final finish chunk).
   */
  async *stream(
    messages: ChatMessage[],
    options?: ModelConfig & { tools?: ToolDefinition[] }
  ): AsyncGenerator<StreamChunk> {
    const { systemMsg, anthropicMessages } =
      this.toAnthropicMessages(messages);

    const params: Record<string, unknown> = {
      model: this.modelId,
      messages: anthropicMessages,
      max_tokens: options?.maxTokens ?? 4096,
      stream: true,
    };

    if (systemMsg) params.system = systemMsg;
    if (options?.temperature !== undefined)
      params.temperature = options.temperature;
    if (options?.topP !== undefined) params.top_p = options.topP;
    if (options?.stop) params.stop_sequences = options.stop;
    if (options?.tools?.length) {
      params.tools = this.toAnthropicTools(options.tools);
    }
    if (options?.reasoning?.enabled) {
      params.thinking = {
        type: "enabled",
        budget_tokens: options.reasoning.budgetTokens ?? 10000,
      };
      // See generate(): thinking mode drops any temperature override.
      delete params.temperature;
    }

    const client = this.getClient(options?.apiKey);
    const stream = await client.messages.create(params);

    // Tracks the in-flight tool_use block so its JSON deltas can be tagged,
    // and whether the current content block is a thinking block.
    let currentToolId = "";
    let inThinkingBlock = false;

    for await (const event of stream) {
      switch (event.type) {
        case "content_block_start": {
          if (event.content_block?.type === "tool_use") {
            currentToolId = event.content_block.id;
            yield {
              type: "tool_call_start",
              toolCall: {
                id: event.content_block.id,
                name: event.content_block.name,
              },
            };
          } else if (event.content_block?.type === "thinking") {
            inThinkingBlock = true;
          }
          break;
        }
        case "content_block_delta": {
          if (event.delta?.type === "thinking_delta") {
            yield { type: "thinking", text: event.delta.thinking };
          } else if (event.delta?.type === "text_delta") {
            yield { type: "text", text: event.delta.text };
          } else if (event.delta?.type === "input_json_delta") {
            // Partial JSON for the tool call opened in content_block_start.
            yield {
              type: "tool_call_delta",
              toolCallId: currentToolId,
              argumentsDelta: event.delta.partial_json,
            };
          }
          break;
        }
        case "content_block_stop": {
          if (inThinkingBlock) {
            inThinkingBlock = false;
          } else if (currentToolId) {
            yield { type: "tool_call_end", toolCallId: currentToolId };
            currentToolId = "";
          }
          break;
        }
        case "message_delta": {
          // NOTE(review): promptTokens is reported as 0 here; input tokens
          // arrive on message_start, which is currently discarded below.
          const usage: TokenUsage | undefined = event.usage
            ? {
                promptTokens: 0,
                completionTokens: event.usage.output_tokens ?? 0,
                totalTokens: event.usage.output_tokens ?? 0,
              }
            : undefined;

          // Map Anthropic stop reasons onto the package's finish reasons.
          let finishReason = event.delta?.stop_reason ?? "stop";
          if (finishReason === "tool_use") finishReason = "tool_calls";
          if (finishReason === "end_turn") finishReason = "stop";

          yield { type: "finish", finishReason, usage };
          break;
        }
        case "message_start": {
          if (event.message?.usage) {
            // Input tokens available at start
          }
          break;
        }
      }
    }
  }

  /**
   * Converts package-level ChatMessages into Anthropic's message format.
   * System messages are lifted out into the separate `system` field (the
   * last one wins); tool results become user messages with tool_result
   * blocks, as the Messages API requires.
   */
  private toAnthropicMessages(messages: ChatMessage[]): {
    systemMsg: string | undefined;
    anthropicMessages: unknown[];
  } {
    let systemMsg: string | undefined;
    const anthropicMessages: unknown[] = [];

    for (const msg of messages) {
      if (msg.role === "system") {
        systemMsg = getTextContent(msg.content) || undefined;
        continue;
      }

      if (msg.role === "user") {
        if (isMultiModal(msg.content)) {
          anthropicMessages.push({
            role: "user",
            content: msg.content.map((p) => this.partToAnthropic(p)),
          });
        } else {
          anthropicMessages.push({
            role: "user",
            content: [{ type: "text", text: msg.content ?? "" }],
          });
        }
        continue;
      }

      if (msg.role === "assistant") {
        const content: unknown[] = [];
        if (msg.content) {
          content.push({ type: "text", text: msg.content });
        }
        if (msg.toolCalls) {
          for (const tc of msg.toolCalls) {
            content.push({
              type: "tool_use",
              id: tc.id,
              name: tc.name,
              input: tc.arguments,
            });
          }
        }
        // Anthropic rejects empty content, so fall back to an empty text block.
        anthropicMessages.push({
          role: "assistant",
          content: content.length > 0 ? content : [{ type: "text", text: "" }],
        });
        continue;
      }

      if (msg.role === "tool") {
        anthropicMessages.push({
          role: "user",
          content: [
            {
              type: "tool_result",
              tool_use_id: msg.toolCallId,
              content: msg.content ?? "",
            },
          ],
        });
        continue;
      }
    }

    return { systemMsg, anthropicMessages };
  }

  /**
   * Converts one multimodal ContentPart to an Anthropic content block.
   * Images map to url/base64 sources; PDFs map to document blocks; audio
   * and other files degrade to descriptive text placeholders.
   */
  private partToAnthropic(part: ContentPart): unknown {
    switch (part.type) {
      case "text":
        return { type: "text", text: part.text };
      case "image": {
        const isUrl = part.data.startsWith("http://") || part.data.startsWith("https://");
        if (isUrl) {
          return { type: "image", source: { type: "url", url: part.data } };
        }
        return {
          type: "image",
          source: {
            type: "base64",
            media_type: part.mimeType ?? "image/png",
            data: part.data,
          },
        };
      }
      case "audio":
        // Audio input is not forwarded; only a textual marker is sent.
        return {
          type: "text",
          text: `[Audio content: ${part.mimeType ?? "audio"}]`,
        };
      case "file":
        if (part.mimeType === "application/pdf") {
          return {
            type: "document",
            source: { type: "base64", media_type: "application/pdf", data: part.data },
          };
        }
        return {
          type: "text",
          text: `[File: ${part.filename ?? "attachment"} (${part.mimeType})]`,
        };
    }
  }

  /** Maps package tool definitions to Anthropic's tool schema. */
  private toAnthropicTools(tools: ToolDefinition[]): unknown[] {
    return tools.map((t) => ({
      name: t.name,
      description: t.description,
      input_schema: t.parameters,
    }));
  }

  /**
   * Normalizes a raw Anthropic response: concatenates text and thinking
   * blocks, collects tool_use blocks into ToolCalls, sums token usage,
   * and maps stop_reason onto the package's finishReason values. The raw
   * SDK response is preserved on `raw`.
   */
  private normalizeResponse(response: any): ModelResponse & { thinking?: string } {
    const toolCalls: ToolCall[] = [];
    let textContent = "";
    let thinkingContent = "";

    for (const block of response.content ?? []) {
      if (block.type === "text") {
        textContent += block.text;
      } else if (block.type === "thinking") {
        thinkingContent += block.thinking;
      } else if (block.type === "tool_use") {
        toolCalls.push({
          id: block.id,
          name: block.name,
          arguments: block.input ?? {},
        });
      }
    }

    const usage: TokenUsage = {
      promptTokens: response.usage?.input_tokens ?? 0,
      completionTokens: response.usage?.output_tokens ?? 0,
      totalTokens:
        (response.usage?.input_tokens ?? 0) +
        (response.usage?.output_tokens ?? 0),
    };

    let finishReason: ModelResponse["finishReason"] = "stop";
    if (response.stop_reason === "tool_use") finishReason = "tool_calls";
    else if (response.stop_reason === "max_tokens") finishReason = "length";

    const result: ModelResponse & { thinking?: string } = {
      message: {
        role: "assistant",
        // Empty text is surfaced as null so callers can distinguish
        // "no text" from an empty string.
        content: textContent || null,
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
      },
      usage,
      finishReason,
      raw: response,
    };

    if (thinkingContent) {
      result.thinking = thinkingContent;
    }

    return result;
  }
}
|