@radaros/core 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +42 -1
- package/dist/index.js +322 -25
- package/package.json +1 -1
- package/src/index.ts +3 -1
- package/src/models/providers/vertex.ts +400 -0
- package/src/models/registry.ts +17 -0
package/dist/index.d.ts
CHANGED
|
@@ -491,6 +491,11 @@ declare function google(modelId: string, config?: {
|
|
|
491
491
|
declare function ollama(modelId: string, config?: {
|
|
492
492
|
host?: string;
|
|
493
493
|
}): ModelProvider;
|
|
494
|
+
declare function vertex(modelId: string, config?: {
|
|
495
|
+
project?: string;
|
|
496
|
+
location?: string;
|
|
497
|
+
credentials?: string;
|
|
498
|
+
}): ModelProvider;
|
|
494
499
|
|
|
495
500
|
interface OpenAIConfig {
|
|
496
501
|
apiKey?: string;
|
|
@@ -584,6 +589,42 @@ declare class OllamaProvider implements ModelProvider {
|
|
|
584
589
|
private normalizeResponse;
|
|
585
590
|
}
|
|
586
591
|
|
|
592
|
+
interface VertexAIConfig {
|
|
593
|
+
project?: string;
|
|
594
|
+
location?: string;
|
|
595
|
+
/** Service account key JSON string or path (optional — uses ADC by default). */
|
|
596
|
+
credentials?: string;
|
|
597
|
+
}
|
|
598
|
+
/**
|
|
599
|
+
* Vertex AI provider using Google's @google/genai SDK in Vertex mode.
|
|
600
|
+
*
|
|
601
|
+
* Authentication (in order of precedence):
|
|
602
|
+
* 1. Explicit `project` + `location` in config
|
|
603
|
+
* 2. GOOGLE_CLOUD_PROJECT / GOOGLE_CLOUD_LOCATION env vars
|
|
604
|
+
* 3. Application Default Credentials (gcloud auth)
|
|
605
|
+
*/
|
|
606
|
+
declare class VertexAIProvider implements ModelProvider {
|
|
607
|
+
readonly providerId = "vertex";
|
|
608
|
+
readonly modelId: string;
|
|
609
|
+
private ai;
|
|
610
|
+
private GoogleGenAICtor;
|
|
611
|
+
private project;
|
|
612
|
+
private location;
|
|
613
|
+
constructor(modelId: string, config?: VertexAIConfig);
|
|
614
|
+
private getClient;
|
|
615
|
+
generate(messages: ChatMessage[], options?: ModelConfig & {
|
|
616
|
+
tools?: ToolDefinition[];
|
|
617
|
+
}): Promise<ModelResponse>;
|
|
618
|
+
stream(messages: ChatMessage[], options?: ModelConfig & {
|
|
619
|
+
tools?: ToolDefinition[];
|
|
620
|
+
}): AsyncGenerator<StreamChunk>;
|
|
621
|
+
private toGoogleMessages;
|
|
622
|
+
private partToGoogle;
|
|
623
|
+
private toGoogleTools;
|
|
624
|
+
private cleanJsonSchema;
|
|
625
|
+
private normalizeResponse;
|
|
626
|
+
}
|
|
627
|
+
|
|
587
628
|
declare function defineTool<T extends z.ZodObject<any>>(config: {
|
|
588
629
|
name: string;
|
|
589
630
|
description: string;
|
|
@@ -1107,4 +1148,4 @@ declare class A2ARemoteAgent {
|
|
|
1107
1148
|
private partsToText;
|
|
1108
1149
|
}
|
|
1109
1150
|
|
|
1110
|
-
export { type A2AAgentCard, type A2AArtifact, type A2ADataPart, type A2AFilePart, type A2AJsonRpcRequest, type A2AJsonRpcResponse, type A2AMessage, type A2APart, A2ARemoteAgent, type A2ARemoteAgentConfig, type A2ASendParams, type A2ASkill, type A2ATask, type A2ATaskQueryParams, type A2ATaskState, type A2ATextPart, Agent, type AgentConfig, type AgentEventMap, type AgentHooks, type AgentStep, AnthropicProvider, type Artifact, type AudioPart, BaseVectorStore, type ChatMessage, type ConditionStep, type ContentPart, type EmbeddingProvider, EventBus, type FilePart, type FunctionStep, GoogleEmbedding, type GoogleEmbeddingConfig, GoogleProvider, type GuardrailResult, type ImagePart, InMemoryStorage, InMemoryVectorStore, type InputGuardrail, KnowledgeBase, type KnowledgeBaseConfig, type KnowledgeBaseToolConfig, LLMLoop, type LogLevel, Logger, type LoggerConfig, MCPToolProvider, type MCPToolProviderConfig, Memory, type MemoryConfig, type MemoryEntry, type MessageContent, type MessageRole, type ModelConfig, type ModelProvider, ModelRegistry, type ModelResponse, MongoDBStorage, type MongoDBVectorConfig, MongoDBVectorStore, OllamaProvider, OpenAIEmbedding, type OpenAIEmbeddingConfig, OpenAIProvider, type OutputGuardrail, type ParallelStep, type PgVectorConfig, PgVectorStore, PostgresStorage, type QdrantConfig, QdrantVectorStore, RunContext, type RunOpts, type RunOutput, type Session, SessionManager, SqliteStorage, type StepDef, type StepResult, type StorageDriver, type StreamChunk, Team, type TeamConfig, TeamMode, type TextPart, type TokenUsage, type ToolCall, type ToolCallResult, type ToolDef, type ToolDefinition, ToolExecutor, type ToolResult, type VectorDocument, type VectorSearchOptions, type VectorSearchResult, type VectorStore, Workflow, type WorkflowConfig, type WorkflowResult, anthropic, defineTool, getTextContent, google, isMultiModal, ollama, openai, registry };
|
|
1151
|
+
export { type A2AAgentCard, type A2AArtifact, type A2ADataPart, type A2AFilePart, type A2AJsonRpcRequest, type A2AJsonRpcResponse, type A2AMessage, type A2APart, A2ARemoteAgent, type A2ARemoteAgentConfig, type A2ASendParams, type A2ASkill, type A2ATask, type A2ATaskQueryParams, type A2ATaskState, type A2ATextPart, Agent, type AgentConfig, type AgentEventMap, type AgentHooks, type AgentStep, AnthropicProvider, type Artifact, type AudioPart, BaseVectorStore, type ChatMessage, type ConditionStep, type ContentPart, type EmbeddingProvider, EventBus, type FilePart, type FunctionStep, GoogleEmbedding, type GoogleEmbeddingConfig, GoogleProvider, type GuardrailResult, type ImagePart, InMemoryStorage, InMemoryVectorStore, type InputGuardrail, KnowledgeBase, type KnowledgeBaseConfig, type KnowledgeBaseToolConfig, LLMLoop, type LogLevel, Logger, type LoggerConfig, MCPToolProvider, type MCPToolProviderConfig, Memory, type MemoryConfig, type MemoryEntry, type MessageContent, type MessageRole, type ModelConfig, type ModelProvider, ModelRegistry, type ModelResponse, MongoDBStorage, type MongoDBVectorConfig, MongoDBVectorStore, OllamaProvider, OpenAIEmbedding, type OpenAIEmbeddingConfig, OpenAIProvider, type OutputGuardrail, type ParallelStep, type PgVectorConfig, PgVectorStore, PostgresStorage, type QdrantConfig, QdrantVectorStore, RunContext, type RunOpts, type RunOutput, type Session, SessionManager, SqliteStorage, type StepDef, type StepResult, type StorageDriver, type StreamChunk, Team, type TeamConfig, TeamMode, type TextPart, type TokenUsage, type ToolCall, type ToolCallResult, type ToolDef, type ToolDefinition, ToolExecutor, type ToolResult, type VectorDocument, type VectorSearchOptions, type VectorSearchResult, type VectorStore, type VertexAIConfig, VertexAIProvider, Workflow, type WorkflowConfig, type WorkflowResult, anthropic, defineTool, getTextContent, google, isMultiModal, ollama, openai, registry, vertex };
|
package/dist/index.js
CHANGED
|
@@ -2367,6 +2367,291 @@ var OllamaProvider = class {
|
|
|
2367
2367
|
}
|
|
2368
2368
|
};
|
|
2369
2369
|
|
|
2370
|
+
// src/models/providers/vertex.ts
|
|
2371
|
+
import { createRequire as createRequire7 } from "module";
|
|
2372
|
+
var _require7 = createRequire7(import.meta.url);
|
|
2373
|
+
var VertexAIProvider = class {
|
|
2374
|
+
providerId = "vertex";
|
|
2375
|
+
modelId;
|
|
2376
|
+
ai = null;
|
|
2377
|
+
GoogleGenAICtor;
|
|
2378
|
+
project;
|
|
2379
|
+
location;
|
|
2380
|
+
constructor(modelId, config) {
|
|
2381
|
+
this.modelId = modelId;
|
|
2382
|
+
this.project = config?.project ?? process.env.GOOGLE_CLOUD_PROJECT ?? process.env.GCLOUD_PROJECT ?? "";
|
|
2383
|
+
this.location = config?.location ?? process.env.GOOGLE_CLOUD_LOCATION ?? process.env.GOOGLE_CLOUD_REGION ?? "us-central1";
|
|
2384
|
+
if (!this.project) {
|
|
2385
|
+
throw new Error(
|
|
2386
|
+
"VertexAIProvider: 'project' is required. Pass it in config or set GOOGLE_CLOUD_PROJECT env var."
|
|
2387
|
+
);
|
|
2388
|
+
}
|
|
2389
|
+
try {
|
|
2390
|
+
const { GoogleGenAI } = _require7("@google/genai");
|
|
2391
|
+
this.GoogleGenAICtor = GoogleGenAI;
|
|
2392
|
+
} catch {
|
|
2393
|
+
throw new Error(
|
|
2394
|
+
"@google/genai is required for VertexAIProvider. Install it: npm install @google/genai"
|
|
2395
|
+
);
|
|
2396
|
+
}
|
|
2397
|
+
}
|
|
2398
|
+
getClient() {
|
|
2399
|
+
if (this.ai) return this.ai;
|
|
2400
|
+
this.ai = new this.GoogleGenAICtor({
|
|
2401
|
+
vertexai: true,
|
|
2402
|
+
project: this.project,
|
|
2403
|
+
location: this.location
|
|
2404
|
+
});
|
|
2405
|
+
return this.ai;
|
|
2406
|
+
}
|
|
2407
|
+
async generate(messages, options) {
|
|
2408
|
+
const { systemInstruction, contents } = this.toGoogleMessages(messages);
|
|
2409
|
+
const config = {};
|
|
2410
|
+
if (options?.temperature !== void 0)
|
|
2411
|
+
config.temperature = options.temperature;
|
|
2412
|
+
if (options?.maxTokens !== void 0)
|
|
2413
|
+
config.maxOutputTokens = options.maxTokens;
|
|
2414
|
+
if (options?.topP !== void 0) config.topP = options.topP;
|
|
2415
|
+
if (options?.stop) config.stopSequences = options.stop;
|
|
2416
|
+
if (options?.responseFormat) {
|
|
2417
|
+
config.responseMimeType = "application/json";
|
|
2418
|
+
const rf = options.responseFormat;
|
|
2419
|
+
if (typeof rf === "object" && rf !== null && "type" in rf && rf.type === "json_schema" && "schema" in rf && rf.schema) {
|
|
2420
|
+
config.responseSchema = this.cleanJsonSchema(
|
|
2421
|
+
rf.schema
|
|
2422
|
+
);
|
|
2423
|
+
}
|
|
2424
|
+
}
|
|
2425
|
+
const params = {
|
|
2426
|
+
model: this.modelId,
|
|
2427
|
+
contents,
|
|
2428
|
+
config
|
|
2429
|
+
};
|
|
2430
|
+
if (systemInstruction) params.systemInstruction = systemInstruction;
|
|
2431
|
+
if (options?.tools?.length) {
|
|
2432
|
+
params.tools = [
|
|
2433
|
+
{ functionDeclarations: this.toGoogleTools(options.tools) }
|
|
2434
|
+
];
|
|
2435
|
+
}
|
|
2436
|
+
const client = this.getClient();
|
|
2437
|
+
const response = await client.models.generateContent(params);
|
|
2438
|
+
return this.normalizeResponse(response);
|
|
2439
|
+
}
|
|
2440
|
+
async *stream(messages, options) {
|
|
2441
|
+
const { systemInstruction, contents } = this.toGoogleMessages(messages);
|
|
2442
|
+
const config = {};
|
|
2443
|
+
if (options?.temperature !== void 0)
|
|
2444
|
+
config.temperature = options.temperature;
|
|
2445
|
+
if (options?.maxTokens !== void 0)
|
|
2446
|
+
config.maxOutputTokens = options.maxTokens;
|
|
2447
|
+
if (options?.topP !== void 0) config.topP = options.topP;
|
|
2448
|
+
if (options?.stop) config.stopSequences = options.stop;
|
|
2449
|
+
const params = {
|
|
2450
|
+
model: this.modelId,
|
|
2451
|
+
contents,
|
|
2452
|
+
config
|
|
2453
|
+
};
|
|
2454
|
+
if (systemInstruction) params.systemInstruction = systemInstruction;
|
|
2455
|
+
if (options?.tools?.length) {
|
|
2456
|
+
params.tools = [
|
|
2457
|
+
{ functionDeclarations: this.toGoogleTools(options.tools) }
|
|
2458
|
+
];
|
|
2459
|
+
}
|
|
2460
|
+
const client = this.getClient();
|
|
2461
|
+
const streamResult = await client.models.generateContentStream(params);
|
|
2462
|
+
let toolCallCounter = 0;
|
|
2463
|
+
for await (const chunk of streamResult) {
|
|
2464
|
+
const candidate = chunk.candidates?.[0];
|
|
2465
|
+
if (!candidate?.content?.parts) continue;
|
|
2466
|
+
for (const part of candidate.content.parts) {
|
|
2467
|
+
if (part.text) {
|
|
2468
|
+
yield { type: "text", text: part.text };
|
|
2469
|
+
}
|
|
2470
|
+
if (part.functionCall) {
|
|
2471
|
+
const id = `vertex_tc_${toolCallCounter++}`;
|
|
2472
|
+
yield {
|
|
2473
|
+
type: "tool_call_start",
|
|
2474
|
+
toolCall: { id, name: part.functionCall.name }
|
|
2475
|
+
};
|
|
2476
|
+
yield {
|
|
2477
|
+
type: "tool_call_delta",
|
|
2478
|
+
toolCallId: id,
|
|
2479
|
+
argumentsDelta: JSON.stringify(part.functionCall.args ?? {})
|
|
2480
|
+
};
|
|
2481
|
+
yield { type: "tool_call_end", toolCallId: id };
|
|
2482
|
+
}
|
|
2483
|
+
}
|
|
2484
|
+
if (candidate.finishReason) {
|
|
2485
|
+
let finishReason = "stop";
|
|
2486
|
+
if (candidate.finishReason === "STOP" || candidate.finishReason === "END_TURN")
|
|
2487
|
+
finishReason = "stop";
|
|
2488
|
+
else if (candidate.finishReason === "MAX_TOKENS")
|
|
2489
|
+
finishReason = "length";
|
|
2490
|
+
else if (candidate.finishReason === "SAFETY")
|
|
2491
|
+
finishReason = "content_filter";
|
|
2492
|
+
const hasToolCalls = candidate.content?.parts?.some(
|
|
2493
|
+
(p) => p.functionCall
|
|
2494
|
+
);
|
|
2495
|
+
if (hasToolCalls) finishReason = "tool_calls";
|
|
2496
|
+
yield {
|
|
2497
|
+
type: "finish",
|
|
2498
|
+
finishReason,
|
|
2499
|
+
usage: chunk.usageMetadata ? {
|
|
2500
|
+
promptTokens: chunk.usageMetadata.promptTokenCount ?? 0,
|
|
2501
|
+
completionTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
|
|
2502
|
+
totalTokens: chunk.usageMetadata.totalTokenCount ?? 0
|
|
2503
|
+
} : void 0
|
|
2504
|
+
};
|
|
2505
|
+
}
|
|
2506
|
+
}
|
|
2507
|
+
}
|
|
2508
|
+
// ── Message conversion (identical to GoogleProvider) ─────────────────────
|
|
2509
|
+
toGoogleMessages(messages) {
|
|
2510
|
+
let systemInstruction;
|
|
2511
|
+
const contents = [];
|
|
2512
|
+
for (const msg of messages) {
|
|
2513
|
+
if (msg.role === "system") {
|
|
2514
|
+
systemInstruction = getTextContent(msg.content) || void 0;
|
|
2515
|
+
continue;
|
|
2516
|
+
}
|
|
2517
|
+
if (msg.role === "user") {
|
|
2518
|
+
if (isMultiModal(msg.content)) {
|
|
2519
|
+
contents.push({
|
|
2520
|
+
role: "user",
|
|
2521
|
+
parts: msg.content.map((p) => this.partToGoogle(p))
|
|
2522
|
+
});
|
|
2523
|
+
} else {
|
|
2524
|
+
contents.push({
|
|
2525
|
+
role: "user",
|
|
2526
|
+
parts: [{ text: msg.content ?? "" }]
|
|
2527
|
+
});
|
|
2528
|
+
}
|
|
2529
|
+
continue;
|
|
2530
|
+
}
|
|
2531
|
+
if (msg.role === "assistant") {
|
|
2532
|
+
const parts = [];
|
|
2533
|
+
if (msg.content) parts.push({ text: msg.content });
|
|
2534
|
+
if (msg.toolCalls) {
|
|
2535
|
+
for (const tc of msg.toolCalls) {
|
|
2536
|
+
parts.push({
|
|
2537
|
+
functionCall: { name: tc.name, args: tc.arguments }
|
|
2538
|
+
});
|
|
2539
|
+
}
|
|
2540
|
+
}
|
|
2541
|
+
if (parts.length === 0) parts.push({ text: "" });
|
|
2542
|
+
contents.push({ role: "model", parts });
|
|
2543
|
+
continue;
|
|
2544
|
+
}
|
|
2545
|
+
if (msg.role === "tool") {
|
|
2546
|
+
contents.push({
|
|
2547
|
+
role: "function",
|
|
2548
|
+
parts: [
|
|
2549
|
+
{
|
|
2550
|
+
functionResponse: {
|
|
2551
|
+
name: msg.name ?? "unknown",
|
|
2552
|
+
response: { result: msg.content ?? "" }
|
|
2553
|
+
}
|
|
2554
|
+
}
|
|
2555
|
+
]
|
|
2556
|
+
});
|
|
2557
|
+
continue;
|
|
2558
|
+
}
|
|
2559
|
+
}
|
|
2560
|
+
return { systemInstruction, contents };
|
|
2561
|
+
}
|
|
2562
|
+
partToGoogle(part) {
|
|
2563
|
+
switch (part.type) {
|
|
2564
|
+
case "text":
|
|
2565
|
+
return { text: part.text };
|
|
2566
|
+
case "image":
|
|
2567
|
+
case "audio":
|
|
2568
|
+
case "file": {
|
|
2569
|
+
const isUrl = part.data.startsWith("http://") || part.data.startsWith("https://");
|
|
2570
|
+
if (isUrl) {
|
|
2571
|
+
return {
|
|
2572
|
+
fileData: {
|
|
2573
|
+
fileUri: part.data,
|
|
2574
|
+
mimeType: part.mimeType ?? (part.type === "image" ? "image/png" : "application/octet-stream")
|
|
2575
|
+
}
|
|
2576
|
+
};
|
|
2577
|
+
}
|
|
2578
|
+
return {
|
|
2579
|
+
inlineData: {
|
|
2580
|
+
data: part.data,
|
|
2581
|
+
mimeType: part.mimeType ?? (part.type === "image" ? "image/png" : part.type === "audio" ? "audio/mp3" : "application/octet-stream")
|
|
2582
|
+
}
|
|
2583
|
+
};
|
|
2584
|
+
}
|
|
2585
|
+
}
|
|
2586
|
+
}
|
|
2587
|
+
toGoogleTools(tools) {
|
|
2588
|
+
return tools.map((t) => ({
|
|
2589
|
+
name: t.name,
|
|
2590
|
+
description: t.description,
|
|
2591
|
+
parameters: t.parameters
|
|
2592
|
+
}));
|
|
2593
|
+
}
|
|
2594
|
+
cleanJsonSchema(schema) {
|
|
2595
|
+
const cleaned = { ...schema };
|
|
2596
|
+
delete cleaned["$schema"];
|
|
2597
|
+
delete cleaned["$ref"];
|
|
2598
|
+
delete cleaned["additionalProperties"];
|
|
2599
|
+
if (cleaned.properties && typeof cleaned.properties === "object") {
|
|
2600
|
+
const props = {};
|
|
2601
|
+
for (const [key, val] of Object.entries(
|
|
2602
|
+
cleaned.properties
|
|
2603
|
+
)) {
|
|
2604
|
+
props[key] = typeof val === "object" && val ? this.cleanJsonSchema(val) : val;
|
|
2605
|
+
}
|
|
2606
|
+
cleaned.properties = props;
|
|
2607
|
+
}
|
|
2608
|
+
if (cleaned.items && typeof cleaned.items === "object") {
|
|
2609
|
+
cleaned.items = this.cleanJsonSchema(
|
|
2610
|
+
cleaned.items
|
|
2611
|
+
);
|
|
2612
|
+
}
|
|
2613
|
+
return cleaned;
|
|
2614
|
+
}
|
|
2615
|
+
normalizeResponse(response) {
|
|
2616
|
+
const candidate = response.candidates?.[0];
|
|
2617
|
+
const parts = candidate?.content?.parts ?? [];
|
|
2618
|
+
let textContent = "";
|
|
2619
|
+
const toolCalls = [];
|
|
2620
|
+
let toolCallCounter = 0;
|
|
2621
|
+
for (const part of parts) {
|
|
2622
|
+
if (part.text) textContent += part.text;
|
|
2623
|
+
if (part.functionCall) {
|
|
2624
|
+
toolCalls.push({
|
|
2625
|
+
id: `vertex_tc_${toolCallCounter++}`,
|
|
2626
|
+
name: part.functionCall.name,
|
|
2627
|
+
arguments: part.functionCall.args ?? {}
|
|
2628
|
+
});
|
|
2629
|
+
}
|
|
2630
|
+
}
|
|
2631
|
+
const usage = {
|
|
2632
|
+
promptTokens: response.usageMetadata?.promptTokenCount ?? 0,
|
|
2633
|
+
completionTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
|
|
2634
|
+
totalTokens: response.usageMetadata?.totalTokenCount ?? 0
|
|
2635
|
+
};
|
|
2636
|
+
let finishReason = "stop";
|
|
2637
|
+
if (toolCalls.length > 0) finishReason = "tool_calls";
|
|
2638
|
+
else if (candidate?.finishReason === "MAX_TOKENS")
|
|
2639
|
+
finishReason = "length";
|
|
2640
|
+
else if (candidate?.finishReason === "SAFETY")
|
|
2641
|
+
finishReason = "content_filter";
|
|
2642
|
+
return {
|
|
2643
|
+
message: {
|
|
2644
|
+
role: "assistant",
|
|
2645
|
+
content: textContent || null,
|
|
2646
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : void 0
|
|
2647
|
+
},
|
|
2648
|
+
usage,
|
|
2649
|
+
finishReason,
|
|
2650
|
+
raw: response
|
|
2651
|
+
};
|
|
2652
|
+
}
|
|
2653
|
+
};
|
|
2654
|
+
|
|
2370
2655
|
// src/models/registry.ts
|
|
2371
2656
|
var ModelRegistry = class {
|
|
2372
2657
|
factories = /* @__PURE__ */ new Map();
|
|
@@ -2415,6 +2700,16 @@ function google(modelId, config) {
|
|
|
2415
2700
|
function ollama(modelId, config) {
|
|
2416
2701
|
return registry.resolve("ollama", modelId, config);
|
|
2417
2702
|
}
|
|
2703
|
+
registry.register(
|
|
2704
|
+
"vertex",
|
|
2705
|
+
(modelId, config) => new VertexAIProvider(
|
|
2706
|
+
modelId,
|
|
2707
|
+
config
|
|
2708
|
+
)
|
|
2709
|
+
);
|
|
2710
|
+
function vertex(modelId, config) {
|
|
2711
|
+
return registry.resolve("vertex", modelId, config);
|
|
2712
|
+
}
|
|
2418
2713
|
|
|
2419
2714
|
// src/tools/define-tool.ts
|
|
2420
2715
|
function defineTool(config) {
|
|
@@ -2427,13 +2722,13 @@ function defineTool(config) {
|
|
|
2427
2722
|
}
|
|
2428
2723
|
|
|
2429
2724
|
// src/storage/sqlite.ts
|
|
2430
|
-
import { createRequire as
|
|
2431
|
-
var
|
|
2725
|
+
import { createRequire as createRequire8 } from "module";
|
|
2726
|
+
var _require8 = createRequire8(import.meta.url);
|
|
2432
2727
|
var SqliteStorage = class {
|
|
2433
2728
|
db;
|
|
2434
2729
|
constructor(dbPath) {
|
|
2435
2730
|
try {
|
|
2436
|
-
const Database =
|
|
2731
|
+
const Database = _require8("better-sqlite3");
|
|
2437
2732
|
this.db = new Database(dbPath);
|
|
2438
2733
|
this.db.pragma("journal_mode = WAL");
|
|
2439
2734
|
this.db.exec(`
|
|
@@ -2482,13 +2777,13 @@ var SqliteStorage = class {
|
|
|
2482
2777
|
};
|
|
2483
2778
|
|
|
2484
2779
|
// src/storage/postgres.ts
|
|
2485
|
-
import { createRequire as
|
|
2486
|
-
var
|
|
2780
|
+
import { createRequire as createRequire9 } from "module";
|
|
2781
|
+
var _require9 = createRequire9(import.meta.url);
|
|
2487
2782
|
var PostgresStorage = class {
|
|
2488
2783
|
pool;
|
|
2489
2784
|
constructor(connectionString) {
|
|
2490
2785
|
try {
|
|
2491
|
-
const { Pool } =
|
|
2786
|
+
const { Pool } = _require9("pg");
|
|
2492
2787
|
this.pool = new Pool({ connectionString });
|
|
2493
2788
|
} catch {
|
|
2494
2789
|
throw new Error(
|
|
@@ -2549,15 +2844,15 @@ var PostgresStorage = class {
|
|
|
2549
2844
|
};
|
|
2550
2845
|
|
|
2551
2846
|
// src/storage/mongodb.ts
|
|
2552
|
-
import { createRequire as
|
|
2553
|
-
var
|
|
2847
|
+
import { createRequire as createRequire10 } from "module";
|
|
2848
|
+
var _require10 = createRequire10(import.meta.url);
|
|
2554
2849
|
var MongoDBStorage = class {
|
|
2555
2850
|
constructor(uri, dbName = "radaros", collectionName = "kv_store") {
|
|
2556
2851
|
this.uri = uri;
|
|
2557
2852
|
this.dbName = dbName;
|
|
2558
2853
|
this.collectionName = collectionName;
|
|
2559
2854
|
try {
|
|
2560
|
-
const { MongoClient } =
|
|
2855
|
+
const { MongoClient } = _require10("mongodb");
|
|
2561
2856
|
this.client = new MongoClient(uri);
|
|
2562
2857
|
} catch {
|
|
2563
2858
|
throw new Error(
|
|
@@ -2717,8 +3012,8 @@ var InMemoryVectorStore = class extends BaseVectorStore {
|
|
|
2717
3012
|
};
|
|
2718
3013
|
|
|
2719
3014
|
// src/vector/pgvector.ts
|
|
2720
|
-
import { createRequire as
|
|
2721
|
-
var
|
|
3015
|
+
import { createRequire as createRequire11 } from "module";
|
|
3016
|
+
var _require11 = createRequire11(import.meta.url);
|
|
2722
3017
|
var PgVectorStore = class extends BaseVectorStore {
|
|
2723
3018
|
pool;
|
|
2724
3019
|
dimensions;
|
|
@@ -2727,7 +3022,7 @@ var PgVectorStore = class extends BaseVectorStore {
|
|
|
2727
3022
|
super(embedder);
|
|
2728
3023
|
this.dimensions = config.dimensions ?? embedder?.dimensions ?? 1536;
|
|
2729
3024
|
try {
|
|
2730
|
-
const { Pool } =
|
|
3025
|
+
const { Pool } = _require11("pg");
|
|
2731
3026
|
this.pool = new Pool({ connectionString: config.connectionString });
|
|
2732
3027
|
} catch {
|
|
2733
3028
|
throw new Error(
|
|
@@ -2846,9 +3141,9 @@ var PgVectorStore = class extends BaseVectorStore {
|
|
|
2846
3141
|
};
|
|
2847
3142
|
|
|
2848
3143
|
// src/vector/qdrant.ts
|
|
2849
|
-
import { createRequire as
|
|
3144
|
+
import { createRequire as createRequire12 } from "module";
|
|
2850
3145
|
import { createHash } from "crypto";
|
|
2851
|
-
var
|
|
3146
|
+
var _require12 = createRequire12(import.meta.url);
|
|
2852
3147
|
var UUID_RE = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
|
|
2853
3148
|
function stringToUUID(str) {
|
|
2854
3149
|
const hex = createHash("md5").update(str).digest("hex");
|
|
@@ -2873,7 +3168,7 @@ var QdrantVectorStore = class extends BaseVectorStore {
|
|
|
2873
3168
|
super(embedder);
|
|
2874
3169
|
this.dimensions = config.dimensions ?? embedder?.dimensions ?? 1536;
|
|
2875
3170
|
try {
|
|
2876
|
-
const { QdrantClient } =
|
|
3171
|
+
const { QdrantClient } = _require12("@qdrant/js-client-rest");
|
|
2877
3172
|
this.client = new QdrantClient({
|
|
2878
3173
|
url: config.url ?? "http://localhost:6333",
|
|
2879
3174
|
apiKey: config.apiKey,
|
|
@@ -3006,8 +3301,8 @@ var QdrantVectorStore = class extends BaseVectorStore {
|
|
|
3006
3301
|
};
|
|
3007
3302
|
|
|
3008
3303
|
// src/vector/mongodb.ts
|
|
3009
|
-
import { createRequire as
|
|
3010
|
-
var
|
|
3304
|
+
import { createRequire as createRequire13 } from "module";
|
|
3305
|
+
var _require13 = createRequire13(import.meta.url);
|
|
3011
3306
|
var MongoDBVectorStore = class extends BaseVectorStore {
|
|
3012
3307
|
client;
|
|
3013
3308
|
db;
|
|
@@ -3019,7 +3314,7 @@ var MongoDBVectorStore = class extends BaseVectorStore {
|
|
|
3019
3314
|
this.indexName = config.indexName ?? "vector_index";
|
|
3020
3315
|
this.dbName = config.dbName ?? "radaros_vectors";
|
|
3021
3316
|
try {
|
|
3022
|
-
const { MongoClient } =
|
|
3317
|
+
const { MongoClient } = _require13("mongodb");
|
|
3023
3318
|
this.client = new MongoClient(config.uri);
|
|
3024
3319
|
} catch {
|
|
3025
3320
|
throw new Error(
|
|
@@ -3190,8 +3485,8 @@ function cosine(a, b) {
|
|
|
3190
3485
|
}
|
|
3191
3486
|
|
|
3192
3487
|
// src/vector/embeddings/openai.ts
|
|
3193
|
-
import { createRequire as
|
|
3194
|
-
var
|
|
3488
|
+
import { createRequire as createRequire14 } from "module";
|
|
3489
|
+
var _require14 = createRequire14(import.meta.url);
|
|
3195
3490
|
var MODEL_DIMENSIONS = {
|
|
3196
3491
|
"text-embedding-3-small": 1536,
|
|
3197
3492
|
"text-embedding-3-large": 3072,
|
|
@@ -3205,7 +3500,7 @@ var OpenAIEmbedding = class {
|
|
|
3205
3500
|
this.model = config.model ?? "text-embedding-3-small";
|
|
3206
3501
|
this.dimensions = config.dimensions ?? MODEL_DIMENSIONS[this.model] ?? 1536;
|
|
3207
3502
|
try {
|
|
3208
|
-
const mod =
|
|
3503
|
+
const mod = _require14("openai");
|
|
3209
3504
|
const OpenAI = mod.default ?? mod;
|
|
3210
3505
|
this.client = new OpenAI({
|
|
3211
3506
|
apiKey: config.apiKey ?? process.env.OPENAI_API_KEY,
|
|
@@ -3236,8 +3531,8 @@ var OpenAIEmbedding = class {
|
|
|
3236
3531
|
};
|
|
3237
3532
|
|
|
3238
3533
|
// src/vector/embeddings/google.ts
|
|
3239
|
-
import { createRequire as
|
|
3240
|
-
var
|
|
3534
|
+
import { createRequire as createRequire15 } from "module";
|
|
3535
|
+
var _require15 = createRequire15(import.meta.url);
|
|
3241
3536
|
var MODEL_DIMENSIONS2 = {
|
|
3242
3537
|
"text-embedding-004": 768,
|
|
3243
3538
|
"embedding-001": 768
|
|
@@ -3250,7 +3545,7 @@ var GoogleEmbedding = class {
|
|
|
3250
3545
|
this.model = config.model ?? "text-embedding-004";
|
|
3251
3546
|
this.dimensions = config.dimensions ?? MODEL_DIMENSIONS2[this.model] ?? 768;
|
|
3252
3547
|
try {
|
|
3253
|
-
const { GoogleGenAI } =
|
|
3548
|
+
const { GoogleGenAI } = _require15("@google/genai");
|
|
3254
3549
|
this.ai = new GoogleGenAI({
|
|
3255
3550
|
apiKey: config.apiKey ?? process.env.GOOGLE_API_KEY
|
|
3256
3551
|
});
|
|
@@ -3843,6 +4138,7 @@ export {
|
|
|
3843
4138
|
Team,
|
|
3844
4139
|
TeamMode,
|
|
3845
4140
|
ToolExecutor,
|
|
4141
|
+
VertexAIProvider,
|
|
3846
4142
|
Workflow,
|
|
3847
4143
|
anthropic,
|
|
3848
4144
|
defineTool,
|
|
@@ -3851,5 +4147,6 @@ export {
|
|
|
3851
4147
|
isMultiModal,
|
|
3852
4148
|
ollama,
|
|
3853
4149
|
openai,
|
|
3854
|
-
registry
|
|
4150
|
+
registry,
|
|
4151
|
+
vertex
|
|
3855
4152
|
};
|
package/package.json
CHANGED
package/src/index.ts
CHANGED
|
@@ -49,11 +49,13 @@ export type {
|
|
|
49
49
|
ModelConfig,
|
|
50
50
|
} from "./models/types.js";
|
|
51
51
|
export { getTextContent, isMultiModal } from "./models/types.js";
|
|
52
|
-
export { ModelRegistry, registry, openai, anthropic, google, ollama } from "./models/registry.js";
|
|
52
|
+
export { ModelRegistry, registry, openai, anthropic, google, ollama, vertex } from "./models/registry.js";
|
|
53
53
|
export { OpenAIProvider } from "./models/providers/openai.js";
|
|
54
54
|
export { AnthropicProvider } from "./models/providers/anthropic.js";
|
|
55
55
|
export { GoogleProvider } from "./models/providers/google.js";
|
|
56
56
|
export { OllamaProvider } from "./models/providers/ollama.js";
|
|
57
|
+
export { VertexAIProvider } from "./models/providers/vertex.js";
|
|
58
|
+
export type { VertexAIConfig } from "./models/providers/vertex.js";
|
|
57
59
|
|
|
58
60
|
// Tools
|
|
59
61
|
export { defineTool } from "./tools/define-tool.js";
|
|
@@ -0,0 +1,400 @@
|
|
|
1
|
+
import { createRequire } from "node:module";
|
|
2
|
+
import type { ModelProvider } from "../provider.js";
|
|
3
|
+
import {
|
|
4
|
+
getTextContent,
|
|
5
|
+
isMultiModal,
|
|
6
|
+
type ChatMessage,
|
|
7
|
+
type ContentPart,
|
|
8
|
+
type ModelConfig,
|
|
9
|
+
type ModelResponse,
|
|
10
|
+
type StreamChunk,
|
|
11
|
+
type ToolDefinition,
|
|
12
|
+
type TokenUsage,
|
|
13
|
+
type ToolCall,
|
|
14
|
+
} from "../types.js";
|
|
15
|
+
|
|
16
|
+
const _require = createRequire(import.meta.url);
|
|
17
|
+
|
|
18
|
+
export interface VertexAIConfig {
|
|
19
|
+
project?: string;
|
|
20
|
+
location?: string;
|
|
21
|
+
/** Service account key JSON string or path (optional — uses ADC by default). */
|
|
22
|
+
credentials?: string;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
/**
|
|
26
|
+
* Vertex AI provider using Google's @google/genai SDK in Vertex mode.
|
|
27
|
+
*
|
|
28
|
+
* Authentication (in order of precedence):
|
|
29
|
+
* 1. Explicit `project` + `location` in config
|
|
30
|
+
* 2. GOOGLE_CLOUD_PROJECT / GOOGLE_CLOUD_LOCATION env vars
|
|
31
|
+
* 3. Application Default Credentials (gcloud auth)
|
|
32
|
+
*/
|
|
33
|
+
export class VertexAIProvider implements ModelProvider {
|
|
34
|
+
readonly providerId = "vertex";
|
|
35
|
+
readonly modelId: string;
|
|
36
|
+
private ai: any = null;
|
|
37
|
+
private GoogleGenAICtor: any;
|
|
38
|
+
private project: string;
|
|
39
|
+
private location: string;
|
|
40
|
+
|
|
41
|
+
constructor(modelId: string, config?: VertexAIConfig) {
|
|
42
|
+
this.modelId = modelId;
|
|
43
|
+
this.project =
|
|
44
|
+
config?.project ??
|
|
45
|
+
process.env.GOOGLE_CLOUD_PROJECT ??
|
|
46
|
+
process.env.GCLOUD_PROJECT ??
|
|
47
|
+
"";
|
|
48
|
+
this.location =
|
|
49
|
+
config?.location ??
|
|
50
|
+
process.env.GOOGLE_CLOUD_LOCATION ??
|
|
51
|
+
process.env.GOOGLE_CLOUD_REGION ??
|
|
52
|
+
"us-central1";
|
|
53
|
+
|
|
54
|
+
if (!this.project) {
|
|
55
|
+
throw new Error(
|
|
56
|
+
"VertexAIProvider: 'project' is required. Pass it in config or set GOOGLE_CLOUD_PROJECT env var."
|
|
57
|
+
);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
try {
|
|
61
|
+
const { GoogleGenAI } = _require("@google/genai");
|
|
62
|
+
this.GoogleGenAICtor = GoogleGenAI;
|
|
63
|
+
} catch {
|
|
64
|
+
throw new Error(
|
|
65
|
+
"@google/genai is required for VertexAIProvider. Install it: npm install @google/genai"
|
|
66
|
+
);
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
private getClient(): any {
|
|
71
|
+
if (this.ai) return this.ai;
|
|
72
|
+
this.ai = new this.GoogleGenAICtor({
|
|
73
|
+
vertexai: true,
|
|
74
|
+
project: this.project,
|
|
75
|
+
location: this.location,
|
|
76
|
+
});
|
|
77
|
+
return this.ai;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
async generate(
|
|
81
|
+
messages: ChatMessage[],
|
|
82
|
+
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
83
|
+
): Promise<ModelResponse> {
|
|
84
|
+
const { systemInstruction, contents } = this.toGoogleMessages(messages);
|
|
85
|
+
|
|
86
|
+
const config: Record<string, unknown> = {};
|
|
87
|
+
if (options?.temperature !== undefined)
|
|
88
|
+
config.temperature = options.temperature;
|
|
89
|
+
if (options?.maxTokens !== undefined)
|
|
90
|
+
config.maxOutputTokens = options.maxTokens;
|
|
91
|
+
if (options?.topP !== undefined) config.topP = options.topP;
|
|
92
|
+
if (options?.stop) config.stopSequences = options.stop;
|
|
93
|
+
|
|
94
|
+
if (options?.responseFormat) {
|
|
95
|
+
config.responseMimeType = "application/json";
|
|
96
|
+
const rf = options.responseFormat;
|
|
97
|
+
if (
|
|
98
|
+
typeof rf === "object" &&
|
|
99
|
+
rf !== null &&
|
|
100
|
+
"type" in rf &&
|
|
101
|
+
rf.type === "json_schema" &&
|
|
102
|
+
"schema" in rf &&
|
|
103
|
+
rf.schema
|
|
104
|
+
) {
|
|
105
|
+
config.responseSchema = this.cleanJsonSchema(
|
|
106
|
+
rf.schema as Record<string, unknown>
|
|
107
|
+
);
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
const params: Record<string, unknown> = {
|
|
112
|
+
model: this.modelId,
|
|
113
|
+
contents,
|
|
114
|
+
config,
|
|
115
|
+
};
|
|
116
|
+
|
|
117
|
+
if (systemInstruction) params.systemInstruction = systemInstruction;
|
|
118
|
+
if (options?.tools?.length) {
|
|
119
|
+
params.tools = [
|
|
120
|
+
{ functionDeclarations: this.toGoogleTools(options.tools) },
|
|
121
|
+
];
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
const client = this.getClient();
|
|
125
|
+
const response = await client.models.generateContent(params);
|
|
126
|
+
return this.normalizeResponse(response);
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
async *stream(
|
|
130
|
+
messages: ChatMessage[],
|
|
131
|
+
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
132
|
+
): AsyncGenerator<StreamChunk> {
|
|
133
|
+
const { systemInstruction, contents } = this.toGoogleMessages(messages);
|
|
134
|
+
|
|
135
|
+
const config: Record<string, unknown> = {};
|
|
136
|
+
if (options?.temperature !== undefined)
|
|
137
|
+
config.temperature = options.temperature;
|
|
138
|
+
if (options?.maxTokens !== undefined)
|
|
139
|
+
config.maxOutputTokens = options.maxTokens;
|
|
140
|
+
if (options?.topP !== undefined) config.topP = options.topP;
|
|
141
|
+
if (options?.stop) config.stopSequences = options.stop;
|
|
142
|
+
|
|
143
|
+
const params: Record<string, unknown> = {
|
|
144
|
+
model: this.modelId,
|
|
145
|
+
contents,
|
|
146
|
+
config,
|
|
147
|
+
};
|
|
148
|
+
|
|
149
|
+
if (systemInstruction) params.systemInstruction = systemInstruction;
|
|
150
|
+
if (options?.tools?.length) {
|
|
151
|
+
params.tools = [
|
|
152
|
+
{ functionDeclarations: this.toGoogleTools(options.tools) },
|
|
153
|
+
];
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
const client = this.getClient();
|
|
157
|
+
const streamResult = await client.models.generateContentStream(params);
|
|
158
|
+
|
|
159
|
+
let toolCallCounter = 0;
|
|
160
|
+
|
|
161
|
+
for await (const chunk of streamResult) {
|
|
162
|
+
const candidate = chunk.candidates?.[0];
|
|
163
|
+
if (!candidate?.content?.parts) continue;
|
|
164
|
+
|
|
165
|
+
for (const part of candidate.content.parts) {
|
|
166
|
+
if (part.text) {
|
|
167
|
+
yield { type: "text", text: part.text };
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
if (part.functionCall) {
|
|
171
|
+
const id = `vertex_tc_${toolCallCounter++}`;
|
|
172
|
+
yield {
|
|
173
|
+
type: "tool_call_start",
|
|
174
|
+
toolCall: { id, name: part.functionCall.name },
|
|
175
|
+
};
|
|
176
|
+
yield {
|
|
177
|
+
type: "tool_call_delta",
|
|
178
|
+
toolCallId: id,
|
|
179
|
+
argumentsDelta: JSON.stringify(part.functionCall.args ?? {}),
|
|
180
|
+
};
|
|
181
|
+
yield { type: "tool_call_end", toolCallId: id };
|
|
182
|
+
}
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
if (candidate.finishReason) {
|
|
186
|
+
let finishReason = "stop";
|
|
187
|
+
if (
|
|
188
|
+
candidate.finishReason === "STOP" ||
|
|
189
|
+
candidate.finishReason === "END_TURN"
|
|
190
|
+
)
|
|
191
|
+
finishReason = "stop";
|
|
192
|
+
else if (candidate.finishReason === "MAX_TOKENS")
|
|
193
|
+
finishReason = "length";
|
|
194
|
+
else if (candidate.finishReason === "SAFETY")
|
|
195
|
+
finishReason = "content_filter";
|
|
196
|
+
|
|
197
|
+
const hasToolCalls = candidate.content?.parts?.some(
|
|
198
|
+
(p: any) => p.functionCall
|
|
199
|
+
);
|
|
200
|
+
if (hasToolCalls) finishReason = "tool_calls";
|
|
201
|
+
|
|
202
|
+
yield {
|
|
203
|
+
type: "finish",
|
|
204
|
+
finishReason,
|
|
205
|
+
usage: chunk.usageMetadata
|
|
206
|
+
? {
|
|
207
|
+
promptTokens: chunk.usageMetadata.promptTokenCount ?? 0,
|
|
208
|
+
completionTokens:
|
|
209
|
+
chunk.usageMetadata.candidatesTokenCount ?? 0,
|
|
210
|
+
totalTokens: chunk.usageMetadata.totalTokenCount ?? 0,
|
|
211
|
+
}
|
|
212
|
+
: undefined,
|
|
213
|
+
};
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
// ── Message conversion (identical to GoogleProvider) ─────────────────────
|
|
219
|
+
|
|
220
|
+
/**
 * Convert framework ChatMessages into Google `contents`, lifting any
 * system message out into a separate systemInstruction string
 * (Gemini takes the system prompt out-of-band).
 */
private toGoogleMessages(messages: ChatMessage[]): {
  systemInstruction: string | undefined;
  contents: unknown[];
} {
  const contents: unknown[] = [];
  let systemInstruction: string | undefined;

  for (const msg of messages) {
    switch (msg.role) {
      case "system":
        // Last system message wins; empty text counts as absent.
        systemInstruction = getTextContent(msg.content) || undefined;
        break;

      case "user": {
        const parts = isMultiModal(msg.content)
          ? msg.content.map((p) => this.partToGoogle(p))
          : [{ text: msg.content ?? "" }];
        contents.push({ role: "user", parts });
        break;
      }

      case "assistant": {
        const parts: unknown[] = [];
        if (msg.content) parts.push({ text: msg.content });
        for (const tc of msg.toolCalls ?? []) {
          parts.push({
            functionCall: { name: tc.name, args: tc.arguments },
          });
        }
        // Guarantee the model turn carries at least one part.
        if (!parts.length) parts.push({ text: "" });
        contents.push({ role: "model", parts });
        break;
      }

      case "tool":
        contents.push({
          role: "function",
          parts: [
            {
              functionResponse: {
                name: msg.name ?? "unknown",
                response: { result: msg.content ?? "" },
              },
            },
          ],
        });
        break;
    }
  }

  return { systemInstruction, contents };
}
|
|
281
|
+
|
|
282
|
+
/**
 * Map one ContentPart to Google's part shape: text stays inline, media
 * becomes `fileData` for remote URIs or `inlineData` for base64 payloads.
 */
private partToGoogle(part: ContentPart): unknown {
  switch (part.type) {
    case "text":
      return { text: part.text };

    case "image":
    case "audio":
    case "file": {
      const isRemote = /^https?:\/\//.test(part.data);

      if (isRemote) {
        // Remote media: only images get a typed fallback MIME here.
        const mimeType =
          part.mimeType ??
          (part.type === "image" ? "image/png" : "application/octet-stream");
        return { fileData: { fileUri: part.data, mimeType } };
      }

      // Inline base64 media: per-type fallback MIME.
      const mimeType =
        part.mimeType ??
        (part.type === "image"
          ? "image/png"
          : part.type === "audio"
            ? "audio/mp3"
            : "application/octet-stream");
      return { inlineData: { data: part.data, mimeType } };
    }
  }
}
|
|
318
|
+
|
|
319
|
+
/** Project ToolDefinitions onto Google functionDeclaration objects. */
private toGoogleTools(tools: ToolDefinition[]): unknown[] {
  const declarations: unknown[] = [];
  for (const { name, description, parameters } of tools) {
    declarations.push({ name, description, parameters });
  }
  return declarations;
}
|
|
326
|
+
|
|
327
|
+
private cleanJsonSchema(
|
|
328
|
+
schema: Record<string, unknown>
|
|
329
|
+
): Record<string, unknown> {
|
|
330
|
+
const cleaned = { ...schema };
|
|
331
|
+
delete cleaned["$schema"];
|
|
332
|
+
delete cleaned["$ref"];
|
|
333
|
+
delete cleaned["additionalProperties"];
|
|
334
|
+
|
|
335
|
+
if (cleaned.properties && typeof cleaned.properties === "object") {
|
|
336
|
+
const props: Record<string, unknown> = {};
|
|
337
|
+
for (const [key, val] of Object.entries(
|
|
338
|
+
cleaned.properties as Record<string, unknown>
|
|
339
|
+
)) {
|
|
340
|
+
props[key] =
|
|
341
|
+
typeof val === "object" && val
|
|
342
|
+
? this.cleanJsonSchema(val as Record<string, unknown>)
|
|
343
|
+
: val;
|
|
344
|
+
}
|
|
345
|
+
cleaned.properties = props;
|
|
346
|
+
}
|
|
347
|
+
|
|
348
|
+
if (cleaned.items && typeof cleaned.items === "object") {
|
|
349
|
+
cleaned.items = this.cleanJsonSchema(
|
|
350
|
+
cleaned.items as Record<string, unknown>
|
|
351
|
+
);
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
return cleaned;
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
/**
 * Collapse a raw Google response into the framework's ModelResponse:
 * concatenated text, extracted tool calls, token usage, finish reason.
 */
private normalizeResponse(response: any): ModelResponse {
  const candidate = response.candidates?.[0];

  let textContent = "";
  const toolCalls: ToolCall[] = [];

  for (const part of candidate?.content?.parts ?? []) {
    if (part.text) textContent += part.text;
    if (part.functionCall) {
      toolCalls.push({
        // Google supplies no call id; synthesize a stable sequential one.
        id: `vertex_tc_${toolCalls.length}`,
        name: part.functionCall.name,
        arguments: part.functionCall.args ?? {},
      });
    }
  }

  const meta = response.usageMetadata;
  const usage: TokenUsage = {
    promptTokens: meta?.promptTokenCount ?? 0,
    completionTokens: meta?.candidatesTokenCount ?? 0,
    totalTokens: meta?.totalTokenCount ?? 0,
  };

  // Tool calls dominate; otherwise map Google's reason, defaulting to "stop".
  const reasonMap: Record<string, ModelResponse["finishReason"]> = {
    MAX_TOKENS: "length",
    SAFETY: "content_filter",
  };
  const finishReason: ModelResponse["finishReason"] =
    toolCalls.length > 0
      ? "tool_calls"
      : reasonMap[candidate?.finishReason ?? ""] ?? "stop";

  return {
    message: {
      role: "assistant",
      content: textContent || null,
      toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    },
    usage,
    finishReason,
    raw: response,
  };
}
|
|
400
|
+
}
|
package/src/models/registry.ts
CHANGED
|
@@ -3,6 +3,7 @@ import { OpenAIProvider } from "./providers/openai.js";
|
|
|
3
3
|
import { AnthropicProvider } from "./providers/anthropic.js";
|
|
4
4
|
import { GoogleProvider } from "./providers/google.js";
|
|
5
5
|
import { OllamaProvider } from "./providers/ollama.js";
|
|
6
|
+
import { VertexAIProvider } from "./providers/vertex.js";
|
|
6
7
|
|
|
7
8
|
type ProviderFactory = (
|
|
8
9
|
modelId: string,
|
|
@@ -88,3 +89,19 @@ export function ollama(
|
|
|
88
89
|
): ModelProvider {
|
|
89
90
|
return registry.resolve("ollama", modelId, config);
|
|
90
91
|
}
|
|
92
|
+
|
|
93
|
+
registry.register(
|
|
94
|
+
"vertex",
|
|
95
|
+
(modelId, config) =>
|
|
96
|
+
new VertexAIProvider(
|
|
97
|
+
modelId,
|
|
98
|
+
config as { project?: string; location?: string; credentials?: string }
|
|
99
|
+
)
|
|
100
|
+
);
|
|
101
|
+
|
|
102
|
+
/**
 * Resolve a Vertex AI ModelProvider for the given model id.
 *
 * @param modelId  Model name to run on Vertex AI.
 * @param config   Optional GCP project/location and service-account
 *                 credentials; omitted fields fall back to the provider's
 *                 own resolution (env vars / ADC per the class docs).
 * @returns A (possibly cached) provider from the shared registry.
 */
export function vertex(
  modelId: string,
  config?: { project?: string; location?: string; credentials?: string }
): ModelProvider {
  return registry.resolve("vertex", modelId, config);
}
|