@juspay/neurolink 9.17.2 → 9.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/context/stages/slidingWindowTruncator.js +2 -2
  3. package/dist/core/baseProvider.d.ts +13 -0
  4. package/dist/core/baseProvider.js +21 -0
  5. package/dist/lib/context/stages/slidingWindowTruncator.js +2 -2
  6. package/dist/lib/core/baseProvider.d.ts +13 -0
  7. package/dist/lib/core/baseProvider.js +21 -0
  8. package/dist/lib/providers/amazonBedrock.d.ts +7 -0
  9. package/dist/lib/providers/amazonBedrock.js +32 -0
  10. package/dist/lib/providers/googleAiStudio.d.ts +15 -0
  11. package/dist/lib/providers/googleAiStudio.js +87 -3
  12. package/dist/lib/providers/googleNativeGemini3.d.ts +24 -1
  13. package/dist/lib/providers/googleNativeGemini3.js +117 -4
  14. package/dist/lib/providers/googleVertex.d.ts +7 -0
  15. package/dist/lib/providers/googleVertex.js +52 -5
  16. package/dist/lib/providers/openAI.d.ts +7 -0
  17. package/dist/lib/providers/openAI.js +41 -2
  18. package/dist/lib/server/routes/agentRoutes.js +60 -1
  19. package/dist/lib/server/types.d.ts +50 -0
  20. package/dist/lib/server/utils/validation.d.ts +32 -0
  21. package/dist/lib/server/utils/validation.js +19 -0
  22. package/dist/lib/types/providers.d.ts +2 -0
  23. package/dist/providers/amazonBedrock.d.ts +7 -0
  24. package/dist/providers/amazonBedrock.js +32 -0
  25. package/dist/providers/googleAiStudio.d.ts +15 -0
  26. package/dist/providers/googleAiStudio.js +87 -3
  27. package/dist/providers/googleNativeGemini3.d.ts +24 -1
  28. package/dist/providers/googleNativeGemini3.js +117 -4
  29. package/dist/providers/googleVertex.d.ts +7 -0
  30. package/dist/providers/googleVertex.js +52 -5
  31. package/dist/providers/openAI.d.ts +7 -0
  32. package/dist/providers/openAI.js +41 -2
  33. package/dist/server/routes/agentRoutes.js +60 -1
  34. package/dist/server/types.d.ts +50 -0
  35. package/dist/server/utils/validation.d.ts +32 -0
  36. package/dist/server/utils/validation.js +19 -0
  37. package/dist/types/providers.d.ts +2 -0
  38. package/package.json +1 -1
package/dist/providers/openAI.js (identical change in package/dist/lib/providers/openAI.js)
@@ -1,5 +1,5 @@
  import { createOpenAI } from "@ai-sdk/openai";
- import { streamText } from "ai";
+ import { embed, embedMany, streamText, } from "ai";
  import { trace, SpanKind, SpanStatusCode } from "@opentelemetry/api";
  import { AIProviderName } from "../constants/enums.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -529,7 +529,6 @@ export class OpenAIProvider extends BaseProvider {
  });
  try {
  // Create embedding model using the AI SDK
- const { embed } = await import("ai");
  // Create the OpenAI provider
  const openai = createOpenAI({
  apiKey: getOpenAIApiKey(),
@@ -558,6 +557,46 @@ export class OpenAIProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-3-small)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || "text-embedding-3-small";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const openai = createOpenAI({
+ apiKey: getOpenAIApiKey(),
+ fetch: createProxyFetch(),
+ });
+ const embeddingModel = openai.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  }
  // Export for factory registration
  export default OpenAIProvider;
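
For library consumers, the new OpenAI embedding surface can be exercised roughly as follows. This is a minimal sketch, assuming ProviderFactory is importable from the package root (the diff only shows dist paths, so the import path is an assumption):

    // Hypothetical import path; adjust to however your build resolves the package.
    import { ProviderFactory } from "@juspay/neurolink";

    const provider = await ProviderFactory.createProvider("openai");
    // Single text -> number[]
    const vector = await provider.embed("hello world");
    // Batch -> number[][], one vector per input, in input order
    const vectors = await provider.embedMany(["first text", "second text"]);
    console.log(vectors.length, vectors[0]?.length); // 2, <embedding dimension>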
package/dist/server/routes/agentRoutes.js (identical change in package/dist/lib/server/routes/agentRoutes.js)
@@ -4,7 +4,7 @@
  */
  import { ProviderFactory } from "../../factories/providerFactory.js";
  import { createStreamRedactor } from "../utils/redaction.js";
- import { AgentExecuteRequestSchema, validateRequest, } from "../utils/validation.js";
+ import { AgentExecuteRequestSchema, createErrorResponse as createError, EmbedManyRequestSchema, EmbedRequestSchema, validateRequest, } from "../utils/validation.js";
  /**
  * Create agent routes
  */
@@ -107,6 +107,65 @@ export function createAgentRoutes(basePath = "/api") {
  description: "List available AI providers",
  tags: ["agent", "providers"],
  },
+ {
+ method: "POST",
+ path: `${basePath}/agent/embed`,
+ handler: async (ctx) => {
+ const validation = validateRequest(EmbedRequestSchema, ctx.body, ctx.requestId);
+ if (!validation.success) {
+ return validation.error;
+ }
+ const request = validation.data;
+ try {
+ const providerName = request.provider || "openai";
+ const provider = await ProviderFactory.createProvider(providerName, request.model);
+ const embedding = await provider.embed(request.text, request.model);
+ return {
+ embedding,
+ provider: providerName,
+ model: request.model || "default",
+ dimension: embedding.length,
+ };
+ }
+ catch (error) {
+ return createError("EXECUTION_FAILED", error instanceof Error
+ ? error.message
+ : "Embedding generation failed", undefined, ctx.requestId);
+ }
+ },
+ description: "Generate embedding for a single text",
+ tags: ["agent", "embeddings"],
+ },
+ {
+ method: "POST",
+ path: `${basePath}/agent/embed-many`,
+ handler: async (ctx) => {
+ const validation = validateRequest(EmbedManyRequestSchema, ctx.body, ctx.requestId);
+ if (!validation.success) {
+ return validation.error;
+ }
+ const request = validation.data;
+ try {
+ const providerName = request.provider || "openai";
+ const provider = await ProviderFactory.createProvider(providerName, request.model);
+ const embeddings = await provider.embedMany(request.texts, request.model);
+ return {
+ embeddings,
+ provider: providerName,
+ model: request.model || "default",
+ count: embeddings.length,
+ dimension: embeddings[0]?.length ?? 0,
+ };
+ }
+ catch (error) {
+ return createError("EXECUTION_FAILED", error instanceof Error
+ ? error.message
+ : "Batch embedding generation failed", undefined, ctx.requestId);
+ }
+ },
+ description: "Generate embeddings for multiple texts in a batch",
+ tags: ["agent", "embeddings"],
+ },
  ],
  };
  }
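
A hedged sketch of calling the two new routes over HTTP (host and port are placeholders; request fields follow EmbedRequestSchema/EmbedManyRequestSchema, and responses follow the EmbedResponse/EmbedManyResponse types added below):

    // Placeholder origin; paths follow the default basePath "/api".
    const res = await fetch("http://localhost:3000/api/agent/embed", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ text: "hello", provider: "openai" }),
    });
    const { embedding, dimension } = await res.json();

    const batch = await fetch("http://localhost:3000/api/agent/embed-many", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ texts: ["a", "b"], model: "text-embedding-3-small" }),
    });
    const { embeddings, count } = await batch.json();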
package/dist/server/types.d.ts (identical change in package/dist/lib/server/types.d.ts)
@@ -458,6 +458,56 @@ export type AgentExecuteResponse = {
  /** Response metadata */
  metadata?: Record<string, JsonValue>;
  };
+ /**
+ * Embed request (single text)
+ */
+ export type EmbedRequest = {
+ /** Text to embed */
+ text: string;
+ /** Provider to use (optional) */
+ provider?: string;
+ /** Embedding model to use (optional) */
+ model?: string;
+ };
+ /**
+ * Embed response (single text)
+ */
+ export type EmbedResponse = {
+ /** The embedding vector */
+ embedding: number[];
+ /** Provider used */
+ provider: string;
+ /** Model used */
+ model: string;
+ /** Embedding dimension */
+ dimension: number;
+ };
+ /**
+ * Embed many request (batch texts)
+ */
+ export type EmbedManyRequest = {
+ /** Texts to embed */
+ texts: string[];
+ /** Provider to use (optional) */
+ provider?: string;
+ /** Embedding model to use (optional) */
+ model?: string;
+ };
+ /**
+ * Embed many response (batch texts)
+ */
+ export type EmbedManyResponse = {
+ /** The embedding vectors */
+ embeddings: number[][];
+ /** Provider used */
+ provider: string;
+ /** Model used */
+ model: string;
+ /** Number of embeddings */
+ count: number;
+ /** Embedding dimension */
+ dimension: number;
+ };
  /**
  * Tool execution request
  */
package/dist/server/utils/validation.d.ts (identical change in package/dist/lib/server/utils/validation.d.ts)
@@ -168,6 +168,38 @@ export declare const SessionMessagesQuerySchema: z.ZodObject<{
  offset?: string | undefined;
  limit?: string | undefined;
  }>;
+ /**
+ * Embed request schema (single text)
+ */
+ export declare const EmbedRequestSchema: z.ZodObject<{
+ text: z.ZodString;
+ provider: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ text: string;
+ provider?: string | undefined;
+ model?: string | undefined;
+ }, {
+ text: string;
+ provider?: string | undefined;
+ model?: string | undefined;
+ }>;
+ /**
+ * Embed many request schema (batch texts)
+ */
+ export declare const EmbedManyRequestSchema: z.ZodObject<{
+ texts: z.ZodArray<z.ZodString, "many">;
+ provider: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ texts: string[];
+ provider?: string | undefined;
+ model?: string | undefined;
+ }, {
+ texts: string[];
+ provider?: string | undefined;
+ model?: string | undefined;
+ }>;
  /**
  * Standardized error response format
  */
package/dist/server/utils/validation.js (identical change in package/dist/lib/server/utils/validation.js)
@@ -108,6 +108,25 @@ export const SessionMessagesQuerySchema = z.object({
  .pipe(z.number().nonnegative())
  .optional(),
  });
+ /**
+ * Embed request schema (single text)
+ */
+ export const EmbedRequestSchema = z.object({
+ text: z.string().min(1, "Text is required"),
+ provider: z.string().optional(),
+ model: z.string().optional(),
+ });
+ /**
+ * Embed many request schema (batch texts)
+ */
+ export const EmbedManyRequestSchema = z.object({
+ texts: z
+ .array(z.string().min(1))
+ .min(1, "At least one text is required")
+ .max(2048, "Maximum 2048 texts per batch"),
+ provider: z.string().optional(),
+ model: z.string().optional(),
+ });
  /**
  * Type guard to check if a value is an ErrorResponse
  */
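
For illustration, the batch schema's limits behave as follows under zod's standard safeParse (this check is not part of the diff):

    import { z } from "zod";
    // Same shape as EmbedManyRequestSchema above, restated so the snippet is self-contained.
    const schema = z.object({
      texts: z.array(z.string().min(1)).min(1).max(2048),
      provider: z.string().optional(),
      model: z.string().optional(),
    });
    schema.safeParse({ texts: [] }).success;     // false: empty batch rejected
    schema.safeParse({ texts: [""] }).success;   // false: empty strings rejected
    schema.safeParse({ texts: ["ok"] }).success; // true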
package/dist/types/providers.d.ts (identical change in package/dist/lib/types/providers.d.ts)
@@ -329,6 +329,8 @@ export type AIProvider = {
  stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ValidationSchema): Promise<StreamResult>;
  generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
  gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
+ embed(text: string, modelName?: string): Promise<number[]>;
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  setupToolExecutor(sdk: {
  customTools: Map<string, unknown>;
  executeTool: (toolName: string, params: unknown) => Promise<unknown>;
package/dist/providers/amazonBedrock.d.ts (identical change in package/dist/lib/providers/amazonBedrock.d.ts)
@@ -53,4 +53,11 @@ export declare class AmazonBedrockProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: amazon.titan-embed-text-v2:0)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  }
package/dist/providers/amazonBedrock.js (identical change in package/dist/lib/providers/amazonBedrock.js)
@@ -1470,4 +1470,36 @@ export class AmazonBedrockProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: amazon.titan-embed-text-v2:0)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || "amazon.titan-embed-text-v2:0";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const embeddings = await Promise.all(texts.map((text) => this.embed(text, embeddingModelName)));
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: embeddings.length,
+ embeddingDimension: embeddings[0]?.length,
+ });
+ return embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  }
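
Note the design choice here: unlike the OpenAI and Google providers, which call the AI SDK's embedMany once, the Bedrock implementation fans out one embed call per text via Promise.all, so a full 2048-text batch means 2048 concurrent requests. A hypothetical wrapper (not in the package) that caps concurrency:

    // Hypothetical helper, not part of the diff: chunk the batch to limit in-flight calls.
    async function embedManyChunked(
      provider: { embed(text: string, modelName?: string): Promise<number[]> },
      texts: string[],
      chunkSize = 16,
    ): Promise<number[][]> {
      const out: number[][] = [];
      for (let i = 0; i < texts.length; i += chunkSize) {
        const chunk = texts.slice(i, i + chunkSize);
        out.push(...(await Promise.all(chunk.map((t) => provider.embed(t)))));
      }
      return out;
    }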
package/dist/providers/googleAiStudio.d.ts (identical change in package/dist/lib/providers/googleAiStudio.d.ts)
@@ -75,6 +75,21 @@ export declare class GoogleAIStudioProvider extends BaseProvider {
  */
  generate(optionsOrPrompt: TextGenerationOptions | string): Promise<EnhancedGenerateResult | null>;
  private executeAudioStreamViaGeminiLive;
+ protected getDefaultEmbeddingModel(): string;
+ /**
+ * Generate embeddings for text using Google AI Studio embedding models
+ * @param text - The text to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to the embedding vector
+ */
+ embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  private getApiKey;
  }
  export default GoogleAIStudioProvider;
package/dist/providers/googleAiStudio.js (identical change in package/dist/lib/providers/googleAiStudio.js)
@@ -1,5 +1,5 @@
  import { createGoogleGenerativeAI } from "@ai-sdk/google";
- import { streamText } from "ai";
+ import { embed, embedMany, streamText, } from "ai";
  import { ErrorCategory, ErrorSeverity, GoogleAIModels, } from "../constants/enums.js";
  import { estimateTokens } from "../utils/tokenEstimation.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -11,7 +11,7 @@ import { logger } from "../utils/logger.js";
  import { isGemini3Model } from "../utils/modelDetection.js";
  import { tracers, ATTR, withClientSpan } from "../telemetry/index.js";
  import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
- import { buildNativeToolDeclarations, buildNativeConfig, computeMaxSteps, collectStreamChunks, extractTextFromParts, executeNativeToolCalls, handleMaxStepsTermination, pushModelResponseToHistory, } from "./googleNativeGemini3.js";
+ import { buildNativeToolDeclarations, buildNativeConfig, computeMaxSteps, collectStreamChunks, extractTextFromParts, executeNativeToolCalls, handleMaxStepsTermination, pushModelResponseToHistory, sanitizeToolsForGemini, } from "./googleNativeGemini3.js";
  // Google AI Live API types now imported from ../types/providerSpecific.js
  // Import proper types for multimodal message handling
  // Create Google GenAI client
@@ -444,9 +444,13 @@ export class GoogleAIStudioProvider extends BaseProvider {
  // Get tools consistently with generate method (include user-provided RAG tools)
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const baseTools = shouldUseTools ? await this.getAllTools() : {};
- const tools = shouldUseTools
+ const rawTools = shouldUseTools
  ? { ...baseTools, ...(options.tools || {}) }
  : {};
+ // Sanitize tool schemas for Gemini proto compatibility (converts anyOf/oneOf unions to string)
+ const tools = Object.keys(rawTools).length > 0
+ ? sanitizeToolsForGemini(rawTools)
+ : rawTools;
  // Build message array from options with multimodal support
  // Using protected helper from BaseProvider to eliminate code duplication
  const messages = await this.buildMessagesForStream(options);
@@ -1027,6 +1031,86 @@
  },
  };
  }
+ getDefaultEmbeddingModel() {
+ return (process.env.GOOGLE_AI_EMBEDDING_MODEL ||
+ process.env.GOOGLE_EMBEDDING_MODEL ||
+ "gemini-embedding-001");
+ }
+ /**
+ * Generate embeddings for text using Google AI Studio embedding models
+ * @param text - The text to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to the embedding vector
+ */
+ async embed(text, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "gemini-embedding-001";
+ logger.debug("Generating embedding", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ textLength: text.length,
+ });
+ try {
+ const apiKey = this.getApiKey();
+ const google = createGoogleGenerativeAI({ apiKey });
+ const embeddingModel = google.textEmbeddingModel(embeddingModelName);
+ const result = await embed({
+ model: embeddingModel,
+ value: text,
+ });
+ logger.debug("Embedding generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ embeddingDimension: result.embedding.length,
+ });
+ return result.embedding;
+ }
+ catch (error) {
+ logger.error("Embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ textLength: text.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "gemini-embedding-001";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const apiKey = this.getApiKey();
+ const google = createGoogleGenerativeAI({ apiKey });
+ const embeddingModel = google.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  getApiKey() {
  const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
  if (!apiKey) {
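
The getDefaultEmbeddingModel() chain above means the Google AI Studio default can be steered by environment variables without touching call sites. A sketch, assuming a GoogleAIStudioProvider instance `provider` as in the earlier usage example (the override model name is an arbitrary illustration):

    // Resolution order from the diff: GOOGLE_AI_EMBEDDING_MODEL, then
    // GOOGLE_EMBEDDING_MODEL, then the "gemini-embedding-001" fallback.
    process.env.GOOGLE_AI_EMBEDDING_MODEL = "text-embedding-004"; // hypothetical override
    const a = await provider.embed("hello");                         // uses the override
    const b = await provider.embed("hello", "gemini-embedding-001"); // explicit argument wins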
package/dist/providers/googleNativeGemini3.d.ts (identical change in package/dist/lib/providers/googleNativeGemini3.d.ts)
@@ -8,7 +8,7 @@
  * This module extracts the functions that are duplicated between the two
  * providers so they can share a single implementation.
  */
- import type { Tool } from "ai";
+ import { type Tool } from "ai";
  import type { ThinkingConfig } from "../utils/thinkingConfig.js";
  /** A single native @google/genai function declaration. */
  export type NativeFunctionDeclaration = {
@@ -44,6 +44,29 @@ export type CollectedChunkResult = {
  inputTokens: number;
  outputTokens: number;
  };
+ /**
+ * Sanitize a JSON Schema for Gemini's proto-based API.
+ *
+ * Gemini cannot handle `anyOf`/`oneOf` union types in function declarations
+ * because its proto format expects a single `type` field, not a list of types.
+ * This function recursively converts unions to `string` type (the most
+ * permissive primitive that can represent any value as text).
+ *
+ * Also removes `$schema`, `additionalProperties`, and `default` keys that
+ * Gemini's proto format doesn't support.
+ */
+ export declare function sanitizeSchemaForGemini(schema: Record<string, unknown>): Record<string, unknown>;
+ /**
+ * Sanitize Vercel AI SDK tools for Gemini compatibility.
+ *
+ * For the Vercel AI SDK path (non-native), tool parameters are Zod schemas that
+ * get converted to JSON Schema internally by @ai-sdk/google. This conversion
+ * doesn't sanitize union types (anyOf/oneOf), causing Gemini proto errors.
+ *
+ * This function pre-converts each tool's Zod parameters to sanitized JSON Schema
+ * and re-wraps with the Vercel AI SDK's jsonSchema() helper.
+ */
+ export declare function sanitizeToolsForGemini(tools: Record<string, Tool>): Record<string, Tool>;
  /**
  * Convert Vercel AI SDK tools to @google/genai FunctionDeclarations and an execute map.
  *
package/dist/providers/googleNativeGemini3.js (identical change in package/dist/lib/providers/googleNativeGemini3.js)
@@ -9,11 +9,127 @@
  * providers so they can share a single implementation.
  */
  import { randomUUID } from "node:crypto";
+ import { jsonSchema as aiJsonSchema, tool as createAISDKTool, } from "ai";
  import { DEFAULT_MAX_STEPS, DEFAULT_TOOL_MAX_RETRIES, } from "../core/constants.js";
  import { logger } from "../utils/logger.js";
  import { convertZodToJsonSchema, inlineJsonSchema, isZodSchema, } from "../utils/schemaConversion.js";
  import { createNativeThinkingConfig } from "../utils/thinkingConfig.js";
  // ── Functions ──
+ /**
+ * Sanitize a JSON Schema for Gemini's proto-based API.
+ *
+ * Gemini cannot handle `anyOf`/`oneOf` union types in function declarations
+ * because its proto format expects a single `type` field, not a list of types.
+ * This function recursively converts unions to `string` type (the most
+ * permissive primitive that can represent any value as text).
+ *
+ * Also removes `$schema`, `additionalProperties`, and `default` keys that
+ * Gemini's proto format doesn't support.
+ */
+ export function sanitizeSchemaForGemini(schema) {
+ // If this node has anyOf/oneOf, collapse to string type
+ if (Array.isArray(schema.anyOf) || Array.isArray(schema.oneOf)) {
+ const unionKey = schema.anyOf ? "anyOf" : "oneOf";
+ const variants = schema[unionKey];
+ // Check if it's a nullable union (e.g., anyOf: [{type: "string"}, {type: "null"}])
+ const nonNullVariants = variants.filter((v) => v.type !== "null" && v.type !== "undefined");
+ if (nonNullVariants.length === 1) {
+ // Simple nullable — use the non-null type with nullable flag
+ const base = sanitizeSchemaForGemini({ ...nonNullVariants[0] });
+ base.nullable = true;
+ if (schema.description) {
+ base.description = schema.description;
+ }
+ return base;
+ }
+ // Multi-type union — collapse to string with description noting the original types
+ const types = nonNullVariants.map((v) => v.type || "unknown").join(" | ");
+ const result = { type: "string" };
+ const desc = schema.description
+ ? `${schema.description} (accepts: ${types})`
+ : `Value as string (accepts: ${types})`;
+ result.description = desc;
+ if (variants.some((v) => v.type === "null")) {
+ result.nullable = true;
+ }
+ return result;
+ }
+ const result = {};
+ for (const [key, value] of Object.entries(schema)) {
+ // Skip keys unsupported by Gemini proto format
+ if (key === "$schema" ||
+ key === "additionalProperties" ||
+ key === "default") {
+ continue;
+ }
+ if (key === "properties" && value && typeof value === "object") {
+ const properties = {};
+ for (const [propName, propSchema] of Object.entries(value)) {
+ if (propSchema && typeof propSchema === "object") {
+ properties[propName] = sanitizeSchemaForGemini(propSchema);
+ }
+ else {
+ properties[propName] = propSchema;
+ }
+ }
+ result[key] = properties;
+ }
+ else if (key === "items" && value && typeof value === "object") {
+ if (Array.isArray(value)) {
+ result[key] = value.map((item) => item && typeof item === "object"
+ ? sanitizeSchemaForGemini(item)
+ : item);
+ }
+ else {
+ result[key] = sanitizeSchemaForGemini(value);
+ }
+ }
+ else {
+ result[key] = value;
+ }
+ }
+ return result;
+ }
+ /**
+ * Sanitize Vercel AI SDK tools for Gemini compatibility.
+ *
+ * For the Vercel AI SDK path (non-native), tool parameters are Zod schemas that
+ * get converted to JSON Schema internally by @ai-sdk/google. This conversion
+ * doesn't sanitize union types (anyOf/oneOf), causing Gemini proto errors.
+ *
+ * This function pre-converts each tool's Zod parameters to sanitized JSON Schema
+ * and re-wraps with the Vercel AI SDK's jsonSchema() helper.
+ */
+ export function sanitizeToolsForGemini(tools) {
+ const sanitized = {};
+ for (const [name, tool] of Object.entries(tools)) {
+ try {
+ const params = tool.parameters;
+ if (params &&
+ typeof params === "object" &&
+ "_def" in params &&
+ typeof params.parse === "function") {
+ const rawJsonSchema = convertZodToJsonSchema(params);
+ const inlined = inlineJsonSchema(rawJsonSchema);
+ const sanitizedSchema = sanitizeSchemaForGemini(inlined);
+ sanitized[name] = createAISDKTool({
+ description: tool.description || `Tool: ${name}`,
+ parameters: aiJsonSchema(sanitizedSchema),
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ execute: tool.execute,
+ });
+ }
+ else {
+ sanitized[name] = tool;
+ }
+ }
+ catch (error) {
+ logger.warn(`[Gemini] Failed to sanitize tool "${name}", using original`, { error: error instanceof Error ? error.message : String(error) });
+ sanitized[name] = tool;
+ }
+ }
+ return sanitized;
+ }
  /**
  * Convert Vercel AI SDK tools to @google/genai FunctionDeclarations and an execute map.
  *
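
To make the collapsing rules concrete, here is what sanitizeSchemaForGemini produces for the two union cases handled above (expected output shown as comments, traced from the code):

    // Nullable union: the non-null variant is kept and marked nullable.
    sanitizeSchemaForGemini({
      anyOf: [{ type: "string" }, { type: "null" }],
      description: "optional name",
    });
    // -> { type: "string", nullable: true, description: "optional name" }

    // Multi-type union: collapsed to string, original types noted in the description.
    sanitizeSchemaForGemini({ anyOf: [{ type: "number" }, { type: "boolean" }] });
    // -> { type: "string", description: "Value as string (accepts: number | boolean)" }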
@@ -38,10 +154,7 @@ export function buildNativeToolDeclarations(tools) {
  else {
  rawSchema = { type: "object", properties: {} };
  }
- decl.parametersJsonSchema = inlineJsonSchema(rawSchema);
- if (decl.parametersJsonSchema.$schema) {
- delete decl.parametersJsonSchema.$schema;
- }
+ decl.parametersJsonSchema = sanitizeSchemaForGemini(inlineJsonSchema(rawSchema));
  }
  functionDeclarations.push(decl);
  if (tool.execute) {
package/dist/providers/googleVertex.d.ts (identical change in package/dist/lib/providers/googleVertex.d.ts)
@@ -298,6 +298,13 @@ export declare class GoogleVertexProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-004)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  /**
  * Get model suggestions when a model is not found
  */