@juspay/neurolink 9.17.1 → 9.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+ ## [9.18.0](https://github.com/juspay/neurolink/compare/v9.17.2...v9.18.0) (2026-03-07)
+
+ ### Features
+
+ - **(sdk):** add embed() and embedMany() support across providers and server ([17243ad](https://github.com/juspay/neurolink/commit/17243ada417a192caa6e555f92df23938ddca6aa))
+
+ ## [9.17.2](https://github.com/juspay/neurolink/compare/v9.17.1...v9.17.2) (2026-03-07)
+
+ ### Bug Fixes
+
+ - **(docs):** fall back to local search when Algolia index doesn't exist ([93f92b6](https://github.com/juspay/neurolink/commit/93f92b6a5c898b099e4684cadfd793a807752091))
+
  ## [9.17.1](https://github.com/juspay/neurolink/compare/v9.17.0...v9.17.1) (2026-03-07)

  ### Bug Fixes
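The headline change in 9.18.0 is the `embed()` / `embedMany()` surface added to every provider and to the agent server (routes shown further down). A minimal usage sketch; the `ProviderFactory` re-export from the package root is an assumption, but the method signatures match the `AIProvider` type in this diff:

```ts
import { ProviderFactory } from "@juspay/neurolink"; // assumed re-export; the server routes below use this factory internally

const provider = await ProviderFactory.createProvider("openai");

// Single text -> one vector
const vector: number[] = await provider.embed("hello world", "text-embedding-3-small");

// Batch -> one vector per input, in input order
const vectors: number[][] = await provider.embedMany(
  ["first document", "second document"],
  "text-embedding-3-small",
);
console.log(vector.length, vectors.length); // embedding dimension, batch size
```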
@@ -153,6 +153,19 @@ export declare abstract class BaseProvider implements AIProvider {
  * ```
  */
  embed(text: string, _modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ *
+ * This is a default implementation that throws an error.
+ * Providers that support embeddings should override this method.
+ * The AI SDK's embedMany automatically handles chunking for models with batch limits.
+ *
+ * @param texts - The texts to embed
+ * @param _modelName - Optional embedding model name (provider-specific)
+ * @returns Promise resolving to an array of embedding vectors
+ * @throws Error if the provider does not support embeddings
+ */
+ embedMany(texts: string[], _modelName?: string): Promise<number[][]>;
  /**
  * Get the default embedding model for this provider
  *
@@ -756,6 +756,27 @@ export class BaseProvider {
  `Use an embedding model like text-embedding-3-small (OpenAI), text-embedding-004 (Vertex), ` +
  `or amazon.titan-embed-text-v2:0 (Bedrock).`);
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ *
+ * This is a default implementation that throws an error.
+ * Providers that support embeddings should override this method.
+ * The AI SDK's embedMany automatically handles chunking for models with batch limits.
+ *
+ * @param texts - The texts to embed
+ * @param _modelName - Optional embedding model name (provider-specific)
+ * @returns Promise resolving to an array of embedding vectors
+ * @throws Error if the provider does not support embeddings
+ */
+ async embedMany(texts, _modelName) {
+ logger.warn(`embedMany() called on ${this.providerName} which does not have a native implementation`, {
+ count: texts.length,
+ });
+ throw new Error(`Batch embedding generation is not supported by the ${this.providerName} provider. ` +
+ `Supported providers: openai, googleAiStudio, vertex/google, bedrock. ` +
+ `Use an embedding model like text-embedding-3-small (OpenAI), gemini-embedding-001 (Google AI), ` +
+ `text-embedding-004 (Vertex), or amazon.titan-embed-text-v2:0 (Bedrock).`);
+ }
  /**
  * Get the default embedding model for this provider
  *
@@ -153,6 +153,19 @@ export declare abstract class BaseProvider implements AIProvider {
  * ```
  */
  embed(text: string, _modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ *
+ * This is a default implementation that throws an error.
+ * Providers that support embeddings should override this method.
+ * The AI SDK's embedMany automatically handles chunking for models with batch limits.
+ *
+ * @param texts - The texts to embed
+ * @param _modelName - Optional embedding model name (provider-specific)
+ * @returns Promise resolving to an array of embedding vectors
+ * @throws Error if the provider does not support embeddings
+ */
+ embedMany(texts: string[], _modelName?: string): Promise<number[][]>;
  /**
  * Get the default embedding model for this provider
  *
@@ -756,6 +756,27 @@ export class BaseProvider {
  `Use an embedding model like text-embedding-3-small (OpenAI), text-embedding-004 (Vertex), ` +
  `or amazon.titan-embed-text-v2:0 (Bedrock).`);
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ *
+ * This is a default implementation that throws an error.
+ * Providers that support embeddings should override this method.
+ * The AI SDK's embedMany automatically handles chunking for models with batch limits.
+ *
+ * @param texts - The texts to embed
+ * @param _modelName - Optional embedding model name (provider-specific)
+ * @returns Promise resolving to an array of embedding vectors
+ * @throws Error if the provider does not support embeddings
+ */
+ async embedMany(texts, _modelName) {
+ logger.warn(`embedMany() called on ${this.providerName} which does not have a native implementation`, {
+ count: texts.length,
+ });
+ throw new Error(`Batch embedding generation is not supported by the ${this.providerName} provider. ` +
+ `Supported providers: openai, googleAiStudio, vertex/google, bedrock. ` +
+ `Use an embedding model like text-embedding-3-small (OpenAI), gemini-embedding-001 (Google AI), ` +
+ `text-embedding-004 (Vertex), or amazon.titan-embed-text-v2:0 (Bedrock).`);
+ }
  /**
  * Get the default embedding model for this provider
  *
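The default `embedMany()` above only logs a warning and throws; providers with embedding support override it. A sketch of the override pattern the OpenAI and Google implementations below follow, written as a standalone function under assumed wiring (real providers subclass `BaseProvider` and reuse its logging and error handling):

```ts
import { createOpenAI } from "@ai-sdk/openai";
import { embedMany } from "ai";

// Standalone sketch of a provider override; apiKey handling is illustrative.
async function embedManySketch(texts: string[], modelName?: string): Promise<number[][]> {
  const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const model = openai.textEmbeddingModel(modelName || "text-embedding-3-small");
  // The AI SDK's embedMany chunks the request for models with batch limits.
  const { embeddings } = await embedMany({ model, values: texts });
  return embeddings;
}
```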
@@ -53,4 +53,11 @@ export declare class AmazonBedrockProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: amazon.titan-embed-text-v2:0)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  }
@@ -1470,5 +1470,37 @@ export class AmazonBedrockProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: amazon.titan-embed-text-v2:0)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || "amazon.titan-embed-text-v2:0";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const embeddings = await Promise.all(texts.map((text) => this.embed(text, embeddingModelName)));
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: embeddings.length,
+ embeddingDimension: embeddings[0]?.length,
+ });
+ return embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  }
  //# sourceMappingURL=amazonBedrock.js.map
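Note that unlike the OpenAI, Google AI Studio, and Vertex implementations below, Bedrock's `embedMany()` fans out one `embed()` call per text via `Promise.all`, so a 500-text batch issues 500 concurrent requests. Where that risks throttling, callers can bound the fan-out themselves; a hypothetical helper, not part of the package:

```ts
// Embed texts with at most `limit` in-flight requests at a time.
async function embedManyBounded(
  embedOne: (text: string) => Promise<number[]>,
  texts: string[],
  limit = 8,
): Promise<number[][]> {
  const results: number[][] = new Array(texts.length);
  let next = 0;
  // Each worker pulls the next unclaimed index; index claiming is safe
  // because JavaScript runs these continuations on a single thread.
  const worker = async () => {
    while (next < texts.length) {
      const i = next++;
      results[i] = await embedOne(texts[i]);
    }
  };
  await Promise.all(Array.from({ length: Math.min(limit, texts.length) }, worker));
  return results;
}

// Usage against a Bedrock provider instance (illustrative):
// const vectors = await embedManyBounded((t) => provider.embed(t), texts, 8);
```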
@@ -75,6 +75,21 @@ export declare class GoogleAIStudioProvider extends BaseProvider {
  */
  generate(optionsOrPrompt: TextGenerationOptions | string): Promise<EnhancedGenerateResult | null>;
  private executeAudioStreamViaGeminiLive;
+ protected getDefaultEmbeddingModel(): string;
+ /**
+ * Generate embeddings for text using Google AI Studio embedding models
+ * @param text - The text to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to the embedding vector
+ */
+ embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  private getApiKey;
  }
  export default GoogleAIStudioProvider;
@@ -1,5 +1,5 @@
  import { createGoogleGenerativeAI } from "@ai-sdk/google";
- import { streamText } from "ai";
+ import { embed, embedMany, streamText, } from "ai";
  import { ErrorCategory, ErrorSeverity, GoogleAIModels, } from "../constants/enums.js";
  import { estimateTokens } from "../utils/tokenEstimation.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -1027,6 +1027,86 @@ export class GoogleAIStudioProvider extends BaseProvider {
  },
  };
  }
+ getDefaultEmbeddingModel() {
+ return (process.env.GOOGLE_AI_EMBEDDING_MODEL ||
+ process.env.GOOGLE_EMBEDDING_MODEL ||
+ "gemini-embedding-001");
+ }
+ /**
+ * Generate embeddings for text using Google AI Studio embedding models
+ * @param text - The text to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to the embedding vector
+ */
+ async embed(text, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "gemini-embedding-001";
+ logger.debug("Generating embedding", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ textLength: text.length,
+ });
+ try {
+ const apiKey = this.getApiKey();
+ const google = createGoogleGenerativeAI({ apiKey });
+ const embeddingModel = google.textEmbeddingModel(embeddingModelName);
+ const result = await embed({
+ model: embeddingModel,
+ value: text,
+ });
+ logger.debug("Embedding generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ embeddingDimension: result.embedding.length,
+ });
+ return result.embedding;
+ }
+ catch (error) {
+ logger.error("Embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ textLength: text.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "gemini-embedding-001";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const apiKey = this.getApiKey();
+ const google = createGoogleGenerativeAI({ apiKey });
+ const embeddingModel = google.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  getApiKey() {
  const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
  if (!apiKey) {
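The embedding model for Google AI Studio resolves in this order: the explicit `modelName` argument, then `GOOGLE_AI_EMBEDDING_MODEL`, then `GOOGLE_EMBEDDING_MODEL`, then the built-in `gemini-embedding-001` fallback. A standalone restatement of the resolution logic above:

```ts
// Mirrors getDefaultEmbeddingModel() plus the modelName override in embed()/embedMany().
function resolveGoogleEmbeddingModel(modelName?: string): string {
  return (
    modelName ||
    process.env.GOOGLE_AI_EMBEDDING_MODEL ||
    process.env.GOOGLE_EMBEDDING_MODEL ||
    "gemini-embedding-001"
  );
}

process.env.GOOGLE_AI_EMBEDDING_MODEL = "text-embedding-004";
console.log(resolveGoogleEmbeddingModel()); // "text-embedding-004" (env var beats the fallback)
console.log(resolveGoogleEmbeddingModel("gemini-embedding-001")); // explicit argument beats both env vars
```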
@@ -298,6 +298,13 @@ export declare class GoogleVertexProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-004)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  /**
  * Get model suggestions when a model is not found
  */
@@ -1,6 +1,6 @@
  import { createVertex, } from "@ai-sdk/google-vertex";
  import { createVertexAnthropic, } from "@ai-sdk/google-vertex/anthropic";
- import { Output, streamText, } from "ai";
+ import { embed, embedMany, Output, streamText, } from "ai";
  import { trace, SpanKind, SpanStatusCode } from "@opentelemetry/api";
  import dns from "node:dns";
  import fs from "fs";
@@ -2691,8 +2691,6 @@ export class GoogleVertexProvider extends BaseProvider {
  textLength: text.length,
  });
  try {
- // Create embedding model using the AI SDK
- const { embed } = await import("ai");
  // Create the Vertex provider with current settings
  const vertexSettings = await createVertexSettings(this.location);
  const vertex = createVertex(vertexSettings);
@@ -2719,6 +2717,44 @@
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-004)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "text-embedding-004";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const vertexSettings = await createVertexSettings(this.location);
+ const vertex = createVertex(vertexSettings);
+ const embeddingModel = vertex.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  /**
  * Get model suggestions when a model is not found
  */
@@ -59,5 +59,12 @@ export declare class OpenAIProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-3-small)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  }
  export default OpenAIProvider;
@@ -1,5 +1,5 @@
  import { createOpenAI } from "@ai-sdk/openai";
- import { streamText } from "ai";
+ import { embed, embedMany, streamText, } from "ai";
  import { trace, SpanKind, SpanStatusCode } from "@opentelemetry/api";
  import { AIProviderName } from "../constants/enums.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -529,7 +529,6 @@ export class OpenAIProvider extends BaseProvider {
  });
  try {
  // Create embedding model using the AI SDK
- const { embed } = await import("ai");
  // Create the OpenAI provider
  const openai = createOpenAI({
  apiKey: getOpenAIApiKey(),
@@ -558,6 +557,46 @@
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-3-small)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || "text-embedding-3-small";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const openai = createOpenAI({
+ apiKey: getOpenAIApiKey(),
+ fetch: createProxyFetch(),
+ });
+ const embeddingModel = openai.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  }
  // Export for factory registration
  export default OpenAIProvider;
@@ -4,7 +4,7 @@
  */
  import { ProviderFactory } from "../../factories/providerFactory.js";
  import { createStreamRedactor } from "../utils/redaction.js";
- import { AgentExecuteRequestSchema, validateRequest, } from "../utils/validation.js";
+ import { AgentExecuteRequestSchema, createErrorResponse as createError, EmbedManyRequestSchema, EmbedRequestSchema, validateRequest, } from "../utils/validation.js";
  /**
  * Create agent routes
  */
@@ -107,6 +107,65 @@ export function createAgentRoutes(basePath = "/api") {
  description: "List available AI providers",
  tags: ["agent", "providers"],
  },
+ {
+ method: "POST",
+ path: `${basePath}/agent/embed`,
+ handler: async (ctx) => {
+ const validation = validateRequest(EmbedRequestSchema, ctx.body, ctx.requestId);
+ if (!validation.success) {
+ return validation.error;
+ }
+ const request = validation.data;
+ try {
+ const providerName = request.provider || "openai";
+ const provider = await ProviderFactory.createProvider(providerName, request.model);
+ const embedding = await provider.embed(request.text, request.model);
+ return {
+ embedding,
+ provider: providerName,
+ model: request.model || "default",
+ dimension: embedding.length,
+ };
+ }
+ catch (error) {
+ return createError("EXECUTION_FAILED", error instanceof Error
+ ? error.message
+ : "Embedding generation failed", undefined, ctx.requestId);
+ }
+ },
+ description: "Generate embedding for a single text",
+ tags: ["agent", "embeddings"],
+ },
+ {
+ method: "POST",
+ path: `${basePath}/agent/embed-many`,
+ handler: async (ctx) => {
+ const validation = validateRequest(EmbedManyRequestSchema, ctx.body, ctx.requestId);
+ if (!validation.success) {
+ return validation.error;
+ }
+ const request = validation.data;
+ try {
+ const providerName = request.provider || "openai";
+ const provider = await ProviderFactory.createProvider(providerName, request.model);
+ const embeddings = await provider.embedMany(request.texts, request.model);
+ return {
+ embeddings,
+ provider: providerName,
+ model: request.model || "default",
+ count: embeddings.length,
+ dimension: embeddings[0]?.length ?? 0,
+ };
+ }
+ catch (error) {
+ return createError("EXECUTION_FAILED", error instanceof Error
+ ? error.message
+ : "Batch embedding generation failed", undefined, ctx.requestId);
+ }
+ },
+ description: "Generate embeddings for multiple texts in a batch",
+ tags: ["agent", "embeddings"],
+ },
  ],
  };
  }
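With the agent server running, the two new routes accept JSON bodies matching the `EmbedRequest` / `EmbedManyRequest` types below. A sketch assuming the default `/api` base path and a local server on port 3000 (host and port are illustrative):

```ts
const res = await fetch("http://localhost:3000/api/agent/embed-many", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    texts: ["first document", "second document"],
    provider: "openai",              // optional; the handler defaults to "openai"
    model: "text-embedding-3-small", // optional; falls back to the provider default
  }),
});
const { embeddings, count, dimension } = await res.json();
console.log(count, dimension); // 2 and the model's embedding dimension, e.g. 1536
```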
@@ -458,6 +458,56 @@ export type AgentExecuteResponse = {
  /** Response metadata */
  metadata?: Record<string, JsonValue>;
  };
+ /**
+ * Embed request (single text)
+ */
+ export type EmbedRequest = {
+ /** Text to embed */
+ text: string;
+ /** Provider to use (optional) */
+ provider?: string;
+ /** Embedding model to use (optional) */
+ model?: string;
+ };
+ /**
+ * Embed response (single text)
+ */
+ export type EmbedResponse = {
+ /** The embedding vector */
+ embedding: number[];
+ /** Provider used */
+ provider: string;
+ /** Model used */
+ model: string;
+ /** Embedding dimension */
+ dimension: number;
+ };
+ /**
+ * Embed many request (batch texts)
+ */
+ export type EmbedManyRequest = {
+ /** Texts to embed */
+ texts: string[];
+ /** Provider to use (optional) */
+ provider?: string;
+ /** Embedding model to use (optional) */
+ model?: string;
+ };
+ /**
+ * Embed many response (batch texts)
+ */
+ export type EmbedManyResponse = {
+ /** The embedding vectors */
+ embeddings: number[][];
+ /** Provider used */
+ provider: string;
+ /** Model used */
+ model: string;
+ /** Number of embeddings */
+ count: number;
+ /** Embedding dimension */
+ dimension: number;
+ };
  /**
  * Tool execution request
  */
@@ -168,6 +168,38 @@ export declare const SessionMessagesQuerySchema: z.ZodObject<{
  offset?: string | undefined;
  limit?: string | undefined;
  }>;
+ /**
+ * Embed request schema (single text)
+ */
+ export declare const EmbedRequestSchema: z.ZodObject<{
+ text: z.ZodString;
+ provider: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ text: string;
+ provider?: string | undefined;
+ model?: string | undefined;
+ }, {
+ text: string;
+ provider?: string | undefined;
+ model?: string | undefined;
+ }>;
+ /**
+ * Embed many request schema (batch texts)
+ */
+ export declare const EmbedManyRequestSchema: z.ZodObject<{
+ texts: z.ZodArray<z.ZodString, "many">;
+ provider: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ texts: string[];
+ provider?: string | undefined;
+ model?: string | undefined;
+ }, {
+ texts: string[];
+ provider?: string | undefined;
+ model?: string | undefined;
+ }>;
  /**
  * Standardized error response format
  */
@@ -108,6 +108,25 @@ export const SessionMessagesQuerySchema = z.object({
  .pipe(z.number().nonnegative())
  .optional(),
  });
+ /**
+ * Embed request schema (single text)
+ */
+ export const EmbedRequestSchema = z.object({
+ text: z.string().min(1, "Text is required"),
+ provider: z.string().optional(),
+ model: z.string().optional(),
+ });
+ /**
+ * Embed many request schema (batch texts)
+ */
+ export const EmbedManyRequestSchema = z.object({
+ texts: z
+ .array(z.string().min(1))
+ .min(1, "At least one text is required")
+ .max(2048, "Maximum 2048 texts per batch"),
+ provider: z.string().optional(),
+ model: z.string().optional(),
+ });
  /**
  * Type guard to check if a value is an ErrorResponse
  */
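These schemas reject an empty `texts` array and cap a batch at 2048 entries; a quick check with Zod's `safeParse` (import path as used by the routes above):

```ts
import { EmbedManyRequestSchema } from "../utils/validation.js";

console.log(EmbedManyRequestSchema.safeParse({ texts: [] }).success);
// false -> "At least one text is required"

console.log(EmbedManyRequestSchema.safeParse({ texts: ["ok"], provider: "openai" }).success);
// true -> data: { texts: ["ok"], provider: "openai" }
```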
@@ -329,6 +329,8 @@ export type AIProvider = {
  stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ValidationSchema): Promise<StreamResult>;
  generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
  gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
+ embed(text: string, modelName?: string): Promise<number[]>;
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  setupToolExecutor(sdk: {
  customTools: Map<string, unknown>;
  executeTool: (toolName: string, params: unknown) => Promise<unknown>;
@@ -53,4 +53,11 @@ export declare class AmazonBedrockProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: amazon.titan-embed-text-v2:0)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  }
@@ -1470,4 +1470,36 @@ export class AmazonBedrockProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: amazon.titan-embed-text-v2:0)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || "amazon.titan-embed-text-v2:0";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const embeddings = await Promise.all(texts.map((text) => this.embed(text, embeddingModelName)));
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: embeddings.length,
+ embeddingDimension: embeddings[0]?.length,
+ });
+ return embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  }
@@ -75,6 +75,21 @@ export declare class GoogleAIStudioProvider extends BaseProvider {
  */
  generate(optionsOrPrompt: TextGenerationOptions | string): Promise<EnhancedGenerateResult | null>;
  private executeAudioStreamViaGeminiLive;
+ protected getDefaultEmbeddingModel(): string;
+ /**
+ * Generate embeddings for text using Google AI Studio embedding models
+ * @param text - The text to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to the embedding vector
+ */
+ embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  private getApiKey;
  }
  export default GoogleAIStudioProvider;
@@ -1,5 +1,5 @@
  import { createGoogleGenerativeAI } from "@ai-sdk/google";
- import { streamText } from "ai";
+ import { embed, embedMany, streamText, } from "ai";
  import { ErrorCategory, ErrorSeverity, GoogleAIModels, } from "../constants/enums.js";
  import { estimateTokens } from "../utils/tokenEstimation.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -1027,6 +1027,86 @@ export class GoogleAIStudioProvider extends BaseProvider {
  },
  };
  }
+ getDefaultEmbeddingModel() {
+ return (process.env.GOOGLE_AI_EMBEDDING_MODEL ||
+ process.env.GOOGLE_EMBEDDING_MODEL ||
+ "gemini-embedding-001");
+ }
+ /**
+ * Generate embeddings for text using Google AI Studio embedding models
+ * @param text - The text to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to the embedding vector
+ */
+ async embed(text, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "gemini-embedding-001";
+ logger.debug("Generating embedding", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ textLength: text.length,
+ });
+ try {
+ const apiKey = this.getApiKey();
+ const google = createGoogleGenerativeAI({ apiKey });
+ const embeddingModel = google.textEmbeddingModel(embeddingModelName);
+ const result = await embed({
+ model: embeddingModel,
+ value: text,
+ });
+ logger.debug("Embedding generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ embeddingDimension: result.embedding.length,
+ });
+ return result.embedding;
+ }
+ catch (error) {
+ logger.error("Embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ textLength: text.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: gemini-embedding-001)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "gemini-embedding-001";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const apiKey = this.getApiKey();
+ const google = createGoogleGenerativeAI({ apiKey });
+ const embeddingModel = google.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  getApiKey() {
  const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
  if (!apiKey) {
@@ -298,6 +298,13 @@ export declare class GoogleVertexProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-004)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  /**
  * Get model suggestions when a model is not found
  */
@@ -1,6 +1,6 @@
  import { createVertex, } from "@ai-sdk/google-vertex";
  import { createVertexAnthropic, } from "@ai-sdk/google-vertex/anthropic";
- import { Output, streamText, } from "ai";
+ import { embed, embedMany, Output, streamText, } from "ai";
  import { trace, SpanKind, SpanStatusCode } from "@opentelemetry/api";
  import dns from "node:dns";
  import fs from "fs";
@@ -2691,8 +2691,6 @@ export class GoogleVertexProvider extends BaseProvider {
  textLength: text.length,
  });
  try {
- // Create embedding model using the AI SDK
- const { embed } = await import("ai");
  // Create the Vertex provider with current settings
  const vertexSettings = await createVertexSettings(this.location);
  const vertex = createVertex(vertexSettings);
@@ -2719,6 +2717,44 @@
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-004)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || this.getDefaultEmbeddingModel() || "text-embedding-004";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const vertexSettings = await createVertexSettings(this.location);
+ const vertex = createVertex(vertexSettings);
+ const embeddingModel = vertex.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  /**
  * Get model suggestions when a model is not found
  */
@@ -59,5 +59,12 @@ export declare class OpenAIProvider extends BaseProvider {
  * @returns Promise resolving to the embedding vector
  */
  embed(text: string, modelName?: string): Promise<number[]>;
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-3-small)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  }
  export default OpenAIProvider;
@@ -1,5 +1,5 @@
  import { createOpenAI } from "@ai-sdk/openai";
- import { streamText } from "ai";
+ import { embed, embedMany, streamText, } from "ai";
  import { trace, SpanKind, SpanStatusCode } from "@opentelemetry/api";
  import { AIProviderName } from "../constants/enums.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -529,7 +529,6 @@ export class OpenAIProvider extends BaseProvider {
  });
  try {
  // Create embedding model using the AI SDK
- const { embed } = await import("ai");
  // Create the OpenAI provider
  const openai = createOpenAI({
  apiKey: getOpenAIApiKey(),
@@ -558,6 +557,46 @@
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Generate embeddings for multiple texts in a single batch
+ * @param texts - The texts to embed
+ * @param modelName - The embedding model to use (default: text-embedding-3-small)
+ * @returns Promise resolving to an array of embedding vectors
+ */
+ async embedMany(texts, modelName) {
+ const embeddingModelName = modelName || "text-embedding-3-small";
+ logger.debug("Generating batch embeddings", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ try {
+ const openai = createOpenAI({
+ apiKey: getOpenAIApiKey(),
+ fetch: createProxyFetch(),
+ });
+ const embeddingModel = openai.textEmbeddingModel(embeddingModelName);
+ const result = await embedMany({
+ model: embeddingModel,
+ values: texts,
+ });
+ logger.debug("Batch embeddings generated successfully", {
+ provider: this.providerName,
+ model: embeddingModelName,
+ count: result.embeddings.length,
+ embeddingDimension: result.embeddings[0]?.length,
+ });
+ return result.embeddings;
+ }
+ catch (error) {
+ logger.error("Batch embedding generation failed", {
+ error: error instanceof Error ? error.message : String(error),
+ model: embeddingModelName,
+ count: texts.length,
+ });
+ throw this.handleProviderError(error);
+ }
+ }
  }
  // Export for factory registration
  export default OpenAIProvider;
@@ -4,7 +4,7 @@
  */
  import { ProviderFactory } from "../../factories/providerFactory.js";
  import { createStreamRedactor } from "../utils/redaction.js";
- import { AgentExecuteRequestSchema, validateRequest, } from "../utils/validation.js";
+ import { AgentExecuteRequestSchema, createErrorResponse as createError, EmbedManyRequestSchema, EmbedRequestSchema, validateRequest, } from "../utils/validation.js";
  /**
  * Create agent routes
  */
@@ -107,6 +107,65 @@ export function createAgentRoutes(basePath = "/api") {
  description: "List available AI providers",
  tags: ["agent", "providers"],
  },
+ {
+ method: "POST",
+ path: `${basePath}/agent/embed`,
+ handler: async (ctx) => {
+ const validation = validateRequest(EmbedRequestSchema, ctx.body, ctx.requestId);
+ if (!validation.success) {
+ return validation.error;
+ }
+ const request = validation.data;
+ try {
+ const providerName = request.provider || "openai";
+ const provider = await ProviderFactory.createProvider(providerName, request.model);
+ const embedding = await provider.embed(request.text, request.model);
+ return {
+ embedding,
+ provider: providerName,
+ model: request.model || "default",
+ dimension: embedding.length,
+ };
+ }
+ catch (error) {
+ return createError("EXECUTION_FAILED", error instanceof Error
+ ? error.message
+ : "Embedding generation failed", undefined, ctx.requestId);
+ }
+ },
+ description: "Generate embedding for a single text",
+ tags: ["agent", "embeddings"],
+ },
+ {
+ method: "POST",
+ path: `${basePath}/agent/embed-many`,
+ handler: async (ctx) => {
+ const validation = validateRequest(EmbedManyRequestSchema, ctx.body, ctx.requestId);
+ if (!validation.success) {
+ return validation.error;
+ }
+ const request = validation.data;
+ try {
+ const providerName = request.provider || "openai";
+ const provider = await ProviderFactory.createProvider(providerName, request.model);
+ const embeddings = await provider.embedMany(request.texts, request.model);
+ return {
+ embeddings,
+ provider: providerName,
+ model: request.model || "default",
+ count: embeddings.length,
+ dimension: embeddings[0]?.length ?? 0,
+ };
+ }
+ catch (error) {
+ return createError("EXECUTION_FAILED", error instanceof Error
+ ? error.message
+ : "Batch embedding generation failed", undefined, ctx.requestId);
+ }
+ },
+ description: "Generate embeddings for multiple texts in a batch",
+ tags: ["agent", "embeddings"],
+ },
  ],
  };
  }
@@ -458,6 +458,56 @@ export type AgentExecuteResponse = {
  /** Response metadata */
  metadata?: Record<string, JsonValue>;
  };
+ /**
+ * Embed request (single text)
+ */
+ export type EmbedRequest = {
+ /** Text to embed */
+ text: string;
+ /** Provider to use (optional) */
+ provider?: string;
+ /** Embedding model to use (optional) */
+ model?: string;
+ };
+ /**
+ * Embed response (single text)
+ */
+ export type EmbedResponse = {
+ /** The embedding vector */
+ embedding: number[];
+ /** Provider used */
+ provider: string;
+ /** Model used */
+ model: string;
+ /** Embedding dimension */
+ dimension: number;
+ };
+ /**
+ * Embed many request (batch texts)
+ */
+ export type EmbedManyRequest = {
+ /** Texts to embed */
+ texts: string[];
+ /** Provider to use (optional) */
+ provider?: string;
+ /** Embedding model to use (optional) */
+ model?: string;
+ };
+ /**
+ * Embed many response (batch texts)
+ */
+ export type EmbedManyResponse = {
+ /** The embedding vectors */
+ embeddings: number[][];
+ /** Provider used */
+ provider: string;
+ /** Model used */
+ model: string;
+ /** Number of embeddings */
+ count: number;
+ /** Embedding dimension */
+ dimension: number;
+ };
  /**
  * Tool execution request
  */
@@ -168,6 +168,38 @@ export declare const SessionMessagesQuerySchema: z.ZodObject<{
  offset?: string | undefined;
  limit?: string | undefined;
  }>;
+ /**
+ * Embed request schema (single text)
+ */
+ export declare const EmbedRequestSchema: z.ZodObject<{
+ text: z.ZodString;
+ provider: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ text: string;
+ provider?: string | undefined;
+ model?: string | undefined;
+ }, {
+ text: string;
+ provider?: string | undefined;
+ model?: string | undefined;
+ }>;
+ /**
+ * Embed many request schema (batch texts)
+ */
+ export declare const EmbedManyRequestSchema: z.ZodObject<{
+ texts: z.ZodArray<z.ZodString, "many">;
+ provider: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ texts: string[];
+ provider?: string | undefined;
+ model?: string | undefined;
+ }, {
+ texts: string[];
+ provider?: string | undefined;
+ model?: string | undefined;
+ }>;
  /**
  * Standardized error response format
  */
@@ -108,6 +108,25 @@ export const SessionMessagesQuerySchema = z.object({
  .pipe(z.number().nonnegative())
  .optional(),
  });
+ /**
+ * Embed request schema (single text)
+ */
+ export const EmbedRequestSchema = z.object({
+ text: z.string().min(1, "Text is required"),
+ provider: z.string().optional(),
+ model: z.string().optional(),
+ });
+ /**
+ * Embed many request schema (batch texts)
+ */
+ export const EmbedManyRequestSchema = z.object({
+ texts: z
+ .array(z.string().min(1))
+ .min(1, "At least one text is required")
+ .max(2048, "Maximum 2048 texts per batch"),
+ provider: z.string().optional(),
+ model: z.string().optional(),
+ });
  /**
  * Type guard to check if a value is an ErrorResponse
  */
@@ -329,6 +329,8 @@ export type AIProvider = {
  stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ValidationSchema): Promise<StreamResult>;
  generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
  gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
+ embed(text: string, modelName?: string): Promise<number[]>;
+ embedMany(texts: string[], modelName?: string): Promise<number[][]>;
  setupToolExecutor(sdk: {
  customTools: Map<string, unknown>;
  executeTool: (toolName: string, params: unknown) => Promise<unknown>;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "9.17.1",
+ "version": "9.18.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",