agent0-js 0.0.7 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -170,8 +170,148 @@ for await (const chunk of stream) {
170
170
  }
171
171
  ```
172
172
 
173
+ ### `embed(options: EmbedOptions): Promise<EmbedResponse>`
174
+
175
+ Generate an embedding for a single text value.
176
+
177
+ **Parameters:**
178
+
179
+ Extends Vercel AI SDK's `embed` parameters. Only the `model` property is different:
180
+
181
+ ```typescript
182
+ // All options from Vercel AI SDK's embed() are supported
183
+ // Only the model property uses Agent0's format:
184
+ type EmbedOptions = Omit<VercelEmbedOptions, 'model'> & {
185
+ model: {
186
+ provider_id: string; // The provider ID (from your Agent0 providers)
187
+ name: string; // The embedding model name (e.g., 'text-embedding-3-small')
188
+ };
189
+ };
190
+
191
+ // Common options include:
192
+ // - value: string // (required) The text to embed
193
+ // - maxRetries?: number // Maximum number of retries
194
+ // - headers?: Record<string, string>
195
+ // - providerOptions?: {...} // Provider-specific options
196
+ // - experimental_telemetry?: {...}
197
+ // Plus any future options added to Vercel AI SDK!
198
+ ```
199
+
200
+ **Returns:**
201
+
202
+ ```typescript
203
+ interface EmbedResponse {
204
+ embedding: number[]; // The embedding vector
205
+ }
206
+ ```
207
+
208
+ **Example:**
209
+
210
+ ```typescript
211
+ const result = await client.embed({
212
+ model: {
213
+ provider_id: 'your-openai-provider-id',
214
+ name: 'text-embedding-3-small'
215
+ },
216
+ value: 'Hello, world!'
217
+ });
218
+
219
+ console.log('Embedding vector length:', result.embedding.length);
220
+ // Store or use the embedding for similarity search, etc.
221
+ ```
222
+
223
+ ### `embedMany(options: EmbedManyOptions): Promise<EmbedManyResponse>`
224
+
225
+ Generate embeddings for multiple text values in a single request.
226
+
227
+ **Parameters:**
228
+
229
+ Extends Vercel AI SDK's `embedMany` parameters. Only the `model` property is different:
230
+
231
+ ```typescript
232
+ // All options from Vercel AI SDK's embedMany() are supported
233
+ // Only the model property uses Agent0's format:
234
+ type EmbedManyOptions = Omit<VercelEmbedManyOptions, 'model'> & {
235
+ model: {
236
+ provider_id: string; // The provider ID (from your Agent0 providers)
237
+ name: string; // The embedding model name
238
+ };
239
+ };
240
+
241
+ // Common options include:
242
+ // - values: string[] // (required) The texts to embed
243
+ // - maxRetries?: number // Maximum number of retries
244
+ // - headers?: Record<string, string>
245
+ // - providerOptions?: {...} // Provider-specific options
246
+ // Plus any future options added to Vercel AI SDK!
247
+ ```
248
+
249
+ **Returns:**
250
+
251
+ ```typescript
252
+ interface EmbedManyResponse {
253
+ embeddings: number[][]; // Array of embedding vectors (one per input value)
254
+ }
255
+ ```
256
+
257
+ **Example:**
258
+
259
+ ```typescript
260
+ const result = await client.embedMany({
261
+ model: {
262
+ provider_id: 'your-openai-provider-id',
263
+ name: 'text-embedding-3-small'
264
+ },
265
+ values: [
266
+ 'First document to embed',
267
+ 'Second document to embed',
268
+ 'Third document to embed'
269
+ ]
270
+ });
271
+
272
+ console.log('Number of embeddings:', result.embeddings.length);
273
+ result.embeddings.forEach((embedding, i) => {
274
+ console.log(`Embedding ${i} length:`, embedding.length);
275
+ });
276
+ ```
277
+
278
+ **Using Provider Options:**
279
+
280
+ Provider-specific options can be passed to customize embedding behavior:
281
+
282
+ ```typescript
283
+ // Example: OpenAI with custom dimensions
284
+ const result = await client.embed({
285
+ model: {
286
+ provider_id: 'your-openai-provider-id',
287
+ name: 'text-embedding-3-small'
288
+ },
289
+ value: 'Hello, world!',
290
+ providerOptions: {
291
+ openai: {
292
+ dimensions: 256 // Reduce dimensions for smaller vectors
293
+ }
294
+ }
295
+ });
296
+
297
+ // Example: Google with task type
298
+ const googleResult = await client.embed({
299
+ model: {
300
+ provider_id: 'your-google-provider-id',
301
+ name: 'text-embedding-004'
302
+ },
303
+ value: 'Search query text',
304
+ providerOptions: {
305
+ google: {
306
+ taskType: 'RETRIEVAL_QUERY' // Optimize for search queries
307
+ }
308
+ }
309
+ });
310
+ ```
311
+
173
312
  ## Examples
174
313
 
314
+
175
315
  ### Basic Usage (Node.js)
176
316
 
177
317
  ```javascript
@@ -240,8 +380,59 @@ async function streamExample() {
240
380
  streamExample();
241
381
  ```
242
382
 
383
+ ### Embeddings for Semantic Search
384
+
385
+ Generate embeddings to power semantic search, similarity matching, or RAG (Retrieval-Augmented Generation) applications.
386
+
387
+ ```typescript
388
+ import { Agent0 } from 'agent0-js';
389
+
390
+ const client = new Agent0({
391
+ apiKey: process.env.AGENT0_API_KEY!
392
+ });
393
+
394
+ // Embed documents for a knowledge base
395
+ async function embedDocuments() {
396
+ const documents = [
397
+ 'Machine learning is a subset of artificial intelligence.',
398
+ 'Neural networks are inspired by the human brain.',
399
+ 'Deep learning uses multiple layers of neural networks.',
400
+ ];
401
+
402
+ const result = await client.embedMany({
403
+ model: {
404
+ provider_id: 'your-openai-provider-id',
405
+ name: 'text-embedding-3-small'
406
+ },
407
+ values: documents
408
+ });
409
+
410
+ // Store embeddings in your vector database
411
+ result.embeddings.forEach((embedding, i) => {
412
+ console.log(`Document ${i}: ${embedding.length} dimensions`);
413
+ // vectorDB.insert({ text: documents[i], embedding });
414
+ });
415
+ }
416
+
417
+ // Query with semantic search
418
+ async function semanticSearch(query: string) {
419
+ const queryEmbedding = await client.embed({
420
+ model: {
421
+ provider_id: 'your-openai-provider-id',
422
+ name: 'text-embedding-3-small'
423
+ },
424
+ value: query
425
+ });
426
+
427
+ // Use the embedding to find similar documents
428
+ // const results = await vectorDB.search(queryEmbedding.embedding, { limit: 5 });
429
+ console.log('Query embedding dimensions:', queryEmbedding.embedding.length);
430
+ }
431
+ ```
432
+
243
433
  ### Using Variables
244
434
 
435
+
245
436
  Variables allow you to pass dynamic data to your agents. Any variables defined in your agent's prompts will be replaced with the values you provide.
246
437
 
247
438
  ```typescript
package/dist/index.d.ts CHANGED
@@ -1,5 +1,5 @@
1
- import type { TextStreamPart, ToolSet } from 'ai';
2
- import type { Agent0Config, GenerateResponse, RunOptions } from './types';
1
+ import type { TextStreamPart, ToolSet } from "ai";
2
+ import type { Agent0Config, EmbedManyOptions, EmbedManyResponse, EmbedOptions, EmbedResponse, GenerateResponse, RunOptions } from "./types";
3
3
  export declare class Agent0 {
4
4
  private apiKey;
5
5
  private baseUrl;
@@ -7,4 +7,21 @@ export declare class Agent0 {
7
7
  private fetchApi;
8
8
  generate(options: RunOptions): Promise<GenerateResponse>;
9
9
  stream(options: RunOptions): AsyncGenerator<TextStreamPart<ToolSet>, void, unknown>;
10
+ /**
11
+ * Generate an embedding for a single value using the specified model.
12
+ * Accepts all options from Vercel AI SDK's embed function.
13
+ *
14
+ * @param options - The embedding options (extends Vercel AI SDK's embed parameters)
15
+ * @returns The embedding vector
16
+ */
17
+ embed(options: EmbedOptions): Promise<EmbedResponse>;
18
+ /**
19
+ * Generate embeddings for multiple values using the specified model.
20
+ * Accepts all options from Vercel AI SDK's embedMany function.
21
+ *
22
+ * @param options - The embedding options (extends Vercel AI SDK's embedMany parameters)
23
+ * @returns The embedding vectors (one per input value)
24
+ */
25
+ embedMany(options: EmbedManyOptions): Promise<EmbedManyResponse>;
10
26
  }
27
+ export type { Agent0Config, EmbedManyOptions, EmbedManyResponse, EmbedModel, EmbedOptions, EmbedResponse, GenerateResponse, ModelOverrides, ProviderOptions, RunOptions, } from "./types";
package/dist/index.js CHANGED
@@ -4,16 +4,16 @@ exports.Agent0 = void 0;
4
4
  class Agent0 {
5
5
  constructor(config) {
6
6
  this.apiKey = config.apiKey;
7
- this.baseUrl = config.baseUrl || 'https://app.agent0.com'; // Default URL, can be overridden
7
+ this.baseUrl = config.baseUrl || "https://app.agent0.com"; // Default URL, can be overridden
8
8
  }
9
9
  async fetchApi(endpoint, body) {
10
10
  const url = `${this.baseUrl}${endpoint}`;
11
11
  const headers = {
12
- 'Content-Type': 'application/json',
13
- 'x-api-key': this.apiKey,
12
+ "Content-Type": "application/json",
13
+ "x-api-key": this.apiKey,
14
14
  };
15
15
  const response = await fetch(url, {
16
- method: 'POST',
16
+ method: "POST",
17
17
  headers,
18
18
  body: JSON.stringify(body),
19
19
  });
@@ -24,7 +24,7 @@ class Agent0 {
24
24
  return response;
25
25
  }
26
26
  async generate(options) {
27
- const response = await this.fetchApi('/api/v1/run', {
27
+ const response = await this.fetchApi("/api/v1/run", {
28
28
  agent_id: options.agentId,
29
29
  variables: options.variables,
30
30
  overrides: options.overrides,
@@ -34,7 +34,7 @@ class Agent0 {
34
34
  return await response.json();
35
35
  }
36
36
  async *stream(options) {
37
- const response = await this.fetchApi('/api/v1/run', {
37
+ const response = await this.fetchApi("/api/v1/run", {
38
38
  agent_id: options.agentId,
39
39
  variables: options.variables,
40
40
  overrides: options.overrides,
@@ -42,23 +42,23 @@ class Agent0 {
42
42
  stream: true,
43
43
  });
44
44
  if (!response.body) {
45
- throw new Error('Response body is empty');
45
+ throw new Error("Response body is empty");
46
46
  }
47
47
  const reader = response.body.getReader();
48
48
  const decoder = new TextDecoder();
49
- let buffer = '';
49
+ let buffer = "";
50
50
  try {
51
51
  while (true) {
52
52
  const { done, value } = await reader.read();
53
53
  if (done)
54
54
  break;
55
55
  buffer += decoder.decode(value, { stream: true });
56
- const lines = buffer.split('\n');
56
+ const lines = buffer.split("\n");
57
57
  // Keep the last incomplete line in the buffer
58
- buffer = lines.pop() || '';
58
+ buffer = lines.pop() || "";
59
59
  for (const line of lines) {
60
60
  const trimmedLine = line.trim();
61
- if (!trimmedLine || !trimmedLine.startsWith('data: '))
61
+ if (!trimmedLine || !trimmedLine.startsWith("data: "))
62
62
  continue;
63
63
  const data = trimmedLine.slice(6);
64
64
  try {
@@ -66,7 +66,7 @@ class Agent0 {
66
66
  yield parsed;
67
67
  }
68
68
  catch (e) {
69
- console.warn('Failed to parse stream chunk:', data, e);
69
+ console.warn("Failed to parse stream chunk:", data, e);
70
70
  }
71
71
  }
72
72
  }
@@ -75,5 +75,29 @@ class Agent0 {
75
75
  reader.releaseLock();
76
76
  }
77
77
  }
78
+ /**
79
+ * Generate an embedding for a single value using the specified model.
80
+ * Accepts all options from Vercel AI SDK's embed function.
81
+ *
82
+ * @param options - The embedding options (extends Vercel AI SDK's embed parameters)
83
+ * @returns The embedding vector
84
+ */
85
+ async embed(options) {
86
+ // Pass all options directly to the API
87
+ const response = await this.fetchApi("/api/v1/embed", options);
88
+ return await response.json();
89
+ }
90
+ /**
91
+ * Generate embeddings for multiple values using the specified model.
92
+ * Accepts all options from Vercel AI SDK's embedMany function.
93
+ *
94
+ * @param options - The embedding options (extends Vercel AI SDK's embedMany parameters)
95
+ * @returns The embedding vectors (one per input value)
96
+ */
97
+ async embedMany(options) {
98
+ // Pass all options directly to the API
99
+ const response = await this.fetchApi("/api/v1/embed-many", options);
100
+ return await response.json();
101
+ }
78
102
  }
79
103
  exports.Agent0 = Agent0;
package/dist/types.d.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  import type { GoogleGenerativeAIProviderOptions } from "@ai-sdk/google";
2
2
  import type { OpenAIResponsesProviderOptions } from "@ai-sdk/openai";
3
3
  import type { XaiProviderOptions } from "@ai-sdk/xai";
4
- import type { ModelMessage } from "ai";
4
+ import type { embed, embedMany, ModelMessage } from "ai";
5
5
  export interface Agent0Config {
6
6
  apiKey: string;
7
7
  baseUrl?: string;
@@ -49,3 +49,43 @@ export interface GenerateResponse {
49
49
  messages: ModelMessage[];
50
50
  text: string;
51
51
  }
52
+ /**
53
+ * Model specification for Agent0 embedding operations.
54
+ * Instead of passing an EmbeddingModel instance, pass the provider_id and model name.
55
+ */
56
+ export interface EmbedModel {
57
+ /** The provider ID (from your Agent0 providers configuration) */
58
+ provider_id: string;
59
+ /** The embedding model name (e.g., 'text-embedding-3-small', 'text-embedding-ada-002') */
60
+ name: string;
61
+ }
62
+ /**
63
+ * Options for the embed function.
64
+ * Extends Vercel AI SDK's embed parameters, only modifying the `model` property
65
+ * to use Agent0's provider_id + name format instead of an EmbeddingModel instance.
66
+ */
67
+ export type EmbedOptions = Omit<Parameters<typeof embed>[0], "model"> & {
68
+ model: EmbedModel;
69
+ };
70
+ /**
71
+ * Options for the embedMany function.
72
+ * Extends Vercel AI SDK's embedMany parameters, only modifying the `model` property
73
+ * to use Agent0's provider_id + name format instead of an EmbeddingModel instance.
74
+ */
75
+ export type EmbedManyOptions = Omit<Parameters<typeof embedMany>[0], "model"> & {
76
+ model: EmbedModel;
77
+ };
78
+ /**
79
+ * Response from the embed function.
80
+ */
81
+ export interface EmbedResponse {
82
+ /** The embedding vector */
83
+ embedding: number[];
84
+ }
85
+ /**
86
+ * Response from the embedMany function.
87
+ */
88
+ export interface EmbedManyResponse {
89
+ /** The embedding vectors (one per input value) */
90
+ embeddings: number[][];
91
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agent0-js",
3
- "version": "0.0.7",
3
+ "version": "0.0.8",
4
4
  "description": "TypeScript SDK for Agent0",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
package/src/index.ts CHANGED
@@ -1,93 +1,145 @@
1
- import type { TextStreamPart, ToolSet } from 'ai';
2
- import type { Agent0Config, GenerateResponse, RunOptions } from './types';
1
+ import type { TextStreamPart, ToolSet } from "ai";
2
+ import type {
3
+ Agent0Config,
4
+ EmbedManyOptions,
5
+ EmbedManyResponse,
6
+ EmbedOptions,
7
+ EmbedResponse,
8
+ GenerateResponse,
9
+ RunOptions,
10
+ } from "./types";
3
11
 
4
12
  export class Agent0 {
5
- private apiKey: string;
6
- private baseUrl: string;
7
-
8
- constructor(config: Agent0Config) {
9
- this.apiKey = config.apiKey;
10
- this.baseUrl = config.baseUrl || 'https://app.agent0.com'; // Default URL, can be overridden
11
- }
12
-
13
- private async fetchApi(endpoint: string, body: unknown): Promise<Response> {
14
- const url = `${this.baseUrl}${endpoint}`;
15
-
16
- const headers = {
17
- 'Content-Type': 'application/json',
18
- 'x-api-key': this.apiKey,
19
- };
20
-
21
- const response = await fetch(url, {
22
- method: 'POST',
23
- headers,
24
- body: JSON.stringify(body),
25
- });
26
-
27
- if (!response.ok) {
28
- const errorText = await response.text();
29
- throw new Error(`API request failed: ${response.status} ${response.statusText} - ${errorText}`);
30
- }
31
-
32
- return response;
33
- }
34
-
35
- async generate(options: RunOptions): Promise<GenerateResponse> {
36
- const response = await this.fetchApi('/api/v1/run', {
37
- agent_id: options.agentId,
38
- variables: options.variables,
39
- overrides: options.overrides,
40
- extra_messages: options.extraMessages,
41
- stream: false,
42
- });
43
-
44
- return await response.json();
45
- }
46
-
47
- async *stream(options: RunOptions): AsyncGenerator<TextStreamPart<ToolSet>, void, unknown> {
48
- const response = await this.fetchApi('/api/v1/run', {
49
- agent_id: options.agentId,
50
- variables: options.variables,
51
- overrides: options.overrides,
52
- extra_messages: options.extraMessages,
53
- stream: true,
54
- });
55
-
56
- if (!response.body) {
57
- throw new Error('Response body is empty');
58
- }
59
-
60
- const reader = response.body.getReader();
61
- const decoder = new TextDecoder();
62
-
63
- let buffer = '';
64
-
65
- try {
66
- while (true) {
67
- const { done, value } = await reader.read();
68
- if (done) break;
69
-
70
- buffer += decoder.decode(value, { stream: true });
71
- const lines = buffer.split('\n');
72
-
73
- // Keep the last incomplete line in the buffer
74
- buffer = lines.pop() || '';
75
-
76
- for (const line of lines) {
77
- const trimmedLine = line.trim();
78
- if (!trimmedLine || !trimmedLine.startsWith('data: ')) continue;
79
-
80
- const data = trimmedLine.slice(6);
81
- try {
82
- const parsed = JSON.parse(data) as TextStreamPart<ToolSet>;
83
- yield parsed;
84
- } catch (e) {
85
- console.warn('Failed to parse stream chunk:', data, e);
86
- }
87
- }
88
- }
89
- } finally {
90
- reader.releaseLock();
91
- }
92
- }
13
+ private apiKey: string;
14
+ private baseUrl: string;
15
+
16
+ constructor(config: Agent0Config) {
17
+ this.apiKey = config.apiKey;
18
+ this.baseUrl = config.baseUrl || "https://app.agent0.com"; // Default URL, can be overridden
19
+ }
20
+
21
+ private async fetchApi(endpoint: string, body: unknown): Promise<Response> {
22
+ const url = `${this.baseUrl}${endpoint}`;
23
+
24
+ const headers = {
25
+ "Content-Type": "application/json",
26
+ "x-api-key": this.apiKey,
27
+ };
28
+
29
+ const response = await fetch(url, {
30
+ method: "POST",
31
+ headers,
32
+ body: JSON.stringify(body),
33
+ });
34
+
35
+ if (!response.ok) {
36
+ const errorText = await response.text();
37
+ throw new Error(
38
+ `API request failed: ${response.status} ${response.statusText} - ${errorText}`,
39
+ );
40
+ }
41
+
42
+ return response;
43
+ }
44
+
45
+ async generate(options: RunOptions): Promise<GenerateResponse> {
46
+ const response = await this.fetchApi("/api/v1/run", {
47
+ agent_id: options.agentId,
48
+ variables: options.variables,
49
+ overrides: options.overrides,
50
+ extra_messages: options.extraMessages,
51
+ stream: false,
52
+ });
53
+
54
+ return await response.json();
55
+ }
56
+
57
+ async *stream(
58
+ options: RunOptions,
59
+ ): AsyncGenerator<TextStreamPart<ToolSet>, void, unknown> {
60
+ const response = await this.fetchApi("/api/v1/run", {
61
+ agent_id: options.agentId,
62
+ variables: options.variables,
63
+ overrides: options.overrides,
64
+ extra_messages: options.extraMessages,
65
+ stream: true,
66
+ });
67
+
68
+ if (!response.body) {
69
+ throw new Error("Response body is empty");
70
+ }
71
+
72
+ const reader = response.body.getReader();
73
+ const decoder = new TextDecoder();
74
+
75
+ let buffer = "";
76
+
77
+ try {
78
+ while (true) {
79
+ const { done, value } = await reader.read();
80
+ if (done) break;
81
+
82
+ buffer += decoder.decode(value, { stream: true });
83
+ const lines = buffer.split("\n");
84
+
85
+ // Keep the last incomplete line in the buffer
86
+ buffer = lines.pop() || "";
87
+
88
+ for (const line of lines) {
89
+ const trimmedLine = line.trim();
90
+ if (!trimmedLine || !trimmedLine.startsWith("data: ")) continue;
91
+
92
+ const data = trimmedLine.slice(6);
93
+ try {
94
+ const parsed = JSON.parse(data) as TextStreamPart<ToolSet>;
95
+ yield parsed;
96
+ } catch (e) {
97
+ console.warn("Failed to parse stream chunk:", data, e);
98
+ }
99
+ }
100
+ }
101
+ } finally {
102
+ reader.releaseLock();
103
+ }
104
+ }
105
+
106
+ /**
107
+ * Generate an embedding for a single value using the specified model.
108
+ * Accepts all options from Vercel AI SDK's embed function.
109
+ *
110
+ * @param options - The embedding options (extends Vercel AI SDK's embed parameters)
111
+ * @returns The embedding vector
112
+ */
113
+ async embed(options: EmbedOptions): Promise<EmbedResponse> {
114
+ // Pass all options directly to the API
115
+ const response = await this.fetchApi("/api/v1/embed", options);
116
+ return await response.json();
117
+ }
118
+
119
+ /**
120
+ * Generate embeddings for multiple values using the specified model.
121
+ * Accepts all options from Vercel AI SDK's embedMany function.
122
+ *
123
+ * @param options - The embedding options (extends Vercel AI SDK's embedMany parameters)
124
+ * @returns The embedding vectors (one per input value)
125
+ */
126
+ async embedMany(options: EmbedManyOptions): Promise<EmbedManyResponse> {
127
+ // Pass all options directly to the API
128
+ const response = await this.fetchApi("/api/v1/embed-many", options);
129
+ return await response.json();
130
+ }
93
131
  }
132
+
133
+ // Re-export types for convenience
134
+ export type {
135
+ Agent0Config,
136
+ EmbedManyOptions,
137
+ EmbedManyResponse,
138
+ EmbedModel,
139
+ EmbedOptions,
140
+ EmbedResponse,
141
+ GenerateResponse,
142
+ ModelOverrides,
143
+ ProviderOptions,
144
+ RunOptions,
145
+ } from "./types";
package/src/types.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  import type { GoogleGenerativeAIProviderOptions } from "@ai-sdk/google";
2
2
  import type { OpenAIResponsesProviderOptions } from "@ai-sdk/openai";
3
3
  import type { XaiProviderOptions } from "@ai-sdk/xai";
4
- import type { ModelMessage } from "ai";
4
+ import type { embed, embedMany, ModelMessage } from "ai";
5
5
 
6
6
  export interface Agent0Config {
7
7
  apiKey: string;
@@ -54,3 +54,51 @@ export interface GenerateResponse {
54
54
  messages: ModelMessage[];
55
55
  text: string;
56
56
  }
57
+
58
+ /**
59
+ * Model specification for Agent0 embedding operations.
60
+ * Instead of passing an EmbeddingModel instance, pass the provider_id and model name.
61
+ */
62
+ export interface EmbedModel {
63
+ /** The provider ID (from your Agent0 providers configuration) */
64
+ provider_id: string;
65
+ /** The embedding model name (e.g., 'text-embedding-3-small', 'text-embedding-ada-002') */
66
+ name: string;
67
+ }
68
+
69
+ /**
70
+ * Options for the embed function.
71
+ * Extends Vercel AI SDK's embed parameters, only modifying the `model` property
72
+ * to use Agent0's provider_id + name format instead of an EmbeddingModel instance.
73
+ */
74
+ export type EmbedOptions = Omit<Parameters<typeof embed>[0], "model"> & {
75
+ model: EmbedModel;
76
+ };
77
+
78
+ /**
79
+ * Options for the embedMany function.
80
+ * Extends Vercel AI SDK's embedMany parameters, only modifying the `model` property
81
+ * to use Agent0's provider_id + name format instead of an EmbeddingModel instance.
82
+ */
83
+ export type EmbedManyOptions = Omit<
84
+ Parameters<typeof embedMany>[0],
85
+ "model"
86
+ > & {
87
+ model: EmbedModel;
88
+ };
89
+
90
+ /**
91
+ * Response from the embed function.
92
+ */
93
+ export interface EmbedResponse {
94
+ /** The embedding vector */
95
+ embedding: number[];
96
+ }
97
+
98
+ /**
99
+ * Response from the embedMany function.
100
+ */
101
+ export interface EmbedManyResponse {
102
+ /** The embedding vectors (one per input value) */
103
+ embeddings: number[][];
104
+ }