@blinkdotnew/dev-sdk 2.2.3 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -353,7 +353,7 @@ interface StorageDownloadResponse {
  contentType?: string;
  size?: number;
  }
- interface TokenUsage {
+ interface TokenUsage$1 {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
@@ -386,7 +386,7 @@ interface TextGenerationRequest {
  interface TextGenerationResponse {
  text: string;
  finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls';
- usage?: TokenUsage;
+ usage?: TokenUsage$1;
  files?: any[];
  reasoningDetails?: any[];
  toolCalls?: any[];
@@ -400,7 +400,7 @@ interface TextGenerationResponse {
  stepType?: string;
  text?: string;
  finishReason?: string;
- usage?: TokenUsage;
+ usage?: TokenUsage$1;
  }>;
  sources?: any[];
  providerMetadata?: any;
@@ -418,7 +418,7 @@ interface ObjectGenerationRequest {
  interface ObjectGenerationResponse {
  object: any;
  finishReason?: 'stop' | 'length' | 'content_filter';
- usage?: TokenUsage;
+ usage?: TokenUsage$1;
  warnings?: string[];
  providerMetadata?: {
  openai?: {
@@ -561,6 +561,11 @@ interface BlinkAI {
  generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
  generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
  transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
+ agent(options: any): Promise<any>;
+ /** Creates a reusable Agent instance (Vercel AI SDK pattern) */
+ createAgent(options: any): any;
+ /** Binds an existing Agent instance to this client's httpClient */
+ bindAgent(agent: any): any;
  }
  interface DataExtraction {
  chunks: string[];
@@ -806,8 +811,38 @@ interface SendEmailResponse {
  interface BlinkNotifications {
  email(params: SendEmailRequest): Promise<SendEmailResponse>;
  }
+ /**
+ * Token type in Blink Auth system
+ * - `access`: Regular user access token (short-lived)
+ * - `service`: Service token for server-side operations (permanent secret key)
+ */
+ type BlinkTokenType = 'access' | 'service';
+ /**
+ * Result of token introspection
+ * Used by edge functions and server-side code to verify user tokens
+ */
+ interface TokenIntrospectionResult {
+ /** Whether the token is valid */
+ valid: boolean;
+ /** Project ID from the token */
+ projectId?: string;
+ /** User ID (Firebase UID) - only present for access tokens */
+ userId?: string;
+ /** User's email - only present for access tokens */
+ email?: string;
+ /** Token type: 'access' or 'service' */
+ tokenType?: BlinkTokenType;
+ /** User's role in the app (if set via app_role claim) */
+ appRole?: string;
+ /** Token expiration timestamp (Unix seconds) - not present for secret keys */
+ exp?: number;
+ /** Legacy service key ID (for JWT-based service tokens) */
+ svcKeyId?: string;
+ /** Error message if token is invalid */
+ error?: string;
+ }

- type ConnectorProvider = 'discord' | 'notion' | 'google_drive' | 'google_calendar' | 'google_slides' | 'google_docs' | 'google_sheets' | 'slack' | 'linkedin' | 'hubspot' | 'ai';
+ type ConnectorProvider = 'discord' | 'notion' | 'google_drive' | 'google_calendar' | 'ai';
  type ConnectorAuthMode = 'oauth' | 'api_key' | 'blink_managed' | 'hybrid';
  interface ConnectorStatusData {
  connected: boolean;
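Illustrative usage of the new `BlinkTokenType` and `TokenIntrospectionResult` types: a minimal sketch of gating an edge-function handler on the introspection result. It assumes a client created with `createClient()` and the `blink.auth.verifyToken()` method declared later in this diff; the `"admin"` role name is a hypothetical example, not something defined by the SDK.

```ts
// Sketch only: branch on token type and app role from a TokenIntrospectionResult.
import { createClient, type TokenIntrospectionResult } from "npm:@blinkdotnew/sdk";

const blink = createClient({
  projectId: Deno.env.get("BLINK_PROJECT_ID")!,
  secretKey: Deno.env.get("BLINK_SECRET_KEY"),
});

async function authorize(req: Request): Promise<Response | TokenIntrospectionResult> {
  const result = await blink.auth.verifyToken(req.headers.get("Authorization"));
  if (!result.valid) {
    return new Response(JSON.stringify({ error: result.error }), { status: 401 });
  }
  if (result.tokenType === "service") {
    // Service tokens carry no userId/email/exp; treat as a trusted server-side caller.
    return result;
  }
  // Access tokens carry user claims; appRole is only set when an app_role claim exists.
  if (result.appRole !== "admin") { // "admin" is an illustrative role name
    return new Response("Forbidden", { status: 403 });
  }
  return result;
}
```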
@@ -1036,6 +1071,94 @@ declare class HttpClient {
  cfg_scale?: number;
  signal?: AbortSignal;
  }): Promise<BlinkResponse<any>>;
+ /**
+ * AI Agent request (non-streaming)
+ * Returns JSON response with text, steps, usage, and billing
+ */
+ aiAgent(requestBody: {
+ stream: false;
+ prompt?: string;
+ messages?: Array<{
+ role: string;
+ content: string | any[];
+ parts?: any[];
+ }>;
+ agent: {
+ model: string;
+ system?: string;
+ tools?: string[];
+ webhook_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ webhook_url: string;
+ }>;
+ client_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ }>;
+ tool_choice?: 'auto' | 'required' | 'none';
+ stop_when?: Array<{
+ type: string;
+ count: number;
+ }>;
+ prepare_step?: {
+ context_policy: any;
+ };
+ };
+ }, signal?: AbortSignal): Promise<BlinkResponse<any>>;
+ /**
+ * AI Agent streaming request
+ * Returns raw Response for SSE streaming (compatible with AI SDK useChat)
+ */
+ aiAgentStream(requestBody: {
+ stream: true;
+ prompt?: string;
+ messages?: Array<{
+ role: string;
+ content: string | any[];
+ parts?: any[];
+ }>;
+ agent: {
+ model: string;
+ system?: string;
+ tools?: string[];
+ webhook_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ webhook_url: string;
+ }>;
+ client_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ }>;
+ tool_choice?: 'auto' | 'required' | 'none';
+ stop_when?: Array<{
+ type: string;
+ count: number;
+ }>;
+ prepare_step?: {
+ context_policy: any;
+ };
+ };
+ }, signal?: AbortSignal): Promise<Response>;
+ /**
+ * RAG AI Search streaming request
+ * Returns raw Response for SSE streaming
+ */
+ ragAiSearchStream(body: {
+ collection_id?: string;
+ collection_name?: string;
+ query: string;
+ model?: string;
+ max_context_chunks?: number;
+ score_threshold?: number;
+ system_prompt?: string;
+ stream: true;
+ }, signal?: AbortSignal): Promise<Response>;
  /**
  * Data-specific requests
  */
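`HttpClient` itself is internal and not part of the export list, but `aiAgentStream`, `ragAiSearchStream`, and the higher-level `agent.stream()` all return a raw SSE `Response`. A minimal, generic sketch of consuming such a response with standard web APIs; the exact event payload format is not specified in this diff and is treated here as opaque `data:` lines.

```ts
// Sketch: read an SSE Response (e.g. from agent.stream() or blink.ai.agent({ stream: true, ... })).
// Only shows the ReadableStream plumbing; payload parsing is left to the caller.
async function readSse(response: Response, onData: (data: string) => void): Promise<void> {
  if (!response.body) throw new Error("Response has no body");
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let newline: number;
    while ((newline = buffer.indexOf("\n")) >= 0) {
      const line = buffer.slice(0, newline).trim();
      buffer = buffer.slice(newline + 1);
      if (line.startsWith("data:")) onData(line.slice(5).trim());
    }
  }
}
```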
@@ -1477,6 +1600,44 @@ declare class BlinkAuth {
  expires_in?: number;
  refresh_expires_in?: number;
  }, persist?: boolean): Promise<BlinkUser>;
+ /**
+ * Verify a Blink Auth token using the introspection endpoint.
+ *
+ * **Server-side / Edge Function use only.**
+ *
+ * This is the recommended way to verify user tokens in Deno Edge Functions
+ * and other server-side contexts. It calls the Blink API introspection
+ * endpoint which validates the token without exposing the JWT secret.
+ *
+ * @param token - The raw JWT token (without "Bearer " prefix) or full Authorization header
+ * @returns Token introspection result with validity and claims
+ *
+ * @example
+ * // Deno Edge Function usage
+ * import { createClient } from "npm:@blinkdotnew/sdk";
+ *
+ * const blink = createClient({
+ * projectId: Deno.env.get("BLINK_PROJECT_ID")!,
+ * secretKey: Deno.env.get("BLINK_SECRET_KEY"),
+ * });
+ *
+ * async function handler(req: Request): Promise<Response> {
+ * const authHeader = req.headers.get("Authorization");
+ * const result = await blink.auth.verifyToken(authHeader);
+ *
+ * if (!result.valid) {
+ * return new Response(JSON.stringify({ error: result.error }), { status: 401 });
+ * }
+ *
+ * // User is authenticated
+ * console.log("User ID:", result.userId);
+ * console.log("Email:", result.email);
+ * console.log("Project:", result.projectId);
+ *
+ * // Continue with your logic...
+ * }
+ */
+ verifyToken(token: string | null): Promise<TokenIntrospectionResult>;
  /**
  * Refresh access token using refresh token
  */
@@ -1827,6 +1988,319 @@ interface BlinkFunctions {
  invoke<T = any>(functionSlug: string, options?: FunctionsInvokeOptions): Promise<FunctionsInvokeResponse<T>>;
  }

+ /**
+ * Blink RAG Module - Vector Search and AI-powered retrieval
+ *
+ * Provides document ingestion, vector search, and RAG (Retrieval-Augmented Generation) capabilities.
+ */
+
+ interface RAGCollection {
+ id: string;
+ name: string;
+ description: string | null;
+ embeddingModel: string;
+ embeddingDimensions: number;
+ indexMetric: 'cosine' | 'l2';
+ chunkMaxTokens: number;
+ chunkOverlapTokens: number;
+ documentCount: number;
+ chunkCount: number;
+ shared: boolean;
+ createdAt: string;
+ updatedAt: string;
+ }
+ interface RAGDocument {
+ id: string;
+ collectionId: string;
+ filename: string;
+ sourceType: 'file' | 'url' | 'text';
+ sourceUrl: string | null;
+ contentType: string | null;
+ fileSize: number | null;
+ status: 'pending' | 'processing' | 'ready' | 'error';
+ errorMessage: string | null;
+ processingStartedAt: string | null;
+ processingCompletedAt: string | null;
+ chunkCount: number;
+ tokenCount: number | null;
+ metadata: Record<string, any>;
+ createdAt: string;
+ updatedAt: string;
+ }
+ interface RAGSearchResult {
+ chunkId: string;
+ documentId: string;
+ filename: string;
+ content: string;
+ score: number;
+ chunkIndex: number;
+ metadata: Record<string, any>;
+ }
+ interface RAGSearchResponse {
+ results: RAGSearchResult[];
+ query: string;
+ collectionId: string;
+ totalResults: number;
+ }
+ interface RAGAISearchSource {
+ documentId: string;
+ filename: string;
+ chunkId: string;
+ excerpt: string;
+ score: number;
+ }
+ interface RAGAISearchResult {
+ answer: string;
+ sources: RAGAISearchSource[];
+ query: string;
+ model: string;
+ usage: {
+ inputTokens: number;
+ outputTokens: number;
+ };
+ }
+ interface CreateCollectionOptions {
+ name: string;
+ description?: string;
+ embeddingModel?: string;
+ embeddingDimensions?: number;
+ indexMetric?: 'cosine' | 'l2';
+ chunkMaxTokens?: number;
+ chunkOverlapTokens?: number;
+ shared?: boolean;
+ }
+ interface UploadOptions {
+ collectionId?: string;
+ collectionName?: string;
+ filename: string;
+ content?: string;
+ file?: {
+ data: string;
+ contentType: string;
+ };
+ url?: string;
+ metadata?: Record<string, any>;
+ }
+ interface SearchOptions {
+ collectionId?: string;
+ collectionName?: string;
+ query: string;
+ maxResults?: number;
+ scoreThreshold?: number;
+ filters?: Record<string, any>;
+ includeContent?: boolean;
+ }
+ interface AISearchOptions {
+ collectionId?: string;
+ collectionName?: string;
+ query: string;
+ model?: string;
+ maxContextChunks?: number;
+ scoreThreshold?: number;
+ systemPrompt?: string;
+ stream?: boolean;
+ /** AbortSignal for cancellation (streaming only) */
+ signal?: AbortSignal;
+ }
+ interface ListDocumentsOptions {
+ collectionId?: string;
+ status?: 'pending' | 'processing' | 'ready' | 'error';
+ }
+ interface WaitForReadyOptions {
+ timeoutMs?: number;
+ pollIntervalMs?: number;
+ }
+ declare class BlinkRAGImpl {
+ private httpClient;
+ private projectId;
+ constructor(httpClient: HttpClient);
+ /**
+ * Build URL with project_id prefix
+ */
+ private url;
+ /**
+ * Create a new RAG collection
+ */
+ createCollection(options: CreateCollectionOptions): Promise<RAGCollection>;
+ /**
+ * List all collections accessible to the current user
+ */
+ listCollections(): Promise<RAGCollection[]>;
+ /**
+ * Get a specific collection by ID
+ */
+ getCollection(collectionId: string): Promise<RAGCollection>;
+ /**
+ * Delete a collection and all its documents
+ */
+ deleteCollection(collectionId: string): Promise<void>;
+ /**
+ * Upload a document for processing
+ *
+ * @example
+ * // Upload text content
+ * const doc = await blink.rag.upload({
+ * collectionName: 'docs',
+ * filename: 'notes.txt',
+ * content: 'My document content...'
+ * })
+ *
+ * @example
+ * // Upload from URL
+ * const doc = await blink.rag.upload({
+ * collectionId: 'col_abc123',
+ * filename: 'article.html',
+ * url: 'https://example.com/article'
+ * })
+ *
+ * @example
+ * // Upload a file (base64)
+ * const doc = await blink.rag.upload({
+ * collectionName: 'docs',
+ * filename: 'report.pdf',
+ * file: { data: base64Data, contentType: 'application/pdf' }
+ * })
+ */
+ upload(options: UploadOptions): Promise<RAGDocument>;
+ /**
+ * Get document status and metadata
+ */
+ getDocument(documentId: string): Promise<RAGDocument>;
+ /**
+ * List documents, optionally filtered by collection or status
+ */
+ listDocuments(options?: ListDocumentsOptions): Promise<RAGDocument[]>;
+ /**
+ * Delete a document and its chunks
+ */
+ deleteDocument(documentId: string): Promise<void>;
+ /**
+ * Wait for a document to finish processing
+ *
+ * @example
+ * const doc = await blink.rag.upload({ ... })
+ * const readyDoc = await blink.rag.waitForReady(doc.id)
+ * console.log(`Processed ${readyDoc.chunkCount} chunks`)
+ */
+ waitForReady(documentId: string, options?: WaitForReadyOptions): Promise<RAGDocument>;
+ /**
+ * Search for similar chunks using vector similarity
+ *
+ * @example
+ * const results = await blink.rag.search({
+ * collectionName: 'docs',
+ * query: 'How do I configure authentication?',
+ * maxResults: 5
+ * })
+ */
+ search(options: SearchOptions): Promise<RAGSearchResponse>;
+ /**
+ * Perform RAG: search + LLM answer generation
+ *
+ * @example
+ * // Non-streaming
+ * const result = await blink.rag.aiSearch({
+ * collectionName: 'docs',
+ * query: 'What are the main features?'
+ * })
+ * console.log(result.answer)
+ *
+ * @example
+ * // Streaming
+ * const stream = await blink.rag.aiSearch({
+ * collectionName: 'docs',
+ * query: 'Explain the architecture',
+ * stream: true
+ * })
+ */
+ aiSearch(options: AISearchOptions & {
+ stream?: false;
+ }): Promise<RAGAISearchResult>;
+ aiSearch(options: AISearchOptions & {
+ stream: true;
+ }): Promise<ReadableStream<Uint8Array>>;
+ }
+ type BlinkRAG = BlinkRAGImpl;
+
+ /**
+ * Blink Sandbox Module - Persistent coding environments for AI agents
+ *
+ * Provides lifecycle management for E2B sandboxes with auto-pause and resume.
+ * Used with agent.generate({ sandbox }) for AI coding agents.
+ */
+
+ interface Sandbox {
+ /** Sandbox ID (sbx_xxx format) - STORE THIS for persistence! */
+ id: string;
+ /** Template used to create the sandbox */
+ template: string;
+ /** Get public URL for any port (sync - computed locally from hostPattern) */
+ getHost(port: number): string;
+ }
+ interface SandboxCreateOptions {
+ /** E2B template ID (default: 'devtools-base') */
+ template?: string;
+ /** Inactivity timeout in ms (default: 5 min, max: 60 min) */
+ timeoutMs?: number;
+ /** Custom metadata for tracking */
+ metadata?: Record<string, string>;
+ }
+ interface SandboxConnectOptions {
+ /** Reset inactivity timeout in ms (default: 5 min) */
+ timeoutMs?: number;
+ }
+ interface BlinkSandbox {
+ /**
+ * Create a new persistent sandbox with auto-pause enabled.
+ *
+ * @example
+ * ```ts
+ * const sandbox = await blink.sandbox.create({ template: 'nextjs-app' })
+ * console.log(sandbox.id) // "sbx_abc123xyz" - Store this!
+ * console.log(sandbox.getHost(3000)) // "3000-sbx_abc123xyz.preview-blink.com"
+ * ```
+ */
+ create(options?: SandboxCreateOptions): Promise<Sandbox>;
+ /**
+ * Connect to an existing sandbox. Auto-resumes if paused.
+ * Built-in retry with exponential backoff (3 retries: 250ms → 500ms → 1000ms).
+ *
+ * @example
+ * ```ts
+ * const sandbox = await blink.sandbox.connect(storedSandboxId)
+ * ```
+ */
+ connect(sandboxId: string, options?: SandboxConnectOptions): Promise<Sandbox>;
+ /**
+ * Permanently kill a sandbox. Cannot be resumed after kill.
+ *
+ * @example
+ * ```ts
+ * await blink.sandbox.kill(sandboxId)
+ * await blink.db.user_sandboxes.delete({ sandbox_id: sandboxId })
+ * ```
+ */
+ kill(sandboxId: string): Promise<void>;
+ }
+ declare const SANDBOX_TEMPLATES: readonly ["devtools-base", "nextjs-app", "nextjs-app-bun", "vite-react", "vite-react-bun", "expo-app", "desktop", "claude-code"];
+ type SandboxTemplate = typeof SANDBOX_TEMPLATES[number];
+ declare class SandboxConnectionError extends Error {
+ sandboxId: string;
+ constructor(sandboxId: string, cause?: Error);
+ }
+ declare class BlinkSandboxImpl implements BlinkSandbox {
+ private httpClient;
+ private projectId;
+ constructor(httpClient: HttpClient);
+ /**
+ * Build URL with project_id prefix
+ */
+ private url;
+ create(options?: SandboxCreateOptions): Promise<Sandbox>;
+ connect(sandboxId: string, options?: SandboxConnectOptions): Promise<Sandbox>;
+ kill(sandboxId: string): Promise<void>;
+ }
+
  /**
  * Blink Client - Main SDK entry point
  * Factory function and client class for the Blink SDK
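A minimal end-to-end sketch of the new RAG module, combining the methods declared above (createCollection, upload, waitForReady, search, aiSearch). The collection name, filename, content, and queries are illustrative; error handling is omitted.

```ts
// Sketch: build a small knowledge base and query it with blink.rag.
import { createClient } from "@blinkdotnew/sdk";

const blink = createClient({ projectId: "your-project-id" }); // placeholder project id

async function buildAndQueryKnowledgeBase() {
  // 1. Create a collection (name is illustrative).
  const collection = await blink.rag.createCollection({ name: "docs" });

  // 2. Upload a text document and wait until chunking/embedding completes.
  const doc = await blink.rag.upload({
    collectionId: collection.id,
    filename: "notes.txt",
    content: "Blink is an SDK for building AI-powered apps...",
  });
  const ready = await blink.rag.waitForReady(doc.id, { timeoutMs: 60_000 });
  console.log(`Indexed ${ready.chunkCount} chunks`);

  // 3. Plain vector search over the collection.
  const hits = await blink.rag.search({
    collectionId: collection.id,
    query: "What is Blink?",
    maxResults: 5,
  });
  console.log(hits.results.map((r) => r.score));

  // 4. RAG answer generation (non-streaming overload returns RAGAISearchResult).
  const answer = await blink.rag.aiSearch({
    collectionId: collection.id,
    query: "Summarize what Blink provides.",
  });
  console.log(answer.answer, answer.sources.length);
}
```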
@@ -1843,6 +2317,8 @@ interface BlinkClient {
  analytics: BlinkAnalytics;
  connectors: BlinkConnectors;
  functions: BlinkFunctions;
+ rag: BlinkRAG;
+ sandbox: BlinkSandbox;
  }
  /**
  * Create a new Blink client instance
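Since the Sandbox docs stress storing the `sbx_...` id for persistence, here is a hedged sketch of a "reuse one sandbox per user" pattern with the new `blink.sandbox` client property. Persisting the id in `localStorage` is purely illustrative (the diff's own `kill()` example persists it in a `blink.db` table instead), and the fallback-on-error behaviour is an assumption about how a stale id would surface.

```ts
// Sketch: connect to a stored sandbox if possible, otherwise create a new one.
import type { BlinkClient, Sandbox } from "@blinkdotnew/sdk";

async function getOrCreateSandbox(blink: BlinkClient): Promise<Sandbox> {
  const stored = localStorage.getItem("sandbox_id");
  if (stored) {
    try {
      // connect() auto-resumes a paused sandbox and retries with backoff internally.
      return await blink.sandbox.connect(stored);
    } catch {
      localStorage.removeItem("sandbox_id"); // assume the stored id is stale or killed
    }
  }
  const sandbox = await blink.sandbox.create({ template: "vite-react" });
  localStorage.setItem("sandbox_id", sandbox.id); // the sbx_... id must be stored to reconnect later
  console.log("Preview URL:", sandbox.getHost(3000));
  return sandbox;
}
```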
@@ -1944,6 +2420,366 @@ declare class BlinkStorageImpl implements BlinkStorage {
  remove(...paths: string[]): Promise<void>;
  }

+ /**
+ * Agent Tool Types
+ *
+ * Type definitions for AI agent tools used with blink.ai.agent()
+ */
+ /**
+ * JSON Schema type for tool input definitions
+ */
+ type JSONSchema = Record<string, any>;
+ /**
+ * A custom webhook tool definition
+ *
+ * Webhook tools NEED schemas because the server forwards them to your endpoint.
+ */
+ interface WebhookTool {
+ /** Tool name */
+ name: string;
+ /** Human-readable description */
+ description: string;
+ /** JSON Schema for input parameters */
+ input_schema: JSONSchema;
+ /** URL to POST when tool is called */
+ webhook_url: string;
+ }
+ /**
+ * A client-side tool requiring user confirmation
+ *
+ * Client tools NEED schemas so the AI knows what inputs to provide.
+ */
+ interface ClientTool {
+ /** Tool name */
+ name: string;
+ /** Human-readable description */
+ description: string;
+ /** JSON Schema for input parameters */
+ input_schema: JSONSchema;
+ }
+ /**
+ * Stop condition for agent loop
+ */
+ interface StopCondition {
+ type: 'step_count_is';
+ count: number;
+ }
+ /**
+ * Context policy for managing conversation history
+ */
+ interface ContextPolicy {
+ strategy: 'token_budget';
+ max_input_tokens: number;
+ keep_system: boolean;
+ keep_last_messages: number;
+ trim_order: string[];
+ max_tool_result_bytes: number;
+ }
+ /**
+ * Agent configuration
+ */
+ interface AgentConfig {
+ /** Model ID in Vercel AI Gateway format: "provider/model-id" */
+ model: string;
+ /** System prompt */
+ system?: string;
+ /** Built-in tools to enable (tool names) */
+ tools?: string[];
+ /** Custom webhook tools */
+ webhook_tools?: WebhookTool[];
+ /** Client-side tools for HITL */
+ client_tools?: ClientTool[];
+ /** Tool choice strategy */
+ tool_choice?: 'auto' | 'required' | 'none';
+ /** Stop conditions */
+ stop_when?: StopCondition[];
+ /** Context management */
+ prepare_step?: {
+ context_policy: Partial<ContextPolicy>;
+ };
+ }
+ /**
+ * UI Message format (AI SDK compatible)
+ */
+ interface UIMessage {
+ id?: string;
+ role: 'user' | 'assistant';
+ content: string;
+ parts?: UIMessagePart[];
+ }
+ type UIMessagePart = {
+ type: 'text';
+ text: string;
+ } | {
+ type: 'tool-invocation';
+ toolCallId: string;
+ toolName: string;
+ state: 'pending' | 'result' | 'output-available';
+ input: Record<string, any>;
+ output?: any;
+ };
+ /**
+ * Agent request for streaming mode
+ */
+ interface AgentStreamRequest {
+ stream: true;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ }
+ /**
+ * Agent request for non-streaming mode with messages
+ */
+ interface AgentNonStreamMessagesRequest {
+ stream: false;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ }
+ /**
+ * Agent request for non-streaming mode with prompt
+ */
+ interface AgentNonStreamPromptRequest {
+ stream: false;
+ prompt: string;
+ agent: AgentConfig;
+ }
+ /**
+ * Union of all agent request types
+ */
+ type AgentRequest = AgentStreamRequest | AgentNonStreamMessagesRequest | AgentNonStreamPromptRequest;
+ /**
+ * Token usage information
+ */
+ interface TokenUsage {
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens?: number;
+ }
+ /**
+ * Tool call in a step
+ */
+ interface ToolCall {
+ toolCallId: string;
+ toolName: string;
+ args: Record<string, any>;
+ }
+ /**
+ * Tool result in a step
+ */
+ interface ToolResult {
+ toolCallId: string;
+ result: any;
+ }
+ /**
+ * Agent step information
+ */
+ interface AgentStep {
+ text: string;
+ toolCalls: ToolCall[];
+ toolResults: ToolResult[];
+ finishReason: string;
+ usage: TokenUsage;
+ }
+ /**
+ * Billing information
+ */
+ interface AgentBilling {
+ model: string;
+ creditsCharged: number;
+ costUSD: number;
+ breakdown?: {
+ ai: {
+ credits: number;
+ costUSD: number;
+ };
+ tools: Record<string, {
+ count: number;
+ credits: number;
+ costUSD: number;
+ }>;
+ };
+ }
+ /**
+ * Non-streaming agent response
+ */
+ interface AgentResponse {
+ text: string;
+ finishReason: 'stop' | 'length' | 'tool-calls' | 'error' | 'content-filter';
+ steps: AgentStep[];
+ usage: TokenUsage;
+ warnings?: Array<{
+ type: string;
+ message: string;
+ }>;
+ _billing: AgentBilling;
+ }
+
+ /**
+ * Agent Class
+ *
+ * Matches Vercel AI SDK ToolLoopAgent pattern:
+ * - Create agent instance with config
+ * - Call agent.generate() for non-streaming
+ * - Call agent.stream() for streaming
+ */
+
+ /**
+ * Options for creating an Agent instance
+ * Matches Vercel AI SDK ToolLoopAgent constructor options
+ */
+ interface AgentOptions {
+ /** Model ID in Vercel AI Gateway format: "provider/model-id" */
+ model: string;
+ /** System prompt / instructions */
+ system?: string;
+ /** Alias for system (Vercel AI SDK compatibility) */
+ instructions?: string;
+ /** Built-in tools to enable (tool names) */
+ tools?: string[];
+ /** Custom webhook tools */
+ webhookTools?: WebhookTool[];
+ /** Client-side tools for HITL */
+ clientTools?: ClientTool[];
+ /** Tool choice strategy */
+ toolChoice?: 'auto' | 'required' | 'none';
+ /** Stop conditions */
+ stopWhen?: StopCondition[];
+ /** Maximum number of steps (convenience for stopWhen) */
+ maxSteps?: number;
+ }
+ /**
+ * Options for agent.generate() call
+ */
+ interface GenerateOptions {
+ /** Simple text prompt */
+ prompt?: string;
+ /** Conversation history */
+ messages?: UIMessage[];
+ /** Sandbox for sandbox tools (object with id, or just sandboxId string) */
+ sandbox?: Sandbox | string;
+ /** Abort signal for cancellation */
+ signal?: AbortSignal;
+ }
+ /**
+ * Options for agent.stream() call
+ */
+ interface StreamOptions {
+ /** Simple text prompt */
+ prompt?: string;
+ /** Conversation history */
+ messages?: UIMessage[];
+ /** Sandbox for sandbox tools (object with id, or just sandboxId string) */
+ sandbox?: Sandbox | string;
+ /** Abort signal for cancellation */
+ signal?: AbortSignal;
+ }
+ /**
+ * AI Agent class following Vercel AI SDK ToolLoopAgent pattern.
+ *
+ * Create an agent instance with configuration, then use:
+ * - `agent.generate({ prompt })` for non-streaming one-shot generation
+ * - `agent.stream({ prompt })` for streaming real-time generation
+ *
+ * @example
+ * ```ts
+ * import { Agent, webSearch, fetchUrl } from '@blinkdotnew/sdk'
+ *
+ * const weatherAgent = new Agent({
+ * model: 'anthropic/claude-sonnet-4-20250514',
+ * system: 'You are a helpful weather assistant.',
+ * tools: [webSearch, fetchUrl],
+ * maxSteps: 10,
+ * })
+ *
+ * // Non-streaming
+ * const result = await weatherAgent.generate({
+ * prompt: 'What is the weather in San Francisco?',
+ * })
+ * console.log(result.text)
+ *
+ * // Streaming
+ * const stream = await weatherAgent.stream({
+ * prompt: 'Tell me about weather patterns',
+ * })
+ * ```
+ */
+ declare class Agent {
+ private httpClient;
+ private readonly config;
+ /**
+ * Create a new Agent instance
+ * @param options - Agent configuration options
+ */
+ constructor(options: AgentOptions);
+ /**
+ * Internal: Set the HTTP client (called by BlinkClient)
+ */
+ _setHttpClient(client: HttpClient): void;
+ /**
+ * Internal: Get the agent config for API requests
+ */
+ private getAgentConfig;
+ /**
+ * Generate a response (non-streaming)
+ *
+ * @param options - Generation options (prompt or messages)
+ * @returns Promise<AgentResponse> with text, steps, usage, and billing
+ *
+ * @example
+ * ```ts
+ * const result = await agent.generate({
+ * prompt: 'What is the weather in San Francisco?',
+ * })
+ * console.log(result.text)
+ * console.log(result.steps)
+ * ```
+ */
+ generate(options: GenerateOptions): Promise<AgentResponse>;
+ /**
+ * Stream a response (real-time)
+ *
+ * @param options - Stream options (prompt or messages)
+ * @returns Promise<Response> - AI SDK UI Message Stream for useChat compatibility
+ *
+ * @example
+ * ```ts
+ * const stream = await agent.stream({
+ * prompt: 'Tell me a story',
+ * })
+ *
+ * // Process stream
+ * for await (const chunk of stream.body) {
+ * // Handle chunk
+ * }
+ * ```
+ */
+ stream(options: StreamOptions): Promise<Response>;
+ /**
+ * Get the agent's model
+ */
+ get model(): string;
+ /**
+ * Get the agent's system prompt
+ */
+ get system(): string | undefined;
+ /**
+ * Get the agent's tools
+ */
+ get tools(): string[] | undefined;
+ }
+ /**
+ * Creates a stop condition for maximum step count
+ * Matches Vercel AI SDK's stepCountIs helper
+ *
+ * @example
+ * ```ts
+ * const agent = new Agent({
+ * model: 'openai/gpt-4o',
+ * stopWhen: [stepCountIs(10)],
+ * })
+ * ```
+ */
+ declare function stepCountIs(count: number): StopCondition;
+
  /**
  * Blink AI Module
  * Provides AI generation capabilities with Vercel AI SDK compatibility
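The Agent JSDoc above covers built-in tools, but not the custom tool shapes. A sketch of wiring a `WebhookTool` and a `ClientTool` into an `Agent` with a `stepCountIs` stop condition; the tool names, schema, and webhook URL are hypothetical, only the shapes and the `Agent`/`stepCountIs`/`webSearch` exports come from the declarations in this diff.

```ts
// Sketch: custom tools on a reusable Agent instance.
import { Agent, stepCountIs, webSearch, type WebhookTool, type ClientTool } from "@blinkdotnew/sdk";

const createTicket: WebhookTool = {
  name: "create_ticket",
  description: "Create a support ticket in the helpdesk",
  input_schema: {
    type: "object",
    properties: { title: { type: "string" }, body: { type: "string" } },
    required: ["title"],
  },
  webhook_url: "https://example.com/hooks/create-ticket", // hypothetical endpoint
};

const confirmRefund: ClientTool = {
  name: "confirm_refund",
  description: "Ask the user to approve a refund before it is issued",
  input_schema: {
    type: "object",
    properties: { amount: { type: "number" } },
    required: ["amount"],
  },
};

const supportAgent = new Agent({
  model: "openai/gpt-4o",
  system: "You are a support assistant.",
  tools: [webSearch],            // built-in tool, referenced by name
  webhookTools: [createTicket],  // forwarded by the server to the webhook URL
  clientTools: [confirmRefund],  // surfaced to the UI for human-in-the-loop approval
  stopWhen: [stepCountIs(8)],
});

// supportAgent.generate({ prompt: "..." }) can be called once the instance is
// bound to a client (see createAgent()/bindAgent() in the next hunk).
```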
@@ -2484,6 +3320,110 @@ declare class BlinkAIImpl implements BlinkAI {
  * - `duration`: Audio duration in seconds
  */
  transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
+ /**
+ * Runs an AI agent that can use tools in a loop to accomplish tasks.
+ *
+ * @param options - Agent request options
+ * - `stream`: Whether to stream the response (true for UI, false for headless)
+ * - `prompt`: Simple string prompt (for non-streaming, mutually exclusive with messages)
+ * - `messages`: Array of UI messages (for streaming or non-streaming with history)
+ * - `agent`: Agent configuration (model, tools, system prompt, etc.)
+ * - `signal`: AbortSignal for cancellation
+ *
+ * @example Non-streaming with prompt
+ * ```ts
+ * const result = await blink.ai.agent({
+ * stream: false,
+ * prompt: 'What is the weather in Tokyo?',
+ * agent: {
+ * model: 'openai/gpt-4o',
+ * tools: ['web_search']
+ * }
+ * })
+ * console.log(result.text)
+ * ```
+ *
+ * @example Streaming with messages (for useChat integration)
+ * ```ts
+ * const response = await blink.ai.agent({
+ * stream: true,
+ * messages: [{ role: 'user', content: 'Search for AI news' }],
+ * agent: {
+ * model: 'openai/gpt-4o',
+ * tools: [webSearch, fetchUrl]
+ * }
+ * })
+ * // Response is a ReadableStream for SSE
+ * ```
+ *
+ * @returns For non-streaming: Promise<AgentResponse>
+ * For streaming: Promise<Response> (SSE stream)
+ */
+ agent(options: {
+ stream: false;
+ prompt: string;
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<AgentResponse>;
+ agent(options: {
+ stream: false;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<AgentResponse>;
+ agent(options: {
+ stream: true;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<Response>;
+ agent(options: {
+ stream: true;
+ prompt: string;
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<Response>;
+ /**
+ * Creates a reusable Agent instance with the Vercel AI SDK pattern.
+ *
+ * The Agent can be used multiple times with different prompts:
+ * - `agent.generate({ prompt })` for non-streaming
+ * - `agent.stream({ prompt })` for streaming
+ *
+ * @param options - Agent configuration (model, tools, system, etc.)
+ * @returns Agent instance
+ *
+ * @example
+ * ```ts
+ * const weatherAgent = blink.ai.createAgent({
+ * model: 'anthropic/claude-sonnet-4-20250514',
+ * system: 'You are a helpful weather assistant.',
+ * tools: [webSearch, fetchUrl],
+ * maxSteps: 10,
+ * })
+ *
+ * // Non-streaming
+ * const result = await weatherAgent.generate({
+ * prompt: 'What is the weather in San Francisco?',
+ * })
+ *
+ * // Streaming
+ * const stream = await weatherAgent.stream({
+ * prompt: 'Tell me about weather patterns',
+ * })
+ * ```
+ */
+ createAgent(options: AgentOptions): Agent;
+ /**
+ * Binds an existing Agent instance to this client's HTTP client.
+ *
+ * Used internally by useAgent() when an Agent instance is passed.
+ * This allows agents created with `new Agent()` to be used with the hook.
+ *
+ * @param agent - Existing Agent instance
+ * @returns The same Agent instance (with httpClient set)
+ */
+ bindAgent(agent: Agent): Agent;
  }

  /**
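Unlike `createAgent()`, `bindAgent()` has no example in the declarations above. A hedged sketch of the pattern it implies: define an `Agent` at module scope with `new Agent()`, then bind it to a client before calling it. The model, prompt, and project id are placeholders.

```ts
// Sketch: module-level Agent bound to a client via blink.ai.bindAgent().
import { Agent, createClient, coreTools } from "@blinkdotnew/sdk";

export const researchAgent = new Agent({
  model: "openai/gpt-4o",
  system: "You research topics and cite sources.",
  tools: [...coreTools], // web_search, fetch_url, run_code
  maxSteps: 10,
});

const blink = createClient({ projectId: "your-project-id" }); // placeholder
const bound = blink.ai.bindAgent(researchAgent); // same instance, now wired to the client's HTTP client

const result = await bound.generate({ prompt: "Summarize the latest WebGPU news." });
console.log(result.text, result.usage.totalTokens);
```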
@@ -2578,4 +3518,147 @@ declare class BlinkConnectorsImpl implements BlinkConnectors {
  saveApiKey<TMetadata = Record<string, unknown>>(provider: ConnectorProvider, request: ConnectorApiKeyRequest<TMetadata>): Promise<ConnectorApiKeyResponse>;
  }

- export { type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, BlinkConnectorError, type BlinkConnectors, BlinkConnectorsImpl, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkUser, type ConnectorApiKeyRequest, type ConnectorApiKeyResponse, type ConnectorAuthMode, type ConnectorExecuteRequest, type ConnectorExecuteResponse, type ConnectorProvider, type ConnectorStatusResponse, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type ImageGenerationRequest, type ImageGenerationResponse, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenUsage, type TranscriptionRequest, type TranscriptionResponse, type UpdateOptions, type UpsertOptions, type WebBrowserModule, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, platform };
+ /**
+ * Core Agent Tools
+ *
+ * Tool names for web search, URL fetching, and code execution.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Search the web for current information (Exa AI) */
+ declare const webSearch = "web_search";
+ /** Fetch URL content as clean text (HTML stripped) */
+ declare const fetchUrl = "fetch_url";
+ /** Execute Python code in isolated sandbox */
+ declare const runCode = "run_code";
+ /** Core tools for research agents */
+ declare const coreTools: readonly ["web_search", "fetch_url", "run_code"];
+
+ /**
+ * Sandbox Tools (Cursor-like)
+ *
+ * Tool names for file and command execution in coding agents.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Read file contents from sandbox */
+ declare const readFile = "read_file";
+ /** List files and directories */
+ declare const listDir = "list_dir";
+ /** Create/overwrite a file (use search_replace for existing files!) */
+ declare const writeFile = "write_file";
+ /** Search and replace in files (PREFERRED for editing) */
+ declare const searchReplace = "search_replace";
+ /** Regex content search using ripgrep */
+ declare const grep = "grep";
+ /** Find files by name pattern */
+ declare const globFileSearch = "glob_file_search";
+ /** Execute shell command in sandbox */
+ declare const runTerminalCmd = "run_terminal_cmd";
+ /** Get public URL for a sandbox port */
+ declare const getHost = "get_host";
+ /** All sandbox tools for coding agents */
+ declare const sandboxTools: readonly ["read_file", "list_dir", "write_file", "search_replace", "grep", "glob_file_search", "run_terminal_cmd", "get_host"];
+
+ /**
+ * Database Tools
+ *
+ * Tool names for RLS-enforced database operations.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Insert a new row (user_id auto-set via RLS) */
+ declare const dbInsert = "db_insert";
+ /** List rows with filtering/pagination (RLS enforced) */
+ declare const dbList = "db_list";
+ /** Get a single row by ID (RLS enforced) */
+ declare const dbGet = "db_get";
+ /** Update a row by ID (RLS enforced) */
+ declare const dbUpdate = "db_update";
+ /** Delete a row by ID (RLS enforced) */
+ declare const dbDelete = "db_delete";
+ /** All database tools */
+ declare const dbTools: readonly ["db_insert", "db_list", "db_get", "db_update", "db_delete"];
+
+ /**
+ * Storage Tools
+ *
+ * Tool names for file storage operations.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Upload a file (base64 or URL) */
+ declare const storageUpload = "storage_upload";
+ /** Download file from storage */
+ declare const storageDownload = "storage_download";
+ /** List files in a directory */
+ declare const storageList = "storage_list";
+ /** Delete a file */
+ declare const storageDelete = "storage_delete";
+ /** Get public URL for a file */
+ declare const storagePublicUrl = "storage_public_url";
+ /** Move/rename a file */
+ declare const storageMove = "storage_move";
+ /** Copy a file */
+ declare const storageCopy = "storage_copy";
+ /** All storage tools */
+ declare const storageTools: readonly ["storage_upload", "storage_download", "storage_list", "storage_delete", "storage_public_url", "storage_move", "storage_copy"];
+
+ /**
+ * RAG Agent Tool
+ *
+ * Tool name for knowledge base search.
+ * The server has the full schema - SDK just passes tool name.
+ */
+ /** Search knowledge base using semantic similarity */
+ declare const ragSearch = "rag_search";
+ /** All RAG tools */
+ declare const ragTools: readonly ["rag_search"];
+
+ /**
+ * Media Agent Tools
+ *
+ * Tool names for AI image and video generation.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Generate images from text prompts */
+ declare const generateImage = "generate_image";
+ /** Edit existing images with text prompts */
+ declare const editImage = "edit_image";
+ /** Generate videos from text prompts */
+ declare const generateVideo = "generate_video";
+ /** Animate static images into videos */
+ declare const imageToVideo = "image_to_video";
+ /** All media generation tools */
+ declare const mediaTools: readonly ["generate_image", "edit_image", "generate_video", "image_to_video"];
+
+ /**
+ * Agent Tools
+ *
+ * Tool names for use with blink.ai.agent() and useAgent()
+ *
+ * Built-in tools are just string names - the server has the schemas.
+ *
+ * @example
+ * ```ts
+ * import {
+ * webSearch, fetchUrl, runCode, // Core tools
+ * ragSearch, // RAG tool
+ * sandboxTools, dbTools, storageTools, // Tool bundles
+ * mediaTools // Media generation
+ * } from '@blinkdotnew/sdk'
+ *
+ * const agent = blink.ai.createAgent({
+ * model: 'google/gemini-3-flash',
+ * tools: [...sandboxTools, webSearch, ragSearch]
+ * })
+ * ```
+ */
+
+ /**
+ * Convert tools array for API request
+ *
+ * Since tools are now just strings, this is essentially an identity function.
+ * Kept for backward compatibility.
+ *
+ * @internal Used by SDK to serialize tools for API
+ */
+ declare function serializeTools(tools: string[]): string[];
+
+ export { type AISearchOptions, Agent, type AgentBilling, type AgentConfig, type AgentNonStreamMessagesRequest, type AgentNonStreamPromptRequest, type AgentOptions, type AgentRequest, type AgentResponse, type AgentStep, type AgentStreamRequest, type TokenUsage as AgentTokenUsage, type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, BlinkConnectorError, type BlinkConnectors, BlinkConnectorsImpl, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRAG, BlinkRAGImpl, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkSandbox, BlinkSandboxImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkTokenType, type BlinkUser, type ClientTool, type ConnectorApiKeyRequest, type ConnectorApiKeyResponse, type ConnectorAuthMode, type ConnectorExecuteRequest, type ConnectorExecuteResponse, type ConnectorProvider, type ConnectorStatusResponse, type ContextPolicy, type CreateCollectionOptions, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type GenerateOptions, type ImageGenerationRequest, type ImageGenerationResponse, type JSONSchema, type ListDocumentsOptions, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RAGAISearchResult, type RAGAISearchSource, type RAGCollection, type RAGDocument, type RAGSearchResponse, type RAGSearchResult, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, SANDBOX_TEMPLATES, type Sandbox, type SandboxConnectOptions, SandboxConnectionError, type SandboxCreateOptions, type SandboxTemplate, type SearchOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StopCondition, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type StreamOptions, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenIntrospectionResult, type TokenUsage$1 as TokenUsage, type ToolCall, type ToolResult, type TranscriptionRequest, type TranscriptionResponse, type UIMessage, type UIMessagePart, type UpdateOptions, type UploadOptions, type UpsertOptions, type WaitForReadyOptions, type WebBrowserModule, WebStorageAdapter, type WebhookTool, coreTools, createClient, dbDelete, dbGet, dbInsert, dbList, dbTools, dbUpdate, editImage, fetchUrl, generateImage, generateVideo, getDefaultStorageAdapter, getHost, globFileSearch, grep, imageToVideo, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, listDir, mediaTools, platform, ragSearch, ragTools, readFile, runCode, runTerminalCmd, sandboxTools, searchReplace, serializeTools, stepCountIs, storageCopy, storageDelete, storageDownload, storageList, storageMove, storagePublicUrl, storageTools, storageUpload, webSearch, writeFile };