@blinkdotnew/dev-sdk 2.2.3 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -353,7 +353,7 @@ interface StorageDownloadResponse {
  contentType?: string;
  size?: number;
  }
- interface TokenUsage {
+ interface TokenUsage$1 {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
@@ -386,7 +386,7 @@ interface TextGenerationRequest {
  interface TextGenerationResponse {
  text: string;
  finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls';
- usage?: TokenUsage;
+ usage?: TokenUsage$1;
  files?: any[];
  reasoningDetails?: any[];
  toolCalls?: any[];
@@ -400,7 +400,7 @@ interface TextGenerationResponse {
  stepType?: string;
  text?: string;
  finishReason?: string;
- usage?: TokenUsage;
+ usage?: TokenUsage$1;
  }>;
  sources?: any[];
  providerMetadata?: any;
@@ -418,7 +418,7 @@ interface ObjectGenerationRequest {
  interface ObjectGenerationResponse {
  object: any;
  finishReason?: 'stop' | 'length' | 'content_filter';
- usage?: TokenUsage;
+ usage?: TokenUsage$1;
  warnings?: string[];
  providerMetadata?: {
  openai?: {
@@ -561,6 +561,11 @@ interface BlinkAI {
  generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
  generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
  transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
+ agent(options: any): Promise<any>;
+ /** Creates a reusable Agent instance (Vercel AI SDK pattern) */
+ createAgent(options: any): any;
+ /** Binds an existing Agent instance to this client's httpClient */
+ bindAgent(agent: any): any;
  }
  interface DataExtraction {
  chunks: string[];
@@ -806,8 +811,38 @@ interface SendEmailResponse {
  interface BlinkNotifications {
  email(params: SendEmailRequest): Promise<SendEmailResponse>;
  }
+ /**
+ * Token type in Blink Auth system
+ * - `access`: Regular user access token (short-lived)
+ * - `service`: Service token for server-side operations (permanent secret key)
+ */
+ type BlinkTokenType = 'access' | 'service';
+ /**
+ * Result of token introspection
+ * Used by edge functions and server-side code to verify user tokens
+ */
+ interface TokenIntrospectionResult {
+ /** Whether the token is valid */
+ valid: boolean;
+ /** Project ID from the token */
+ projectId?: string;
+ /** User ID (Firebase UID) - only present for access tokens */
+ userId?: string;
+ /** User's email - only present for access tokens */
+ email?: string;
+ /** Token type: 'access' or 'service' */
+ tokenType?: BlinkTokenType;
+ /** User's role in the app (if set via app_role claim) */
+ appRole?: string;
+ /** Token expiration timestamp (Unix seconds) - not present for secret keys */
+ exp?: number;
+ /** Legacy service key ID (for JWT-based service tokens) */
+ svcKeyId?: string;
+ /** Error message if token is invalid */
+ error?: string;
+ }

- type ConnectorProvider = 'discord' | 'notion' | 'google_drive' | 'google_calendar' | 'google_slides' | 'google_docs' | 'google_sheets' | 'slack' | 'linkedin' | 'hubspot' | 'ai';
+ type ConnectorProvider = 'discord' | 'notion' | 'google_drive' | 'google_calendar' | 'ai';
  type ConnectorAuthMode = 'oauth' | 'api_key' | 'blink_managed' | 'hybrid';
  interface ConnectorStatusData {
  connected: boolean;
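Editor's note: a minimal sketch of how the new `TokenIntrospectionResult` and `BlinkTokenType` shapes above might be consumed in server-side code. `requireRole` is a hypothetical helper (not part of the SDK), and `blink.auth.verifyToken` is the introspection method declared further down in this diff.

```ts
import { createClient, type TokenIntrospectionResult } from '@blinkdotnew/sdk'

// Assumes the project id / secret key come from your environment.
const blink = createClient({ projectId: 'my-project', secretKey: 'sk_...' })

// Hypothetical guard: accept only valid tokens, and require a matching app_role
// claim for regular access tokens. Service tokens carry no userId/email.
async function requireRole(
  authHeader: string | null,
  role: string,
): Promise<TokenIntrospectionResult> {
  const result = await blink.auth.verifyToken(authHeader)
  if (!result.valid) {
    throw new Error(result.error ?? 'Invalid token')
  }
  if (result.tokenType === 'service') {
    // Server-to-server caller holding the permanent secret key.
    return result
  }
  if (result.appRole !== role) {
    throw new Error(`Expected app_role "${role}", got "${result.appRole ?? 'none'}"`)
  }
  return result
}
```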
@@ -1036,6 +1071,94 @@ declare class HttpClient {
  cfg_scale?: number;
  signal?: AbortSignal;
  }): Promise<BlinkResponse<any>>;
+ /**
+ * AI Agent request (non-streaming)
+ * Returns JSON response with text, steps, usage, and billing
+ */
+ aiAgent(requestBody: {
+ stream: false;
+ prompt?: string;
+ messages?: Array<{
+ role: string;
+ content: string | any[];
+ parts?: any[];
+ }>;
+ agent: {
+ model: string;
+ system?: string;
+ tools?: string[];
+ webhook_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ webhook_url: string;
+ }>;
+ client_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ }>;
+ tool_choice?: 'auto' | 'required' | 'none';
+ stop_when?: Array<{
+ type: string;
+ count: number;
+ }>;
+ prepare_step?: {
+ context_policy: any;
+ };
+ };
+ }, signal?: AbortSignal): Promise<BlinkResponse<any>>;
+ /**
+ * AI Agent streaming request
+ * Returns raw Response for SSE streaming (compatible with AI SDK useChat)
+ */
+ aiAgentStream(requestBody: {
+ stream: true;
+ prompt?: string;
+ messages?: Array<{
+ role: string;
+ content: string | any[];
+ parts?: any[];
+ }>;
+ agent: {
+ model: string;
+ system?: string;
+ tools?: string[];
+ webhook_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ webhook_url: string;
+ }>;
+ client_tools?: Array<{
+ name: string;
+ description: string;
+ input_schema: any;
+ }>;
+ tool_choice?: 'auto' | 'required' | 'none';
+ stop_when?: Array<{
+ type: string;
+ count: number;
+ }>;
+ prepare_step?: {
+ context_policy: any;
+ };
+ };
+ }, signal?: AbortSignal): Promise<Response>;
+ /**
+ * RAG AI Search streaming request
+ * Returns raw Response for SSE streaming
+ */
+ ragAiSearchStream(body: {
+ collection_id?: string;
+ collection_name?: string;
+ query: string;
+ model?: string;
+ max_context_chunks?: number;
+ score_threshold?: number;
+ system_prompt?: string;
+ stream: true;
+ }, signal?: AbortSignal): Promise<Response>;
  /**
  * Data-specific requests
  */
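Editor's note: `HttpClient` is internal (the public entry points are `blink.ai.agent()` and the `Agent` class later in this diff), but the payload shape its new `aiAgent`/`aiAgentStream` methods accept is declared inline above. A shape-only sketch, with placeholder tool names and webhook URL:

```ts
// Mirrors the declared `aiAgent` requestBody parameter; not an API call by itself.
const agentRequestBody = {
  stream: false as const,
  prompt: 'Summarize the latest deployment logs',
  agent: {
    model: 'openai/gpt-4o',
    system: 'You are a concise DevOps assistant.',
    tools: ['web_search'],
    webhook_tools: [
      {
        name: 'lookup_deploy', // placeholder tool
        description: 'Fetch deployment metadata by id',
        input_schema: { type: 'object', properties: { id: { type: 'string' } } },
        webhook_url: 'https://example.com/hooks/lookup-deploy', // placeholder endpoint
      },
    ],
    tool_choice: 'auto' as const,
    stop_when: [{ type: 'step_count_is', count: 8 }],
  },
}
```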
@@ -1477,6 +1600,44 @@ declare class BlinkAuth {
  expires_in?: number;
  refresh_expires_in?: number;
  }, persist?: boolean): Promise<BlinkUser>;
+ /**
+ * Verify a Blink Auth token using the introspection endpoint.
+ *
+ * **Server-side / Edge Function use only.**
+ *
+ * This is the recommended way to verify user tokens in Deno Edge Functions
+ * and other server-side contexts. It calls the Blink API introspection
+ * endpoint which validates the token without exposing the JWT secret.
+ *
+ * @param token - The raw JWT token (without "Bearer " prefix) or full Authorization header
+ * @returns Token introspection result with validity and claims
+ *
+ * @example
+ * // Deno Edge Function usage
+ * import { createClient } from "npm:@blinkdotnew/sdk";
+ *
+ * const blink = createClient({
+ * projectId: Deno.env.get("BLINK_PROJECT_ID")!,
+ * secretKey: Deno.env.get("BLINK_SECRET_KEY"),
+ * });
+ *
+ * async function handler(req: Request): Promise<Response> {
+ * const authHeader = req.headers.get("Authorization");
+ * const result = await blink.auth.verifyToken(authHeader);
+ *
+ * if (!result.valid) {
+ * return new Response(JSON.stringify({ error: result.error }), { status: 401 });
+ * }
+ *
+ * // User is authenticated
+ * console.log("User ID:", result.userId);
+ * console.log("Email:", result.email);
+ * console.log("Project:", result.projectId);
+ *
+ * // Continue with your logic...
+ * }
+ */
+ verifyToken(token: string | null): Promise<TokenIntrospectionResult>;
  /**
  * Refresh access token using refresh token
  */
@@ -1827,11 +1988,348 @@ interface BlinkFunctions {
  invoke<T = any>(functionSlug: string, options?: FunctionsInvokeOptions): Promise<FunctionsInvokeResponse<T>>;
  }

+ /**
+ * Blink RAG Module - Vector Search and AI-powered retrieval
+ *
+ * Provides document ingestion, vector search, and RAG (Retrieval-Augmented Generation) capabilities.
+ */
+
+ interface RAGCollection {
+ id: string;
+ name: string;
+ description: string | null;
+ embeddingModel: string;
+ embeddingDimensions: number;
+ indexMetric: 'cosine' | 'l2';
+ chunkMaxTokens: number;
+ chunkOverlapTokens: number;
+ documentCount: number;
+ chunkCount: number;
+ shared: boolean;
+ createdAt: string;
+ updatedAt: string;
+ }
+ interface RAGDocument {
+ id: string;
+ collectionId: string;
+ filename: string;
+ sourceType: 'file' | 'url' | 'text';
+ sourceUrl: string | null;
+ contentType: string | null;
+ fileSize: number | null;
+ status: 'pending' | 'processing' | 'ready' | 'error';
+ errorMessage: string | null;
+ processingStartedAt: string | null;
+ processingCompletedAt: string | null;
+ chunkCount: number;
+ tokenCount: number | null;
+ metadata: Record<string, any>;
+ createdAt: string;
+ updatedAt: string;
+ }
+ interface RAGSearchResult {
+ chunkId: string;
+ documentId: string;
+ filename: string;
+ content: string;
+ score: number;
+ chunkIndex: number;
+ metadata: Record<string, any>;
+ }
+ interface RAGSearchResponse {
+ results: RAGSearchResult[];
+ query: string;
+ collectionId: string;
+ totalResults: number;
+ }
+ interface RAGAISearchSource {
+ documentId: string;
+ filename: string;
+ chunkId: string;
+ excerpt: string;
+ score: number;
+ }
+ interface RAGAISearchResult {
+ answer: string;
+ sources: RAGAISearchSource[];
+ query: string;
+ model: string;
+ usage: {
+ inputTokens: number;
+ outputTokens: number;
+ };
+ }
+ interface CreateCollectionOptions {
+ name: string;
+ description?: string;
+ embeddingModel?: string;
+ embeddingDimensions?: number;
+ indexMetric?: 'cosine' | 'l2';
+ chunkMaxTokens?: number;
+ chunkOverlapTokens?: number;
+ shared?: boolean;
+ }
+ interface UploadOptions {
+ collectionId?: string;
+ collectionName?: string;
+ filename: string;
+ content?: string;
+ file?: {
+ data: string;
+ contentType: string;
+ };
+ url?: string;
+ metadata?: Record<string, any>;
+ }
+ interface SearchOptions {
+ collectionId?: string;
+ collectionName?: string;
+ query: string;
+ maxResults?: number;
+ scoreThreshold?: number;
+ filters?: Record<string, any>;
+ includeContent?: boolean;
+ }
+ interface AISearchOptions {
+ collectionId?: string;
+ collectionName?: string;
+ query: string;
+ model?: string;
+ maxContextChunks?: number;
+ scoreThreshold?: number;
+ systemPrompt?: string;
+ stream?: boolean;
+ /** AbortSignal for cancellation (streaming only) */
+ signal?: AbortSignal;
+ }
+ interface ListDocumentsOptions {
+ collectionId?: string;
+ status?: 'pending' | 'processing' | 'ready' | 'error';
+ }
+ interface WaitForReadyOptions {
+ timeoutMs?: number;
+ pollIntervalMs?: number;
+ }
+ declare class BlinkRAGImpl {
+ private httpClient;
+ private projectId;
+ constructor(httpClient: HttpClient);
+ /**
+ * Build URL with project_id prefix
+ */
+ private url;
+ /**
+ * Create a new RAG collection
+ */
+ createCollection(options: CreateCollectionOptions): Promise<RAGCollection>;
+ /**
+ * List all collections accessible to the current user
+ */
+ listCollections(): Promise<RAGCollection[]>;
+ /**
+ * Get a specific collection by ID
+ */
+ getCollection(collectionId: string): Promise<RAGCollection>;
+ /**
+ * Delete a collection and all its documents
+ */
+ deleteCollection(collectionId: string): Promise<void>;
+ /**
+ * Upload a document for processing
+ *
+ * @example
+ * // Upload text content
+ * const doc = await blink.rag.upload({
+ * collectionName: 'docs',
+ * filename: 'notes.txt',
+ * content: 'My document content...'
+ * })
+ *
+ * @example
+ * // Upload from URL
+ * const doc = await blink.rag.upload({
+ * collectionId: 'col_abc123',
+ * filename: 'article.html',
+ * url: 'https://example.com/article'
+ * })
+ *
+ * @example
+ * // Upload a file (base64)
+ * const doc = await blink.rag.upload({
+ * collectionName: 'docs',
+ * filename: 'report.pdf',
+ * file: { data: base64Data, contentType: 'application/pdf' }
+ * })
+ */
+ upload(options: UploadOptions): Promise<RAGDocument>;
+ /**
+ * Get document status and metadata
+ */
+ getDocument(documentId: string): Promise<RAGDocument>;
+ /**
+ * List documents, optionally filtered by collection or status
+ */
+ listDocuments(options?: ListDocumentsOptions): Promise<RAGDocument[]>;
+ /**
+ * Delete a document and its chunks
+ */
+ deleteDocument(documentId: string): Promise<void>;
+ /**
+ * Wait for a document to finish processing
+ *
+ * @example
+ * const doc = await blink.rag.upload({ ... })
+ * const readyDoc = await blink.rag.waitForReady(doc.id)
+ * console.log(`Processed ${readyDoc.chunkCount} chunks`)
+ */
+ waitForReady(documentId: string, options?: WaitForReadyOptions): Promise<RAGDocument>;
+ /**
+ * Search for similar chunks using vector similarity
+ *
+ * @example
+ * const results = await blink.rag.search({
+ * collectionName: 'docs',
+ * query: 'How do I configure authentication?',
+ * maxResults: 5
+ * })
+ */
+ search(options: SearchOptions): Promise<RAGSearchResponse>;
+ /**
+ * Perform RAG: search + LLM answer generation
+ *
+ * @example
+ * // Non-streaming
+ * const result = await blink.rag.aiSearch({
+ * collectionName: 'docs',
+ * query: 'What are the main features?'
+ * })
+ * console.log(result.answer)
+ *
+ * @example
+ * // Streaming
+ * const stream = await blink.rag.aiSearch({
+ * collectionName: 'docs',
+ * query: 'Explain the architecture',
+ * stream: true
+ * })
+ */
+ aiSearch(options: AISearchOptions & {
+ stream?: false;
+ }): Promise<RAGAISearchResult>;
+ aiSearch(options: AISearchOptions & {
+ stream: true;
+ }): Promise<ReadableStream<Uint8Array>>;
+ }
+ type BlinkRAG = BlinkRAGImpl;
+
+ /**
+ * Blink Sandbox Module - Persistent coding environments for AI agents
+ *
+ * Provides lifecycle management for E2B sandboxes with auto-pause and resume.
+ * Used with agent.generate({ sandbox }) for AI coding agents.
+ */
+
+ interface Sandbox {
+ /** Sandbox ID (sbx_xxx format) - STORE THIS for persistence! */
+ id: string;
+ /** Template used to create the sandbox */
+ template: string;
+ /** Get public URL for any port (sync - computed locally from hostPattern) */
+ getHost(port: number): string;
+ }
+ interface SandboxCreateOptions {
+ /** E2B template ID (default: 'devtools-base') */
+ template?: string;
+ /** Inactivity timeout in ms (default: 5 min, max: 60 min) */
+ timeoutMs?: number;
+ /** Custom metadata for tracking */
+ metadata?: Record<string, string>;
+ /**
+ * Secret names to inject as environment variables.
+ * Secrets are resolved server-side from project secrets vault.
+ * Add secrets via Workspace > Secrets or request_secret tool.
+ * @example ['ANTHROPIC_API_KEY', 'GITHUB_TOKEN']
+ */
+ secrets?: string[];
+ }
+ interface SandboxConnectOptions {
+ /** Reset inactivity timeout in ms (default: 5 min) */
+ timeoutMs?: number;
+ }
+ interface BlinkSandbox {
+ /**
+ * Create a new persistent sandbox with auto-pause enabled.
+ *
+ * @example
+ * ```ts
+ * const sandbox = await blink.sandbox.create({ template: 'nextjs-app' })
+ * console.log(sandbox.id) // "sbx_abc123xyz" - Store this!
+ * console.log(sandbox.getHost(3000)) // "3000-sbx_abc123xyz.preview-blink.com"
+ * ```
+ */
+ create(options?: SandboxCreateOptions): Promise<Sandbox>;
+ /**
+ * Connect to an existing sandbox. Auto-resumes if paused.
+ * Built-in retry with exponential backoff (3 retries: 250ms → 500ms → 1000ms).
+ *
+ * @example
+ * ```ts
+ * const sandbox = await blink.sandbox.connect(storedSandboxId)
+ * ```
+ */
+ connect(sandboxId: string, options?: SandboxConnectOptions): Promise<Sandbox>;
+ /**
+ * Permanently kill a sandbox. Cannot be resumed after kill.
+ *
+ * @example
+ * ```ts
+ * await blink.sandbox.kill(sandboxId)
+ * await blink.db.user_sandboxes.delete({ sandbox_id: sandboxId })
+ * ```
+ */
+ kill(sandboxId: string): Promise<void>;
+ }
+ declare const SANDBOX_TEMPLATES: readonly ["devtools-base", "nextjs-app", "nextjs-app-bun", "vite-react", "vite-react-bun", "expo-app", "desktop", "claude-code"];
+ type SandboxTemplate = typeof SANDBOX_TEMPLATES[number];
+ declare class SandboxConnectionError extends Error {
+ sandboxId: string;
+ constructor(sandboxId: string, cause?: Error);
+ }
+ declare class BlinkSandboxImpl implements BlinkSandbox {
+ private httpClient;
+ private projectId;
+ constructor(httpClient: HttpClient);
+ /**
+ * Build URL with project_id prefix
+ */
+ private url;
+ create(options?: SandboxCreateOptions): Promise<Sandbox>;
+ connect(sandboxId: string, options?: SandboxConnectOptions): Promise<Sandbox>;
+ kill(sandboxId: string): Promise<void>;
+ }
+
  /**
  * Blink Client - Main SDK entry point
  * Factory function and client class for the Blink SDK
  */

+ /**
+ * Get the default Blink client instance.
+ * Returns the client created by createClient().
+ *
+ * @throws Error if createClient() hasn't been called yet
+ * @returns The default BlinkClient instance
+ *
+ * @example
+ * ```ts
+ * // First, create client somewhere in your app
+ * createClient({ projectId: '...', secretKey: '...' })
+ *
+ * // Later, get the default client
+ * const client = getDefaultClient()
+ * ```
+ */
+ declare function getDefaultClient(): BlinkClientImpl;
  interface BlinkClient {
  auth: BlinkAuth;
  db: BlinkDatabase;
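Editor's note: a sketch of the end-to-end flow the new RAG module above supports (createCollection → upload → waitForReady → search → aiSearch). Collection and file names are placeholders, and error handling is omitted.

```ts
import { createClient } from '@blinkdotnew/sdk'

// Assumes project id and secret key from your environment.
const blink = createClient({ projectId: 'my-project', secretKey: 'sk_...' })

async function indexAndAsk() {
  // Create a collection (chunking parameters are optional).
  await blink.rag.createCollection({ name: 'docs', chunkMaxTokens: 512 })

  // Ingest raw text; file (base64) and URL sources are also declared above.
  const doc = await blink.rag.upload({
    collectionName: 'docs',
    filename: 'handbook.md',
    content: '# Handbook\n...',
  })

  // Poll until chunking and embedding complete.
  const ready = await blink.rag.waitForReady(doc.id, { timeoutMs: 60_000 })
  console.log(`Indexed ${ready.chunkCount} chunks`)

  // Plain vector search over the collection.
  const hits = await blink.rag.search({
    collectionName: 'docs',
    query: 'vacation policy',
    maxResults: 3,
  })
  console.log(hits.totalResults)

  // RAG answer with cited sources (non-streaming overload).
  const answer = await blink.rag.aiSearch({
    collectionName: 'docs',
    query: 'Summarize the vacation policy',
  })
  console.log(answer.answer, answer.sources.map((s) => s.filename))
}
```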
@@ -1843,9 +2341,42 @@ interface BlinkClient {
  analytics: BlinkAnalytics;
  connectors: BlinkConnectors;
  functions: BlinkFunctions;
+ rag: BlinkRAG;
+ sandbox: BlinkSandbox;
+ }
+ declare class BlinkClientImpl implements BlinkClient {
+ auth: BlinkAuth;
+ db: BlinkDatabase;
+ storage: BlinkStorage;
+ ai: BlinkAI;
+ data: BlinkData;
+ realtime: BlinkRealtime;
+ notifications: BlinkNotifications;
+ analytics: BlinkAnalytics;
+ connectors: BlinkConnectors;
+ functions: BlinkFunctions;
+ rag: BlinkRAG;
+ sandbox: BlinkSandbox;
+ /** @internal HTTP client for Agent auto-binding */
+ _httpClient: HttpClient;
+ constructor(config: BlinkClientConfig);
  }
  /**
- * Create a new Blink client instance
+ * Create a new Blink client instance.
+ * This also sets the default client for Agent auto-binding.
+ *
+ * @example
+ * ```ts
+ * // Create client (call once in your app)
+ * const blink = createClient({
+ * projectId: 'my-project',
+ * secretKey: 'sk_...',
+ * })
+ *
+ * // Now Agent works without explicit binding
+ * const agent = new Agent({ model: 'openai/gpt-4o' })
+ * await agent.generate({ prompt: 'Hello!' })
+ * ```
  */
  declare function createClient(config: BlinkClientConfig): BlinkClient;

@@ -1944,6 +2475,372 @@ declare class BlinkStorageImpl implements BlinkStorage {
  remove(...paths: string[]): Promise<void>;
  }

+ /**
+ * Agent Tool Types
+ *
+ * Type definitions for AI agent tools used with blink.ai.agent()
+ */
+ /**
+ * JSON Schema type for tool input definitions
+ */
+ type JSONSchema = Record<string, any>;
+ /**
+ * A custom webhook tool definition
+ *
+ * Webhook tools NEED schemas because the server forwards them to your endpoint.
+ */
+ interface WebhookTool {
+ /** Tool name */
+ name: string;
+ /** Human-readable description */
+ description: string;
+ /** JSON Schema for input parameters */
+ input_schema: JSONSchema;
+ /** URL to POST when tool is called */
+ webhook_url: string;
+ }
+ /**
+ * A client-side tool requiring user confirmation
+ *
+ * Client tools NEED schemas so the AI knows what inputs to provide.
+ */
+ interface ClientTool {
+ /** Tool name */
+ name: string;
+ /** Human-readable description */
+ description: string;
+ /** JSON Schema for input parameters */
+ input_schema: JSONSchema;
+ }
+ /**
+ * Stop condition for agent loop
+ */
+ interface StopCondition {
+ type: 'step_count_is';
+ count: number;
+ }
+ /**
+ * Context policy for managing conversation history
+ */
+ interface ContextPolicy {
+ strategy: 'token_budget';
+ max_input_tokens: number;
+ keep_system: boolean;
+ keep_last_messages: number;
+ trim_order: string[];
+ max_tool_result_bytes: number;
+ }
+ /**
+ * Agent configuration
+ */
+ interface AgentConfig {
+ /** Model ID in Vercel AI Gateway format: "provider/model-id" */
+ model: string;
+ /** System prompt */
+ system?: string;
+ /** Built-in tools to enable (tool names) */
+ tools?: string[];
+ /** Custom webhook tools */
+ webhook_tools?: WebhookTool[];
+ /** Client-side tools for HITL */
+ client_tools?: ClientTool[];
+ /** Tool choice strategy */
+ tool_choice?: 'auto' | 'required' | 'none';
+ /** Stop conditions */
+ stop_when?: StopCondition[];
+ /** Context management */
+ prepare_step?: {
+ context_policy: Partial<ContextPolicy>;
+ };
+ }
+ /**
+ * UI Message format (AI SDK compatible)
+ */
+ interface UIMessage {
+ id?: string;
+ role: 'user' | 'assistant';
+ content: string;
+ parts?: UIMessagePart[];
+ }
+ type UIMessagePart = {
+ type: 'text';
+ text: string;
+ } | {
+ type: 'tool-invocation';
+ toolCallId: string;
+ toolName: string;
+ state: 'pending' | 'result' | 'output-available';
+ input: Record<string, any>;
+ output?: any;
+ };
+ /**
+ * Agent request for streaming mode
+ */
+ interface AgentStreamRequest {
+ stream: true;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ }
+ /**
+ * Agent request for non-streaming mode with messages
+ */
+ interface AgentNonStreamMessagesRequest {
+ stream: false;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ }
+ /**
+ * Agent request for non-streaming mode with prompt
+ */
+ interface AgentNonStreamPromptRequest {
+ stream: false;
+ prompt: string;
+ agent: AgentConfig;
+ }
+ /**
+ * Union of all agent request types
+ */
+ type AgentRequest = AgentStreamRequest | AgentNonStreamMessagesRequest | AgentNonStreamPromptRequest;
+ /**
+ * Token usage information
+ */
+ interface TokenUsage {
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens?: number;
+ }
+ /**
+ * Tool call in a step
+ */
+ interface ToolCall {
+ toolCallId: string;
+ toolName: string;
+ args: Record<string, any>;
+ }
+ /**
+ * Tool result in a step
+ */
+ interface ToolResult {
+ toolCallId: string;
+ result: any;
+ }
+ /**
+ * Agent step information
+ */
+ interface AgentStep {
+ text: string;
+ toolCalls: ToolCall[];
+ toolResults: ToolResult[];
+ finishReason: string;
+ usage: TokenUsage;
+ }
+ /**
+ * Billing information
+ */
+ interface AgentBilling {
+ model: string;
+ creditsCharged: number;
+ costUSD: number;
+ breakdown?: {
+ ai: {
+ credits: number;
+ costUSD: number;
+ };
+ tools: Record<string, {
+ count: number;
+ credits: number;
+ costUSD: number;
+ }>;
+ };
+ }
+ /**
+ * Non-streaming agent response
+ */
+ interface AgentResponse {
+ text: string;
+ finishReason: 'stop' | 'length' | 'tool-calls' | 'error' | 'content-filter';
+ steps: AgentStep[];
+ usage: TokenUsage;
+ warnings?: Array<{
+ type: string;
+ message: string;
+ }>;
+ _billing: AgentBilling;
+ }
+
+ /**
+ * Agent Class
+ *
+ * Matches Vercel AI SDK ToolLoopAgent pattern:
+ * - Create agent instance with config
+ * - Call agent.generate() for non-streaming
+ * - Call agent.stream() for streaming
+ *
+ * Auto-binds to default client when createClient() has been called.
+ */
+
+ /**
+ * Options for creating an Agent instance
+ * Matches Vercel AI SDK ToolLoopAgent constructor options
+ */
+ interface AgentOptions {
+ /** Model ID in Vercel AI Gateway format: "provider/model-id" */
+ model: string;
+ /** System prompt / instructions */
+ system?: string;
+ /** Alias for system (Vercel AI SDK compatibility) */
+ instructions?: string;
+ /** Built-in tools to enable (tool names) */
+ tools?: string[];
+ /** Custom webhook tools */
+ webhookTools?: WebhookTool[];
+ /** Client-side tools for HITL */
+ clientTools?: ClientTool[];
+ /** Tool choice strategy */
+ toolChoice?: 'auto' | 'required' | 'none';
+ /** Stop conditions */
+ stopWhen?: StopCondition[];
+ /** Maximum number of steps (convenience for stopWhen) */
+ maxSteps?: number;
+ }
+ /**
+ * Options for agent.generate() call
+ */
+ interface GenerateOptions {
+ /** Simple text prompt */
+ prompt?: string;
+ /** Conversation history */
+ messages?: UIMessage[];
+ /** Sandbox for sandbox tools (object with id, or just sandboxId string) */
+ sandbox?: Sandbox | string;
+ /** Abort signal for cancellation */
+ signal?: AbortSignal;
+ }
+ /**
+ * Options for agent.stream() call
+ */
+ interface StreamOptions {
+ /** Simple text prompt */
+ prompt?: string;
+ /** Conversation history */
+ messages?: UIMessage[];
+ /** Sandbox for sandbox tools (object with id, or just sandboxId string) */
+ sandbox?: Sandbox | string;
+ /** Abort signal for cancellation */
+ signal?: AbortSignal;
+ }
+ /**
+ * AI Agent class following Vercel AI SDK ToolLoopAgent pattern.
+ *
+ * Create an agent instance with configuration, then use:
+ * - `agent.generate({ prompt })` for non-streaming one-shot generation
+ * - `agent.stream({ prompt })` for streaming real-time generation
+ *
+ * **Auto-binding:** After calling `createClient()`, agents automatically
+ * bind to the default client. No need for `blink.ai.createAgent()`.
+ *
+ * @example
+ * ```ts
+ * import { createClient, Agent, webSearch } from '@blinkdotnew/sdk'
+ *
+ * // Initialize client once
+ * createClient({ projectId: '...', secretKey: '...' })
+ *
+ * // Create agent - auto-binds to default client
+ * const weatherAgent = new Agent({
+ * model: 'anthropic/claude-sonnet-4-20250514',
+ * system: 'You are a helpful weather assistant.',
+ * tools: [webSearch],
+ * maxSteps: 10,
+ * })
+ *
+ * // Works immediately - no binding needed!
+ * const result = await weatherAgent.generate({
+ * prompt: 'What is the weather in San Francisco?',
+ * })
+ * console.log(result.text)
+ * ```
+ */
+ declare class Agent {
+ private httpClient;
+ private readonly config;
+ /**
+ * Create a new Agent instance.
+ * Auto-binds to default client if createClient() was called.
+ *
+ * @param options - Agent configuration options
+ */
+ constructor(options: AgentOptions);
+ /**
+ * Internal: Set the HTTP client (called by BlinkClient)
+ */
+ _setHttpClient(client: HttpClient): void;
+ /**
+ * Internal: Get the agent config for API requests
+ */
+ private getAgentConfig;
+ /**
+ * Generate a response (non-streaming)
+ *
+ * @param options - Generation options (prompt or messages)
+ * @returns Promise<AgentResponse> with text, steps, usage, and billing
+ *
+ * @example
+ * ```ts
+ * const result = await agent.generate({
+ * prompt: 'What is the weather in San Francisco?',
+ * })
+ * console.log(result.text)
+ * console.log(result.steps)
+ * ```
+ */
+ generate(options: GenerateOptions): Promise<AgentResponse>;
+ /**
+ * Stream a response (real-time)
+ *
+ * @param options - Stream options (prompt or messages)
+ * @returns Promise<Response> - AI SDK UI Message Stream for useChat compatibility
+ *
+ * @example
+ * ```ts
+ * const stream = await agent.stream({
+ * prompt: 'Tell me a story',
+ * })
+ *
+ * // Process stream
+ * for await (const chunk of stream.body) {
+ * // Handle chunk
+ * }
+ * ```
+ */
+ stream(options: StreamOptions): Promise<Response>;
+ /**
+ * Get the agent's model
+ */
+ get model(): string;
+ /**
+ * Get the agent's system prompt
+ */
+ get system(): string | undefined;
+ /**
+ * Get the agent's tools
+ */
+ get tools(): string[] | undefined;
+ }
+ /**
+ * Creates a stop condition for maximum step count
+ * Matches Vercel AI SDK's stepCountIs helper
+ *
+ * @example
+ * ```ts
+ * const agent = new Agent({
+ * model: 'openai/gpt-4o',
+ * stopWhen: [stepCountIs(10)],
+ * })
+ * ```
+ */
+ declare function stepCountIs(count: number): StopCondition;
+
  /**
  * Blink AI Module
  * Provides AI generation capabilities with Vercel AI SDK compatibility
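Editor's note: a sketch combining the `Agent` class, `stepCountIs`, and a `ClientTool` definition from the declarations above. The `confirm_purchase` tool is hypothetical, and how client-tool results are fed back into the loop is not shown in this file, so the example only inspects the recorded tool calls.

```ts
import { createClient, Agent, stepCountIs, webSearch, type ClientTool } from '@blinkdotnew/sdk'

// Sets the default client so new Agent() auto-binds.
createClient({ projectId: 'my-project', secretKey: 'sk_...' })

// Hypothetical HITL tool the model must call before completing a purchase.
const confirmPurchase: ClientTool = {
  name: 'confirm_purchase',
  description: 'Ask the user to approve a purchase before it is placed',
  input_schema: { type: 'object', properties: { amountUsd: { type: 'number' } } },
}

const shopper = new Agent({
  model: 'openai/gpt-4o',
  system: 'You help users buy items, always confirming before purchase.',
  tools: [webSearch],
  clientTools: [confirmPurchase],
  stopWhen: [stepCountIs(6)],
})

async function run() {
  const result = await shopper.generate({ prompt: 'Find a mechanical keyboard under $100' })
  // Each step records the tool calls made by the loop.
  for (const step of result.steps) {
    console.log(step.finishReason, step.toolCalls.map((c) => c.toolName))
  }
  console.log(result.text, result.usage.outputTokens)
}
```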
@@ -2484,6 +3381,110 @@ declare class BlinkAIImpl implements BlinkAI {
  * - `duration`: Audio duration in seconds
  */
  transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
+ /**
+ * Runs an AI agent that can use tools in a loop to accomplish tasks.
+ *
+ * @param options - Agent request options
+ * - `stream`: Whether to stream the response (true for UI, false for headless)
+ * - `prompt`: Simple string prompt (for non-streaming, mutually exclusive with messages)
+ * - `messages`: Array of UI messages (for streaming or non-streaming with history)
+ * - `agent`: Agent configuration (model, tools, system prompt, etc.)
+ * - `signal`: AbortSignal for cancellation
+ *
+ * @example Non-streaming with prompt
+ * ```ts
+ * const result = await blink.ai.agent({
+ * stream: false,
+ * prompt: 'What is the weather in Tokyo?',
+ * agent: {
+ * model: 'openai/gpt-4o',
+ * tools: ['web_search']
+ * }
+ * })
+ * console.log(result.text)
+ * ```
+ *
+ * @example Streaming with messages (for useChat integration)
+ * ```ts
+ * const response = await blink.ai.agent({
+ * stream: true,
+ * messages: [{ role: 'user', content: 'Search for AI news' }],
+ * agent: {
+ * model: 'openai/gpt-4o',
+ * tools: [webSearch, fetchUrl]
+ * }
+ * })
+ * // Response is a ReadableStream for SSE
+ * ```
+ *
+ * @returns For non-streaming: Promise<AgentResponse>
+ * For streaming: Promise<Response> (SSE stream)
+ */
+ agent(options: {
+ stream: false;
+ prompt: string;
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<AgentResponse>;
+ agent(options: {
+ stream: false;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<AgentResponse>;
+ agent(options: {
+ stream: true;
+ messages: UIMessage[];
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<Response>;
+ agent(options: {
+ stream: true;
+ prompt: string;
+ agent: AgentConfig;
+ signal?: AbortSignal;
+ }): Promise<Response>;
+ /**
+ * Creates a reusable Agent instance with the Vercel AI SDK pattern.
+ *
+ * The Agent can be used multiple times with different prompts:
+ * - `agent.generate({ prompt })` for non-streaming
+ * - `agent.stream({ prompt })` for streaming
+ *
+ * @param options - Agent configuration (model, tools, system, etc.)
+ * @returns Agent instance
+ *
+ * @example
+ * ```ts
+ * const weatherAgent = blink.ai.createAgent({
+ * model: 'anthropic/claude-sonnet-4-20250514',
+ * system: 'You are a helpful weather assistant.',
+ * tools: [webSearch, fetchUrl],
+ * maxSteps: 10,
+ * })
+ *
+ * // Non-streaming
+ * const result = await weatherAgent.generate({
+ * prompt: 'What is the weather in San Francisco?',
+ * })
+ *
+ * // Streaming
+ * const stream = await weatherAgent.stream({
+ * prompt: 'Tell me about weather patterns',
+ * })
+ * ```
+ */
+ createAgent(options: AgentOptions): Agent;
+ /**
+ * Binds an existing Agent instance to this client's HTTP client.
+ *
+ * Used internally by useAgent() when an Agent instance is passed.
+ * This allows agents created with `new Agent()` to be used with the hook.
+ *
+ * @param agent - Existing Agent instance
+ * @returns The same Agent instance (with httpClient set)
+ */
+ bindAgent(agent: Agent): Agent;
  }

  /**
@@ -2578,4 +3579,147 @@ declare class BlinkConnectorsImpl implements BlinkConnectors {
  saveApiKey<TMetadata = Record<string, unknown>>(provider: ConnectorProvider, request: ConnectorApiKeyRequest<TMetadata>): Promise<ConnectorApiKeyResponse>;
  }

- export { type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, BlinkConnectorError, type BlinkConnectors, BlinkConnectorsImpl, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkUser, type ConnectorApiKeyRequest, type ConnectorApiKeyResponse, type ConnectorAuthMode, type ConnectorExecuteRequest, type ConnectorExecuteResponse, type ConnectorProvider, type ConnectorStatusResponse, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type ImageGenerationRequest, type ImageGenerationResponse, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenUsage, type TranscriptionRequest, type TranscriptionResponse, type UpdateOptions, type UpsertOptions, type WebBrowserModule, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, platform };
+ /**
+ * Core Agent Tools
+ *
+ * Tool names for web search, URL fetching, and code execution.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Search the web for current information (Exa AI) */
+ declare const webSearch = "web_search";
+ /** Fetch URL content as clean text (HTML stripped) */
+ declare const fetchUrl = "fetch_url";
+ /** Execute Python code in isolated sandbox */
+ declare const runCode = "run_code";
+ /** Core tools for research agents */
+ declare const coreTools: readonly ["web_search", "fetch_url", "run_code"];
+
+ /**
+ * Sandbox Tools (Cursor-like)
+ *
+ * Tool names for file and command execution in coding agents.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Read file contents from sandbox */
+ declare const readFile = "read_file";
+ /** List files and directories */
+ declare const listDir = "list_dir";
+ /** Create/overwrite a file (use search_replace for existing files!) */
+ declare const writeFile = "write_file";
+ /** Search and replace in files (PREFERRED for editing) */
+ declare const searchReplace = "search_replace";
+ /** Regex content search using ripgrep */
+ declare const grep = "grep";
+ /** Find files by name pattern */
+ declare const globFileSearch = "glob_file_search";
+ /** Execute shell command in sandbox */
+ declare const runTerminalCmd = "run_terminal_cmd";
+ /** Get public URL for a sandbox port */
+ declare const getHost = "get_host";
+ /** All sandbox tools for coding agents */
+ declare const sandboxTools: readonly ["read_file", "list_dir", "write_file", "search_replace", "grep", "glob_file_search", "run_terminal_cmd", "get_host"];
+
+ /**
+ * Database Tools
+ *
+ * Tool names for RLS-enforced database operations.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Insert a new row (user_id auto-set via RLS) */
+ declare const dbInsert = "db_insert";
+ /** List rows with filtering/pagination (RLS enforced) */
+ declare const dbList = "db_list";
+ /** Get a single row by ID (RLS enforced) */
+ declare const dbGet = "db_get";
+ /** Update a row by ID (RLS enforced) */
+ declare const dbUpdate = "db_update";
+ /** Delete a row by ID (RLS enforced) */
+ declare const dbDelete = "db_delete";
+ /** All database tools */
+ declare const dbTools: readonly ["db_insert", "db_list", "db_get", "db_update", "db_delete"];
+
+ /**
+ * Storage Tools
+ *
+ * Tool names for file storage operations.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Upload a file (base64 or URL) */
+ declare const storageUpload = "storage_upload";
+ /** Download file from storage */
+ declare const storageDownload = "storage_download";
+ /** List files in a directory */
+ declare const storageList = "storage_list";
+ /** Delete a file */
+ declare const storageDelete = "storage_delete";
+ /** Get public URL for a file */
+ declare const storagePublicUrl = "storage_public_url";
+ /** Move/rename a file */
+ declare const storageMove = "storage_move";
+ /** Copy a file */
+ declare const storageCopy = "storage_copy";
+ /** All storage tools */
+ declare const storageTools: readonly ["storage_upload", "storage_download", "storage_list", "storage_delete", "storage_public_url", "storage_move", "storage_copy"];
+
+ /**
+ * RAG Agent Tool
+ *
+ * Tool name for knowledge base search.
+ * The server has the full schema - SDK just passes tool name.
+ */
+ /** Search knowledge base using semantic similarity */
+ declare const ragSearch = "rag_search";
+ /** All RAG tools */
+ declare const ragTools: readonly ["rag_search"];
+
+ /**
+ * Media Agent Tools
+ *
+ * Tool names for AI image and video generation.
+ * The server has the full schemas - SDK just passes tool names.
+ */
+ /** Generate images from text prompts */
+ declare const generateImage = "generate_image";
+ /** Edit existing images with text prompts */
+ declare const editImage = "edit_image";
+ /** Generate videos from text prompts */
+ declare const generateVideo = "generate_video";
+ /** Animate static images into videos */
+ declare const imageToVideo = "image_to_video";
+ /** All media generation tools */
+ declare const mediaTools: readonly ["generate_image", "edit_image", "generate_video", "image_to_video"];
+
+ /**
+ * Agent Tools
+ *
+ * Tool names for use with blink.ai.agent() and useAgent()
+ *
+ * Built-in tools are just string names - the server has the schemas.
+ *
+ * @example
+ * ```ts
+ * import {
+ * webSearch, fetchUrl, runCode, // Core tools
+ * ragSearch, // RAG tool
+ * sandboxTools, dbTools, storageTools, // Tool bundles
+ * mediaTools // Media generation
+ * } from '@blinkdotnew/sdk'
+ *
+ * const agent = blink.ai.createAgent({
+ * model: 'google/gemini-3-flash',
+ * tools: [...sandboxTools, webSearch, ragSearch]
+ * })
+ * ```
+ */
+
+ /**
+ * Convert tools array for API request
+ *
+ * Since tools are now just strings, this is essentially an identity function.
+ * Kept for backward compatibility.
+ *
+ * @internal Used by SDK to serialize tools for API
+ */
+ declare function serializeTools(tools: string[]): string[];
+
+ export { type AISearchOptions, Agent, type AgentBilling, type AgentConfig, type AgentNonStreamMessagesRequest, type AgentNonStreamPromptRequest, type AgentOptions, type AgentRequest, type AgentResponse, type AgentStep, type AgentStreamRequest, type TokenUsage as AgentTokenUsage, type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, BlinkConnectorError, type BlinkConnectors, BlinkConnectorsImpl, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRAG, BlinkRAGImpl, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkSandbox, BlinkSandboxImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkTokenType, type BlinkUser, type ClientTool, type ConnectorApiKeyRequest, type ConnectorApiKeyResponse, type ConnectorAuthMode, type ConnectorExecuteRequest, type ConnectorExecuteResponse, type ConnectorProvider, type ConnectorStatusResponse, type ContextPolicy, type CreateCollectionOptions, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type GenerateOptions, type ImageGenerationRequest, type ImageGenerationResponse, type JSONSchema, type ListDocumentsOptions, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RAGAISearchResult, type RAGAISearchSource, type RAGCollection, type RAGDocument, type RAGSearchResponse, type RAGSearchResult, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, SANDBOX_TEMPLATES, type Sandbox, type SandboxConnectOptions, SandboxConnectionError, type SandboxCreateOptions, type SandboxTemplate, type SearchOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StopCondition, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type StreamOptions, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenIntrospectionResult, type TokenUsage$1 as TokenUsage, type ToolCall, type ToolResult, type TranscriptionRequest, type TranscriptionResponse, type UIMessage, type UIMessagePart, type UpdateOptions, type UploadOptions, type UpsertOptions, type WaitForReadyOptions, type WebBrowserModule, WebStorageAdapter, type WebhookTool, coreTools, createClient, dbDelete, dbGet, dbInsert, dbList, dbTools, dbUpdate, editImage, fetchUrl, generateImage, generateVideo, getDefaultClient, getDefaultStorageAdapter, getHost, globFileSearch, grep, imageToVideo, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, listDir, mediaTools, platform, ragSearch, ragTools, readFile, runCode, runTerminalCmd, sandboxTools, searchReplace, serializeTools, stepCountIs, storageCopy, storageDelete, storageDownload, storageList, storageMove, storagePublicUrl, storageTools, storageUpload, webSearch, writeFile };