@blinkdotnew/sdk 2.2.0 → 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +1342 -5
- package/dist/index.d.ts +1342 -5
- package/dist/index.js +4128 -64
- package/dist/index.mjs +4093 -65
- package/package.json +3 -2
package/dist/index.d.ts
CHANGED
@@ -353,7 +353,7 @@ interface StorageDownloadResponse {
     contentType?: string;
     size?: number;
 }
-interface TokenUsage {
+interface TokenUsage$1 {
     promptTokens: number;
     completionTokens: number;
     totalTokens: number;
@@ -386,7 +386,7 @@ interface TextGenerationRequest {
 interface TextGenerationResponse {
     text: string;
     finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls';
-    usage?: TokenUsage;
+    usage?: TokenUsage$1;
     files?: any[];
     reasoningDetails?: any[];
     toolCalls?: any[];
@@ -400,7 +400,7 @@ interface TextGenerationResponse {
         stepType?: string;
         text?: string;
         finishReason?: string;
-        usage?: TokenUsage;
+        usage?: TokenUsage$1;
     }>;
     sources?: any[];
     providerMetadata?: any;
@@ -418,7 +418,7 @@ interface ObjectGenerationRequest {
 interface ObjectGenerationResponse {
     object: any;
     finishReason?: 'stop' | 'length' | 'content_filter';
-    usage?: TokenUsage;
+    usage?: TokenUsage$1;
     warnings?: string[];
     providerMetadata?: {
         openai?: {
@@ -561,6 +561,11 @@ interface BlinkAI {
     generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
     generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
     transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
+    agent(options: any): Promise<any>;
+    /** Creates a reusable Agent instance (Vercel AI SDK pattern) */
+    createAgent(options: any): any;
+    /** Binds an existing Agent instance to this client's httpClient */
+    bindAgent(agent: any): any;
 }
 interface DataExtraction {
     chunks: string[];
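
Note: the loosely typed `agent`, `createAgent`, and `bindAgent` members added here are narrowed later in `BlinkAIImpl` (see the overloads further down). A minimal sketch of how `createAgent` and `bindAgent` might be used together, assuming a configured client; the project ID and prompt are placeholders:

```ts
import { createClient, Agent, webSearch } from '@blinkdotnew/sdk';

const blink = createClient({ projectId: 'my-project' }); // placeholder project ID

// createAgent returns an Agent already bound to this client's HTTP client
const bound = blink.ai.createAgent({ model: 'openai/gpt-4o', tools: [webSearch] });

// An Agent constructed standalone can be attached to the client afterwards
const standalone = new Agent({ model: 'openai/gpt-4o', tools: [webSearch] });
blink.ai.bindAgent(standalone);

const result = await bound.generate({ prompt: 'Summarize the latest AI news.' });
console.log(result.text);
```
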
@@ -806,6 +811,84 @@ interface SendEmailResponse {
 interface BlinkNotifications {
     email(params: SendEmailRequest): Promise<SendEmailResponse>;
 }
+/**
+ * Token type in Blink Auth system
+ * - `access`: Regular user access token (short-lived)
+ * - `service`: Service token for server-side operations (permanent secret key)
+ */
+type BlinkTokenType = 'access' | 'service';
+/**
+ * Result of token introspection
+ * Used by edge functions and server-side code to verify user tokens
+ */
+interface TokenIntrospectionResult {
+    /** Whether the token is valid */
+    valid: boolean;
+    /** Project ID from the token */
+    projectId?: string;
+    /** User ID (Firebase UID) - only present for access tokens */
+    userId?: string;
+    /** User's email - only present for access tokens */
+    email?: string;
+    /** Token type: 'access' or 'service' */
+    tokenType?: BlinkTokenType;
+    /** User's role in the app (if set via app_role claim) */
+    appRole?: string;
+    /** Token expiration timestamp (Unix seconds) - not present for secret keys */
+    exp?: number;
+    /** Legacy service key ID (for JWT-based service tokens) */
+    svcKeyId?: string;
+    /** Error message if token is invalid */
+    error?: string;
+}
+
+type ConnectorProvider = 'discord' | 'notion' | 'google_drive' | 'google_calendar' | 'ai';
+type ConnectorAuthMode = 'oauth' | 'api_key' | 'blink_managed' | 'hybrid';
+interface ConnectorStatusData {
+    connected: boolean;
+    provider: ConnectorProvider;
+    auth_mode?: ConnectorAuthMode;
+    account_id?: string;
+    metadata?: Record<string, unknown>;
+    expires_at?: any;
+    scopes?: string[];
+}
+interface ConnectorStatusResponse {
+    success: boolean;
+    data: ConnectorStatusData;
+}
+interface ConnectorExecuteRequest<TParams = Record<string, unknown>> {
+    method: string;
+    params?: TParams;
+    account_id?: string;
+    http_method?: string;
+}
+interface ConnectorExecuteResponse<TData = any> {
+    success: boolean;
+    data: TData;
+}
+interface ConnectorApiKeyRequest<TMetadata = Record<string, unknown>> {
+    api_key: string;
+    account_id?: string;
+    metadata?: TMetadata;
+}
+interface ConnectorApiKeyResponse {
+    success: boolean;
+    data: {
+        id: string;
+        account_id?: string;
+    };
+}
+interface BlinkConnectors {
+    status(provider: ConnectorProvider, options?: {
+        account_id?: string;
+    }): Promise<ConnectorStatusResponse>;
+    execute<TParams = Record<string, unknown>, TData = any>(provider: ConnectorProvider, request: ConnectorExecuteRequest<TParams>): Promise<ConnectorExecuteResponse<TData>>;
+    saveApiKey<TMetadata = Record<string, unknown>>(provider: ConnectorProvider, request: ConnectorApiKeyRequest<TMetadata>): Promise<ConnectorApiKeyResponse>;
+}
+declare class BlinkConnectorError extends BlinkError {
+    constructor(message: string, status?: number, details?: any);
+}
 
 /**
  * HTTP client for Blink API requests
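
Note: a sketch of the new connectors surface based on the types above. The client setup is assumed, and the `method` string and params passed to `execute` are illustrative (provider-specific method names are not defined in this file):

```ts
import { createClient, BlinkConnectorError } from '@blinkdotnew/sdk';

const blink = createClient({ projectId: 'my-project' }); // placeholder project ID

// Check whether the current user has connected Notion
const { data } = await blink.connectors.status('notion');

if (!data.connected) {
  // For api_key-mode providers, store a key for this user (key value is a placeholder)
  await blink.connectors.saveApiKey('notion', { api_key: 'secret_xxx' });
}

try {
  // Execute a provider method; 'search' and its params are hypothetical examples
  const res = await blink.connectors.execute('notion', {
    method: 'search',
    params: { query: 'roadmap' },
  });
  console.log(res.data);
} catch (err) {
  if (err instanceof BlinkConnectorError) console.error(err.message);
}
```
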
@@ -988,6 +1071,80 @@ declare class HttpClient {
         cfg_scale?: number;
         signal?: AbortSignal;
     }): Promise<BlinkResponse<any>>;
+    /**
+     * AI Agent request (non-streaming)
+     * Returns JSON response with text, steps, usage, and billing
+     */
+    aiAgent(requestBody: {
+        stream: false;
+        prompt?: string;
+        messages?: Array<{
+            role: string;
+            content: string | any[];
+            parts?: any[];
+        }>;
+        agent: {
+            model: string;
+            system?: string;
+            tools?: string[];
+            webhook_tools?: Array<{
+                name: string;
+                description: string;
+                input_schema: any;
+                webhook_url: string;
+            }>;
+            client_tools?: Array<{
+                name: string;
+                description: string;
+                input_schema: any;
+            }>;
+            tool_choice?: 'auto' | 'required' | 'none';
+            stop_when?: Array<{
+                type: string;
+                count: number;
+            }>;
+            prepare_step?: {
+                context_policy: any;
+            };
+        };
+    }, signal?: AbortSignal): Promise<BlinkResponse<any>>;
+    /**
+     * AI Agent streaming request
+     * Returns raw Response for SSE streaming (compatible with AI SDK useChat)
+     */
+    aiAgentStream(requestBody: {
+        stream: true;
+        prompt?: string;
+        messages?: Array<{
+            role: string;
+            content: string | any[];
+            parts?: any[];
+        }>;
+        agent: {
+            model: string;
+            system?: string;
+            tools?: string[];
+            webhook_tools?: Array<{
+                name: string;
+                description: string;
+                input_schema: any;
+                webhook_url: string;
+            }>;
+            client_tools?: Array<{
+                name: string;
+                description: string;
+                input_schema: any;
+            }>;
+            tool_choice?: 'auto' | 'required' | 'none';
+            stop_when?: Array<{
+                type: string;
+                count: number;
+            }>;
+            prepare_step?: {
+                context_policy: any;
+            };
+        };
+    }, signal?: AbortSignal): Promise<Response>;
     /**
      * Data-specific requests
      */
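
Note: these low-level `HttpClient` methods mirror the wire format of the agent endpoint. A sketch of the snake_case request body they accept, here with a `webhook_tools` entry, a `stop_when` condition, and a `prepare_step` context policy; the webhook URL, tool schema, and field values are made-up placeholders:

```ts
const body = {
  stream: false as const,
  prompt: 'Create a ticket for the reported bug',
  agent: {
    model: 'openai/gpt-4o',
    system: 'You are a support triage agent.',
    tool_choice: 'auto' as const,
    // Hypothetical webhook tool: the platform POSTs tool calls to this URL
    webhook_tools: [{
      name: 'create_ticket',
      description: 'Create a ticket in the issue tracker',
      input_schema: { type: 'object', properties: { title: { type: 'string' } }, required: ['title'] },
      webhook_url: 'https://example.com/hooks/create-ticket',
    }],
    // Stop after at most 5 tool-loop steps
    stop_when: [{ type: 'step_count_is', count: 5 }],
    // Trim history to a token budget between steps
    prepare_step: { context_policy: { strategy: 'token_budget', max_input_tokens: 8000 } },
  },
};
// The same shape is accepted by the public blink.ai.agent() overloads shown later in this diff.
```
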
@@ -997,6 +1154,13 @@ declare class HttpClient {
     dataScreenshot(projectId: string, request: ScreenshotRequest): Promise<BlinkResponse<ScreenshotResponse>>;
     dataFetch(projectId: string, request: FetchRequest): Promise<BlinkResponse<FetchResponse | AsyncFetchResponse>>;
     dataSearch(projectId: string, request: SearchRequest): Promise<BlinkResponse<SearchResponse>>;
+    /**
+     * Connector requests
+     */
+    private formatProviderForPath;
+    connectorStatus(provider: ConnectorProvider): Promise<BlinkResponse<ConnectorStatusResponse>>;
+    connectorExecute<TParams = Record<string, unknown>, TData = any>(provider: ConnectorProvider, request: ConnectorExecuteRequest<TParams>): Promise<BlinkResponse<ConnectorExecuteResponse<TData>>>;
+    connectorSaveApiKey<TMetadata = Record<string, unknown>>(provider: ConnectorProvider, request: ConnectorApiKeyRequest<TMetadata>): Promise<BlinkResponse<ConnectorApiKeyResponse>>;
     /**
      * Realtime-specific requests
      */
@@ -1422,6 +1586,44 @@ declare class BlinkAuth {
         expires_in?: number;
         refresh_expires_in?: number;
     }, persist?: boolean): Promise<BlinkUser>;
+    /**
+     * Verify a Blink Auth token using the introspection endpoint.
+     *
+     * **Server-side / Edge Function use only.**
+     *
+     * This is the recommended way to verify user tokens in Deno Edge Functions
+     * and other server-side contexts. It calls the Blink API introspection
+     * endpoint which validates the token without exposing the JWT secret.
+     *
+     * @param token - The raw JWT token (without "Bearer " prefix) or full Authorization header
+     * @returns Token introspection result with validity and claims
+     *
+     * @example
+     * // Deno Edge Function usage
+     * import { createClient } from "npm:@blinkdotnew/sdk";
+     *
+     * const blink = createClient({
+     *   projectId: Deno.env.get("BLINK_PROJECT_ID")!,
+     *   secretKey: Deno.env.get("BLINK_SECRET_KEY"),
+     * });
+     *
+     * async function handler(req: Request): Promise<Response> {
+     *   const authHeader = req.headers.get("Authorization");
+     *   const result = await blink.auth.verifyToken(authHeader);
+     *
+     *   if (!result.valid) {
+     *     return new Response(JSON.stringify({ error: result.error }), { status: 401 });
+     *   }
+     *
+     *   // User is authenticated
+     *   console.log("User ID:", result.userId);
+     *   console.log("Email:", result.email);
+     *   console.log("Project:", result.projectId);
+     *
+     *   // Continue with your logic...
+     * }
+     */
+    verifyToken(token: string | null): Promise<TokenIntrospectionResult>;
     /**
      * Refresh access token using refresh token
      */
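
Note: beyond the validity check shown in the doc comment above, the introspection result also distinguishes access tokens from service tokens (`tokenType`) and carries an optional `appRole` claim. A hedged sketch of role/type gating in a server handler; the role name and the decision to let service tokens bypass the check are illustrative choices, and `blink` is assumed configured as in the doc comment:

```ts
async function requireAdmin(req: Request): Promise<Response | null> {
  const result = await blink.auth.verifyToken(req.headers.get('Authorization'));

  if (!result.valid) {
    return new Response('Unauthorized', { status: 401 });
  }
  // Service tokens (secret keys) bypass per-user role checks in this sketch
  if (result.tokenType === 'service') {
    return null;
  }
  // For user access tokens, require an app_role claim of "admin" (example role)
  if (result.appRole !== 'admin') {
    return new Response('Forbidden', { status: 403 });
  }
  return null; // null means the caller may proceed
}
```
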
@@ -1772,6 +1974,317 @@ interface BlinkFunctions {
     invoke<T = any>(functionSlug: string, options?: FunctionsInvokeOptions): Promise<FunctionsInvokeResponse<T>>;
 }
 
+/**
+ * Blink RAG Module - Vector Search and AI-powered retrieval
+ *
+ * Provides document ingestion, vector search, and RAG (Retrieval-Augmented Generation) capabilities.
+ */
+
+interface RAGCollection {
+    id: string;
+    name: string;
+    description: string | null;
+    embeddingModel: string;
+    embeddingDimensions: number;
+    indexMetric: 'cosine' | 'l2';
+    chunkMaxTokens: number;
+    chunkOverlapTokens: number;
+    documentCount: number;
+    chunkCount: number;
+    shared: boolean;
+    createdAt: string;
+    updatedAt: string;
+}
+interface RAGDocument {
+    id: string;
+    collectionId: string;
+    filename: string;
+    sourceType: 'file' | 'url' | 'text';
+    sourceUrl: string | null;
+    contentType: string | null;
+    fileSize: number | null;
+    status: 'pending' | 'processing' | 'ready' | 'error';
+    errorMessage: string | null;
+    processingStartedAt: string | null;
+    processingCompletedAt: string | null;
+    chunkCount: number;
+    tokenCount: number | null;
+    metadata: Record<string, any>;
+    createdAt: string;
+    updatedAt: string;
+}
+interface RAGSearchResult {
+    chunkId: string;
+    documentId: string;
+    filename: string;
+    content: string;
+    score: number;
+    chunkIndex: number;
+    metadata: Record<string, any>;
+}
+interface RAGSearchResponse {
+    results: RAGSearchResult[];
+    query: string;
+    collectionId: string;
+    totalResults: number;
+}
+interface RAGAISearchSource {
+    documentId: string;
+    filename: string;
+    chunkId: string;
+    excerpt: string;
+    score: number;
+}
+interface RAGAISearchResult {
+    answer: string;
+    sources: RAGAISearchSource[];
+    query: string;
+    model: string;
+    usage: {
+        inputTokens: number;
+        outputTokens: number;
+    };
+}
+interface CreateCollectionOptions {
+    name: string;
+    description?: string;
+    embeddingModel?: string;
+    embeddingDimensions?: number;
+    indexMetric?: 'cosine' | 'l2';
+    chunkMaxTokens?: number;
+    chunkOverlapTokens?: number;
+    shared?: boolean;
+}
+interface UploadOptions {
+    collectionId?: string;
+    collectionName?: string;
+    filename: string;
+    content?: string;
+    file?: {
+        data: string;
+        contentType: string;
+    };
+    url?: string;
+    metadata?: Record<string, any>;
+}
+interface SearchOptions {
+    collectionId?: string;
+    collectionName?: string;
+    query: string;
+    maxResults?: number;
+    scoreThreshold?: number;
+    filters?: Record<string, any>;
+    includeContent?: boolean;
+}
+interface AISearchOptions {
+    collectionId?: string;
+    collectionName?: string;
+    query: string;
+    model?: string;
+    maxContextChunks?: number;
+    scoreThreshold?: number;
+    systemPrompt?: string;
+    stream?: boolean;
+}
+interface ListDocumentsOptions {
+    collectionId?: string;
+    status?: 'pending' | 'processing' | 'ready' | 'error';
+}
+interface WaitForReadyOptions {
+    timeoutMs?: number;
+    pollIntervalMs?: number;
+}
+declare class BlinkRAGImpl {
+    private httpClient;
+    private projectId;
+    constructor(httpClient: HttpClient);
+    /**
+     * Build URL with project_id prefix
+     */
+    private url;
+    /**
+     * Create a new RAG collection
+     */
+    createCollection(options: CreateCollectionOptions): Promise<RAGCollection>;
+    /**
+     * List all collections accessible to the current user
+     */
+    listCollections(): Promise<RAGCollection[]>;
+    /**
+     * Get a specific collection by ID
+     */
+    getCollection(collectionId: string): Promise<RAGCollection>;
+    /**
+     * Delete a collection and all its documents
+     */
+    deleteCollection(collectionId: string): Promise<void>;
+    /**
+     * Upload a document for processing
+     *
+     * @example
+     * // Upload text content
+     * const doc = await blink.rag.upload({
+     *   collectionName: 'docs',
+     *   filename: 'notes.txt',
+     *   content: 'My document content...'
+     * })
+     *
+     * @example
+     * // Upload from URL
+     * const doc = await blink.rag.upload({
+     *   collectionId: 'col_abc123',
+     *   filename: 'article.html',
+     *   url: 'https://example.com/article'
+     * })
+     *
+     * @example
+     * // Upload a file (base64)
+     * const doc = await blink.rag.upload({
+     *   collectionName: 'docs',
+     *   filename: 'report.pdf',
+     *   file: { data: base64Data, contentType: 'application/pdf' }
+     * })
+     */
+    upload(options: UploadOptions): Promise<RAGDocument>;
+    /**
+     * Get document status and metadata
+     */
+    getDocument(documentId: string): Promise<RAGDocument>;
+    /**
+     * List documents, optionally filtered by collection or status
+     */
+    listDocuments(options?: ListDocumentsOptions): Promise<RAGDocument[]>;
+    /**
+     * Delete a document and its chunks
+     */
+    deleteDocument(documentId: string): Promise<void>;
+    /**
+     * Wait for a document to finish processing
+     *
+     * @example
+     * const doc = await blink.rag.upload({ ... })
+     * const readyDoc = await blink.rag.waitForReady(doc.id)
+     * console.log(`Processed ${readyDoc.chunkCount} chunks`)
+     */
+    waitForReady(documentId: string, options?: WaitForReadyOptions): Promise<RAGDocument>;
+    /**
+     * Search for similar chunks using vector similarity
+     *
+     * @example
+     * const results = await blink.rag.search({
+     *   collectionName: 'docs',
+     *   query: 'How do I configure authentication?',
+     *   maxResults: 5
+     * })
+     */
+    search(options: SearchOptions): Promise<RAGSearchResponse>;
+    /**
+     * Perform RAG: search + LLM answer generation
+     *
+     * @example
+     * // Non-streaming
+     * const result = await blink.rag.aiSearch({
+     *   collectionName: 'docs',
+     *   query: 'What are the main features?'
+     * })
+     * console.log(result.answer)
+     *
+     * @example
+     * // Streaming
+     * const stream = await blink.rag.aiSearch({
+     *   collectionName: 'docs',
+     *   query: 'Explain the architecture',
+     *   stream: true
+     * })
+     */
+    aiSearch(options: AISearchOptions & {
+        stream?: false;
+    }): Promise<RAGAISearchResult>;
+    aiSearch(options: AISearchOptions & {
+        stream: true;
+    }): Promise<ReadableStream<Uint8Array>>;
+}
+type BlinkRAG = BlinkRAGImpl;
+
+/**
+ * Blink Sandbox Module - Persistent coding environments for AI agents
+ *
+ * Provides lifecycle management for E2B sandboxes with auto-pause and resume.
+ * Used with agent.generate({ sandbox }) for AI coding agents.
+ */
+
+interface Sandbox {
+    /** Sandbox ID (sbx_xxx format) - STORE THIS for persistence! */
+    id: string;
+    /** Template used to create the sandbox */
+    template: string;
+    /** Get public URL for any port (sync - computed locally from hostPattern) */
+    getHost(port: number): string;
+}
+interface SandboxCreateOptions {
+    /** E2B template ID (default: 'devtools-base') */
+    template?: string;
+    /** Inactivity timeout in ms (default: 5 min, max: 60 min) */
+    timeoutMs?: number;
+    /** Custom metadata for tracking */
+    metadata?: Record<string, string>;
+}
+interface SandboxConnectOptions {
+    /** Reset inactivity timeout in ms (default: 5 min) */
+    timeoutMs?: number;
+}
+interface BlinkSandbox {
+    /**
+     * Create a new persistent sandbox with auto-pause enabled.
+     *
+     * @example
+     * ```ts
+     * const sandbox = await blink.sandbox.create({ template: 'nextjs-app' })
+     * console.log(sandbox.id) // "sbx_abc123xyz" - Store this!
+     * console.log(sandbox.getHost(3000)) // "3000-sbx_abc123xyz.preview-blink.com"
+     * ```
+     */
+    create(options?: SandboxCreateOptions): Promise<Sandbox>;
+    /**
+     * Connect to an existing sandbox. Auto-resumes if paused.
+     * Built-in retry with exponential backoff (3 retries: 250ms → 500ms → 1000ms).
+     *
+     * @example
+     * ```ts
+     * const sandbox = await blink.sandbox.connect(storedSandboxId)
+     * ```
+     */
+    connect(sandboxId: string, options?: SandboxConnectOptions): Promise<Sandbox>;
+    /**
+     * Permanently kill a sandbox. Cannot be resumed after kill.
+     *
+     * @example
+     * ```ts
+     * await blink.sandbox.kill(sandboxId)
+     * await blink.db.user_sandboxes.delete({ sandbox_id: sandboxId })
+     * ```
+     */
+    kill(sandboxId: string): Promise<void>;
+}
+declare const SANDBOX_TEMPLATES: readonly ["devtools-base", "nextjs-app", "nextjs-app-bun", "vite-react", "vite-react-bun", "expo-app", "desktop", "claude-code"];
+type SandboxTemplate = typeof SANDBOX_TEMPLATES[number];
+declare class SandboxConnectionError extends Error {
+    sandboxId: string;
+    constructor(sandboxId: string, cause?: Error);
+}
+declare class BlinkSandboxImpl implements BlinkSandbox {
+    private httpClient;
+    private projectId;
+    constructor(httpClient: HttpClient);
+    /**
+     * Build URL with project_id prefix
+     */
+    private url;
+    create(options?: SandboxCreateOptions): Promise<Sandbox>;
+    connect(sandboxId: string, options?: SandboxConnectOptions): Promise<Sandbox>;
+    kill(sandboxId: string): Promise<void>;
+}
+
 /**
  * Blink Client - Main SDK entry point
  * Factory function and client class for the Blink SDK
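
Note: pulling the per-method doc examples above together, an end-to-end ingestion-and-query sketch against the new `blink.rag` module; the collection name, document content, and queries are placeholders, and `blink` is assumed to be a configured client:

```ts
const collection = await blink.rag.createCollection({ name: 'docs', indexMetric: 'cosine' });

const doc = await blink.rag.upload({
  collectionId: collection.id,
  filename: 'handbook.txt',
  content: 'Employees may work remotely up to three days per week...',
});

// Poll until chunking/embedding finishes (status becomes 'ready' or 'error')
const ready = await blink.rag.waitForReady(doc.id, { timeoutMs: 60_000 });
console.log(`Indexed ${ready.chunkCount} chunks`);

// Plain vector search returns scored chunks
const hits = await blink.rag.search({
  collectionId: collection.id,
  query: 'remote work policy',
  maxResults: 3,
});

// aiSearch additionally asks an LLM to answer from the retrieved chunks
const answer = await blink.rag.aiSearch({
  collectionId: collection.id,
  query: 'How many remote days are allowed?',
});
console.log(answer.answer, answer.sources.length);
```
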
@@ -1786,7 +2299,10 @@ interface BlinkClient {
     realtime: BlinkRealtime;
     notifications: BlinkNotifications;
     analytics: BlinkAnalytics;
+    connectors: BlinkConnectors;
     functions: BlinkFunctions;
+    rag: BlinkRAG;
+    sandbox: BlinkSandbox;
 }
 /**
  * Create a new Blink client instance
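
Note: with these three properties the new modules hang directly off the client. A minimal sketch of the expanded surface (the project ID is a placeholder):

```ts
import { createClient } from '@blinkdotnew/sdk';

const blink = createClient({ projectId: 'my-project' });

// 2.3.0 additions alongside the existing modules
await blink.connectors.status('discord');   // third-party connectors
await blink.rag.listCollections();          // vector search / RAG
const sbx = await blink.sandbox.create();   // persistent E2B sandboxes
console.log(sbx.getHost(3000));
```
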
@@ -1888,6 +2404,373 @@ declare class BlinkStorageImpl implements BlinkStorage {
     remove(...paths: string[]): Promise<void>;
 }
 
+/**
+ * Agent Tool Types
+ *
+ * Type definitions for AI agent tools used with blink.ai.agent()
+ */
+/**
+ * A tool definition that can be passed to the agent
+ */
+interface AgentTool {
+    /** Tool name (used as identifier) */
+    name: string;
+    /** Human-readable description for the LLM */
+    description: string;
+    /** JSON Schema for tool input parameters */
+    inputSchema: JSONSchema;
+}
+/**
+ * JSON Schema type for tool input definitions
+ */
+type JSONSchema = Record<string, any>;
+/**
+ * A custom webhook tool definition
+ */
+interface WebhookTool {
+    /** Tool name */
+    name: string;
+    /** Human-readable description */
+    description: string;
+    /** JSON Schema for input parameters */
+    input_schema: JSONSchema;
+    /** URL to POST when tool is called */
+    webhook_url: string;
+}
+/**
+ * A client-side tool requiring user confirmation
+ */
+interface ClientTool {
+    /** Tool name */
+    name: string;
+    /** Human-readable description */
+    description: string;
+    /** JSON Schema for input parameters */
+    input_schema: JSONSchema;
+}
+/**
+ * Stop condition for agent loop
+ */
+interface StopCondition {
+    type: 'step_count_is';
+    count: number;
+}
+/**
+ * Context policy for managing conversation history
+ */
+interface ContextPolicy {
+    strategy: 'token_budget';
+    max_input_tokens: number;
+    keep_system: boolean;
+    keep_last_messages: number;
+    trim_order: string[];
+    max_tool_result_bytes: number;
+}
+/**
+ * Agent configuration
+ */
+interface AgentConfig {
+    /** Model ID in Vercel AI Gateway format: "provider/model-id" */
+    model: string;
+    /** System prompt */
+    system?: string;
+    /** Built-in tools to enable (tool names or bundles) */
+    tools?: (AgentTool | string)[];
+    /** Custom webhook tools */
+    webhook_tools?: WebhookTool[];
+    /** Client-side tools for HITL */
+    client_tools?: ClientTool[];
+    /** Tool choice strategy */
+    tool_choice?: 'auto' | 'required' | 'none';
+    /** Stop conditions */
+    stop_when?: StopCondition[];
+    /** Context management */
+    prepare_step?: {
+        context_policy: Partial<ContextPolicy>;
+    };
+}
+/**
+ * UI Message format (AI SDK compatible)
+ */
+interface UIMessage {
+    id?: string;
+    role: 'user' | 'assistant';
+    content: string;
+    parts?: UIMessagePart[];
+}
+type UIMessagePart = {
+    type: 'text';
+    text: string;
+} | {
+    type: 'tool-invocation';
+    toolCallId: string;
+    toolName: string;
+    state: 'pending' | 'result' | 'output-available';
+    input: Record<string, any>;
+    output?: any;
+};
+/**
+ * Agent request for streaming mode
+ */
+interface AgentStreamRequest {
+    stream: true;
+    messages: UIMessage[];
+    agent: AgentConfig;
+}
+/**
+ * Agent request for non-streaming mode with messages
+ */
+interface AgentNonStreamMessagesRequest {
+    stream: false;
+    messages: UIMessage[];
+    agent: AgentConfig;
+}
+/**
+ * Agent request for non-streaming mode with prompt
+ */
+interface AgentNonStreamPromptRequest {
+    stream: false;
+    prompt: string;
+    agent: AgentConfig;
+}
+/**
+ * Union of all agent request types
+ */
+type AgentRequest = AgentStreamRequest | AgentNonStreamMessagesRequest | AgentNonStreamPromptRequest;
+/**
+ * Token usage information
+ */
+interface TokenUsage {
+    inputTokens: number;
+    outputTokens: number;
+    totalTokens?: number;
+}
+/**
+ * Tool call in a step
+ */
+interface ToolCall {
+    toolCallId: string;
+    toolName: string;
+    args: Record<string, any>;
+}
+/**
+ * Tool result in a step
+ */
+interface ToolResult {
+    toolCallId: string;
+    result: any;
+}
+/**
+ * Agent step information
+ */
+interface AgentStep {
+    text: string;
+    toolCalls: ToolCall[];
+    toolResults: ToolResult[];
+    finishReason: string;
+    usage: TokenUsage;
+}
+/**
+ * Billing information
+ */
+interface AgentBilling {
+    model: string;
+    creditsCharged: number;
+    costUSD: number;
+    breakdown?: {
+        ai: {
+            credits: number;
+            costUSD: number;
+        };
+        tools: Record<string, {
+            count: number;
+            credits: number;
+            costUSD: number;
+        }>;
+    };
+}
+/**
+ * Non-streaming agent response
+ */
+interface AgentResponse {
+    text: string;
+    finishReason: 'stop' | 'length' | 'tool-calls' | 'error' | 'content-filter';
+    steps: AgentStep[];
+    usage: TokenUsage;
+    warnings?: Array<{
+        type: string;
+        message: string;
+    }>;
+    _billing: AgentBilling;
+}
+
+/**
+ * Agent Class
+ *
+ * Matches Vercel AI SDK ToolLoopAgent pattern:
+ * - Create agent instance with config
+ * - Call agent.generate() for non-streaming
+ * - Call agent.stream() for streaming
+ */
+
+/**
+ * Options for creating an Agent instance
+ * Matches Vercel AI SDK ToolLoopAgent constructor options
+ */
+interface AgentOptions {
+    /** Model ID in Vercel AI Gateway format: "provider/model-id" */
+    model: string;
+    /** System prompt / instructions */
+    system?: string;
+    /** Alias for system (Vercel AI SDK compatibility) */
+    instructions?: string;
+    /** Built-in tools to enable */
+    tools?: (AgentTool | string)[];
+    /** Custom webhook tools */
+    webhookTools?: WebhookTool[];
+    /** Client-side tools for HITL */
+    clientTools?: ClientTool[];
+    /** Tool choice strategy */
+    toolChoice?: 'auto' | 'required' | 'none';
+    /** Stop conditions */
+    stopWhen?: StopCondition[];
+    /** Maximum number of steps (convenience for stopWhen) */
+    maxSteps?: number;
+}
+/**
+ * Options for agent.generate() call
+ */
+interface GenerateOptions {
+    /** Simple text prompt */
+    prompt?: string;
+    /** Conversation history */
+    messages?: UIMessage[];
+    /** Sandbox for sandbox tools (object with id, or just sandboxId string) */
+    sandbox?: Sandbox | string;
+    /** Abort signal for cancellation */
+    signal?: AbortSignal;
+}
+/**
+ * Options for agent.stream() call
+ */
+interface StreamOptions {
+    /** Simple text prompt */
+    prompt?: string;
+    /** Conversation history */
+    messages?: UIMessage[];
+    /** Sandbox for sandbox tools (object with id, or just sandboxId string) */
+    sandbox?: Sandbox | string;
+    /** Abort signal for cancellation */
+    signal?: AbortSignal;
+}
+/**
+ * AI Agent class following Vercel AI SDK ToolLoopAgent pattern.
+ *
+ * Create an agent instance with configuration, then use:
+ * - `agent.generate({ prompt })` for non-streaming one-shot generation
+ * - `agent.stream({ prompt })` for streaming real-time generation
+ *
+ * @example
+ * ```ts
+ * import { Agent, webSearch, fetchUrl } from '@blinkdotnew/sdk'
+ *
+ * const weatherAgent = new Agent({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   system: 'You are a helpful weather assistant.',
+ *   tools: [webSearch, fetchUrl],
+ *   maxSteps: 10,
+ * })
+ *
+ * // Non-streaming
+ * const result = await weatherAgent.generate({
+ *   prompt: 'What is the weather in San Francisco?',
+ * })
+ * console.log(result.text)
+ *
+ * // Streaming
+ * const stream = await weatherAgent.stream({
+ *   prompt: 'Tell me about weather patterns',
+ * })
+ * ```
+ */
+declare class Agent {
+    private httpClient;
+    private readonly config;
+    /**
+     * Create a new Agent instance
+     * @param options - Agent configuration options
+     */
+    constructor(options: AgentOptions);
+    /**
+     * Internal: Set the HTTP client (called by BlinkClient)
+     */
+    _setHttpClient(client: HttpClient): void;
+    /**
+     * Internal: Get the agent config for API requests
+     */
+    private getAgentConfig;
+    /**
+     * Generate a response (non-streaming)
+     *
+     * @param options - Generation options (prompt or messages)
+     * @returns Promise<AgentResponse> with text, steps, usage, and billing
+     *
+     * @example
+     * ```ts
+     * const result = await agent.generate({
+     *   prompt: 'What is the weather in San Francisco?',
+     * })
+     * console.log(result.text)
+     * console.log(result.steps)
+     * ```
+     */
+    generate(options: GenerateOptions): Promise<AgentResponse>;
+    /**
+     * Stream a response (real-time)
+     *
+     * @param options - Stream options (prompt or messages)
+     * @returns Promise<Response> - AI SDK UI Message Stream for useChat compatibility
+     *
+     * @example
+     * ```ts
+     * const stream = await agent.stream({
+     *   prompt: 'Tell me a story',
+     * })
+     *
+     * // Process stream
+     * for await (const chunk of stream.body) {
+     *   // Handle chunk
+     * }
+     * ```
+     */
+    stream(options: StreamOptions): Promise<Response>;
+    /**
+     * Get the agent's model
+     */
+    get model(): string;
+    /**
+     * Get the agent's system prompt
+     */
+    get system(): string | undefined;
+    /**
+     * Get the agent's tools
+     */
+    get tools(): (AgentTool | string)[] | undefined;
+}
+/**
+ * Creates a stop condition for maximum step count
+ * Matches Vercel AI SDK's stepCountIs helper
+ *
+ * @example
+ * ```ts
+ * const agent = new Agent({
+ *   model: 'openai/gpt-4o',
+ *   stopWhen: [stepCountIs(10)],
+ * })
+ * ```
+ */
+declare function stepCountIs(count: number): StopCondition;
+
 /**
  * Blink AI Module
  * Provides AI generation capabilities with Vercel AI SDK compatibility
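
Note: the `GenerateOptions.sandbox` field ties the Agent class to the sandbox module declared earlier: pass a `Sandbox` (or its id) so the Cursor-like sandbox tools operate inside that environment. A hedged sketch of a coding agent wired this way; the template, prompt, and step cap are illustrative, and `blink` is assumed to be a configured client:

```ts
import { sandboxTools, stepCountIs } from '@blinkdotnew/sdk';

// createAgent binds the client's HTTP client for us
const coder = blink.ai.createAgent({
  model: 'anthropic/claude-sonnet-4-20250514',
  system: 'You are a coding agent working inside the provided sandbox.',
  tools: sandboxTools,             // read_file, write_file, run_terminal_cmd, ...
  stopWhen: [stepCountIs(20)],     // cap the tool loop
});

const sandbox = await blink.sandbox.create({ template: 'vite-react' });

const result = await coder.generate({
  prompt: 'Add a dark-mode toggle to the app and run the build.',
  sandbox,                         // or pass the stored sandbox.id string
});
console.log(result.text, result._billing.creditsCharged);
```
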
@@ -2428,6 +3311,110 @@ declare class BlinkAIImpl implements BlinkAI {
      * - `duration`: Audio duration in seconds
      */
     transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
+    /**
+     * Runs an AI agent that can use tools in a loop to accomplish tasks.
+     *
+     * @param options - Agent request options
+     * - `stream`: Whether to stream the response (true for UI, false for headless)
+     * - `prompt`: Simple string prompt (for non-streaming, mutually exclusive with messages)
+     * - `messages`: Array of UI messages (for streaming or non-streaming with history)
+     * - `agent`: Agent configuration (model, tools, system prompt, etc.)
+     * - `signal`: AbortSignal for cancellation
+     *
+     * @example Non-streaming with prompt
+     * ```ts
+     * const result = await blink.ai.agent({
+     *   stream: false,
+     *   prompt: 'What is the weather in Tokyo?',
+     *   agent: {
+     *     model: 'openai/gpt-4o',
+     *     tools: ['web_search']
+     *   }
+     * })
+     * console.log(result.text)
+     * ```
+     *
+     * @example Streaming with messages (for useChat integration)
+     * ```ts
+     * const response = await blink.ai.agent({
+     *   stream: true,
+     *   messages: [{ role: 'user', content: 'Search for AI news' }],
+     *   agent: {
+     *     model: 'openai/gpt-4o',
+     *     tools: [webSearch, fetchUrl]
+     *   }
+     * })
+     * // Response is a ReadableStream for SSE
+     * ```
+     *
+     * @returns For non-streaming: Promise<AgentResponse>
+     *          For streaming: Promise<Response> (SSE stream)
+     */
+    agent(options: {
+        stream: false;
+        prompt: string;
+        agent: AgentConfig;
+        signal?: AbortSignal;
+    }): Promise<AgentResponse>;
+    agent(options: {
+        stream: false;
+        messages: UIMessage[];
+        agent: AgentConfig;
+        signal?: AbortSignal;
+    }): Promise<AgentResponse>;
+    agent(options: {
+        stream: true;
+        messages: UIMessage[];
+        agent: AgentConfig;
+        signal?: AbortSignal;
+    }): Promise<Response>;
+    agent(options: {
+        stream: true;
+        prompt: string;
+        agent: AgentConfig;
+        signal?: AbortSignal;
+    }): Promise<Response>;
+    /**
+     * Creates a reusable Agent instance with the Vercel AI SDK pattern.
+     *
+     * The Agent can be used multiple times with different prompts:
+     * - `agent.generate({ prompt })` for non-streaming
+     * - `agent.stream({ prompt })` for streaming
+     *
+     * @param options - Agent configuration (model, tools, system, etc.)
+     * @returns Agent instance
+     *
+     * @example
+     * ```ts
+     * const weatherAgent = blink.ai.createAgent({
+     *   model: 'anthropic/claude-sonnet-4-20250514',
+     *   system: 'You are a helpful weather assistant.',
+     *   tools: [webSearch, fetchUrl],
+     *   maxSteps: 10,
+     * })
+     *
+     * // Non-streaming
+     * const result = await weatherAgent.generate({
+     *   prompt: 'What is the weather in San Francisco?',
+     * })
+     *
+     * // Streaming
+     * const stream = await weatherAgent.stream({
+     *   prompt: 'Tell me about weather patterns',
+     * })
+     * ```
+     */
+    createAgent(options: AgentOptions): Agent;
+    /**
+     * Binds an existing Agent instance to this client's HTTP client.
+     *
+     * Used internally by useAgent() when an Agent instance is passed.
+     * This allows agents created with `new Agent()` to be used with the hook.
+     *
+     * @param agent - Existing Agent instance
+     * @returns The same Agent instance (with httpClient set)
+     */
+    bindAgent(agent: Agent): Agent;
 }
 
 /**
@@ -2512,4 +3499,354 @@ declare class BlinkRealtimeImpl implements BlinkRealtime {
     onPresence(channelName: string, callback: (users: PresenceUser[]) => void): () => void;
 }
 
-
+declare class BlinkConnectorsImpl implements BlinkConnectors {
+    private httpClient;
+    constructor(httpClient: HttpClient);
+    status(provider: ConnectorProvider, options?: {
+        account_id?: string;
+    }): Promise<ConnectorStatusResponse>;
+    execute<TParams = Record<string, unknown>, TData = any>(provider: ConnectorProvider, request: ConnectorExecuteRequest<TParams>): Promise<ConnectorExecuteResponse<TData>>;
+    saveApiKey<TMetadata = Record<string, unknown>>(provider: ConnectorProvider, request: ConnectorApiKeyRequest<TMetadata>): Promise<ConnectorApiKeyResponse>;
+}
+
+/**
+ * Core Agent Tools
+ *
+ * Individual tool definitions for web search, URL fetching, scraping, and code execution.
+ */
+
+/**
+ * Web search tool for finding current information
+ *
+ * Powered by Exa AI - performs real-time web searches and returns
+ * relevant results with titles, URLs, and snippets.
+ *
+ * @example
+ * ```ts
+ * import { Agent, webSearch } from '@blinkdotnew/react'
+ *
+ * const researchAgent = new Agent({
+ *   model: 'openai/gpt-4o',
+ *   tools: [webSearch]
+ * })
+ *
+ * const result = await researchAgent.generate({
+ *   prompt: 'What are the latest AI developments?'
+ * })
+ * ```
+ */
+declare const webSearch: AgentTool;
+/**
+ * Fetch URL content as clean text (HTML stripped)
+ *
+ * Fast text extraction from URLs - no JavaScript rendering.
+ * Good for Wikipedia, documentation, articles.
+ *
+ * @example
+ * ```ts
+ * import { fetchUrl } from '@blinkdotnew/react'
+ *
+ * const chat = useAgent({
+ *   agent: {
+ *     model: 'openai/gpt-4o',
+ *     tools: [fetchUrl]
+ *   }
+ * })
+ * ```
+ */
+declare const fetchUrl: AgentTool;
+/**
+ * Python code interpreter for AI-generated scripts
+ *
+ * Executes Python code in an isolated E2B sandbox with Python 3.11+.
+ * Pre-installed libraries: numpy, pandas, matplotlib, scipy, scikit-learn, requests.
+ * Safe for running untrusted LLM-generated code.
+ *
+ * Returns:
+ * - text: Last expression result
+ * - stdout/stderr: Standard output streams
+ * - results: Rich outputs (matplotlib plots as PNG/JPEG, HTML, Markdown, etc.)
+ * - error: Execution errors with traceback
+ *
+ * Matplotlib plots are automatically captured and returned as base64 PNG images
+ * in the results array. Use plt.show() or display(plt.gcf()) to generate plots.
+ *
+ * @example
+ * ```ts
+ * import { runCode } from '@blinkdotnew/react'
+ *
+ * const chat = useAgent({
+ *   agent: {
+ *     model: 'openai/gpt-4o',
+ *     tools: [runCode]
+ *   }
+ * })
+ *
+ * // Agent can create data visualizations:
+ * // "Create a bar chart of sales data"
+ * // "Plot a sine wave"
+ * // "Analyze this CSV and show trends"
+ * ```
+ */
+declare const runCode: AgentTool;
+
+/**
+ * Sandbox Tools (Cursor-like)
+ *
+ * File and command execution tools for AI coding agents.
+ * Modeled after Cursor IDE tools for familiar DX.
+ *
+ * Tools access `sandboxId` from context (NOT input params).
+ */
+
+/**
+ * Read file contents from sandbox
+ * Cursor equivalent: read_file
+ */
+declare const readFile: AgentTool;
+/**
+ * List files and directories
+ * Cursor equivalent: list_dir
+ */
+declare const listDir: AgentTool;
+/**
+ * Create/overwrite a file in sandbox
+ * Cursor equivalent: write
+ *
+ * ⚠️ WARNING: Use search_replace for existing files!
+ */
+declare const writeFile: AgentTool;
+/**
+ * Search and replace in files (PREFERRED editing tool)
+ * Cursor equivalent: search_replace
+ */
+declare const searchReplace: AgentTool;
+/**
+ * Regex content search using ripgrep
+ * Cursor equivalent: grep
+ */
+declare const grep: AgentTool;
+/**
+ * Find files by name pattern
+ * Cursor equivalent: glob_file_search
+ */
+declare const globFileSearch: AgentTool;
+/**
+ * Execute shell command in sandbox
+ * Cursor equivalent: run_terminal_cmd
+ */
+declare const runTerminalCmd: AgentTool;
+/**
+ * Get public URL for a port in sandbox
+ */
+declare const getHost: AgentTool;
+/**
+ * All sandbox tools bundled together (Cursor-like)
+ *
+ * @example
+ * ```ts
+ * import { sandboxTools, Agent } from '@blinkdotnew/sdk'
+ *
+ * const codingAgent = new Agent({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   tools: sandboxTools,
+ * })
+ *
+ * // Tools include:
+ * // - read_file, list_dir
+ * // - write_file, search_replace
+ * // - grep, glob_file_search
+ * // - run_terminal_cmd, get_host
+ * ```
+ */
+declare const sandboxTools: AgentTool[];
+
+/**
+ * Database Tools
+ *
+ * RLS-enforced database operations for AI agents.
+ * Automatically scoped to the authenticated user.
+ */
+
+/**
+ * Insert a new row into a database table
+ *
+ * RLS automatically sets user_id from JWT.
+ */
+declare const dbInsert: AgentTool;
+/**
+ * List rows from a database table with filtering and pagination
+ *
+ * RLS ensures only the user's rows are returned.
+ */
+declare const dbList: AgentTool;
+/**
+ * Get a single row by ID
+ *
+ * RLS ensures only the user's rows can be retrieved.
+ */
+declare const dbGet: AgentTool;
+/**
+ * Update a row by ID
+ *
+ * RLS ensures only the user's rows can be updated.
+ */
+declare const dbUpdate: AgentTool;
+/**
+ * Delete a row by ID
+ *
+ * RLS ensures only the user's rows can be deleted.
+ */
+declare const dbDelete: AgentTool;
+/**
+ * All database tools bundled together
+ *
+ * @example
+ * ```ts
+ * import { dbTools } from '@blinkdotnew/react'
+ *
+ * const chat = useAgent({
+ *   agent: {
+ *     model: 'openai/gpt-4o',
+ *     tools: [...dbTools]
+ *   }
+ * })
+ * ```
+ */
+declare const dbTools: AgentTool[];
+
+/**
+ * Storage Tools
+ *
+ * RLS-enforced file storage operations for AI agents.
+ * Files are stored under project-scoped paths.
+ */
+
+/**
+ * Upload a file to storage
+ *
+ * Accepts base64-encoded content or a URL to fetch from.
+ */
+declare const storageUpload: AgentTool;
+/**
+ * Download file from storage
+ */
+declare const storageDownload: AgentTool;
+/**
+ * List files in storage
+ */
+declare const storageList: AgentTool;
+/**
+ * Delete file from storage
+ */
+declare const storageDelete: AgentTool;
+/**
+ * Get public URL for a file
+ */
+declare const storagePublicUrl: AgentTool;
+/**
+ * Move/rename a file in storage
+ */
+declare const storageMove: AgentTool;
+/**
+ * Copy a file in storage
+ */
+declare const storageCopy: AgentTool;
+/**
+ * All storage tools bundled together
+ *
+ * @example
+ * ```ts
+ * import { storageTools } from '@blinkdotnew/react'
+ *
+ * const chat = useAgent({
+ *   agent: {
+ *     model: 'openai/gpt-4o',
+ *     tools: [...storageTools]
+ *   }
+ * })
+ * ```
+ */
+declare const storageTools: AgentTool[];
+
+/**
+ * RAG Agent Tool
+ *
+ * Knowledge base search tool using TursoRAG.
+ *
+ * NOTE: Only rag_search is available as an agent tool. Agents ARE AI -
+ * they use rag_search to get relevant chunks, then synthesize answers themselves.
+ * The direct API /api/rag/:project_id/ai-search is available for non-agent use cases.
+ */
+
+/**
+ * Vector search across a knowledge base collection
+ *
+ * Performs semantic similarity search to find relevant chunks from uploaded documents.
+ * The agent receives the chunks and synthesizes an answer from them.
+ * Bills for embedding generation (query vectorization).
+ *
+ * @example
+ * ```ts
+ * import { Agent, ragSearch } from '@blinkdotnew/react'
+ *
+ * const supportAgent = new Agent({
+ *   model: 'openai/gpt-4o',
+ *   tools: [ragSearch],
+ *   system: 'You have access to a knowledge base. Use rag_search to find information, then answer based on the results.'
+ * })
+ *
+ * const result = await supportAgent.generate({
+ *   prompt: 'What is our refund policy?'
+ * })
+ * ```
+ */
+declare const ragSearch: AgentTool;
+/**
+ * All RAG tools bundled together
+ *
+ * @example
+ * ```ts
+ * import { Agent, ragTools } from '@blinkdotnew/react'
+ *
+ * const knowledgeAgent = new Agent({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   tools: [...ragTools],
+ *   system: 'Search the knowledge base and answer questions based on the results.'
+ * })
+ * ```
+ */
+declare const ragTools: AgentTool[];
+
+/**
+ * Agent Tools
+ *
+ * Tool definitions for use with blink.ai.agent() and useAgent()
+ *
+ * @example
+ * ```ts
+ * import {
+ *   webSearch, fetchUrl, runCode, // Core tools
+ *   ragSearch, // RAG tool
+ *   readFile, listDir, writeFile, searchReplace, // Sandbox (Cursor-like)
+ *   grep, globFileSearch, runTerminalCmd, getHost,
+ *   sandboxTools, dbTools, storageTools, ragTools // Tool bundles
+ * } from '@blinkdotnew/react'
+ *
+ * const chat = useAgent({
+ *   agent: {
+ *     model: 'openai/gpt-4o',
+ *     tools: [...sandboxTools, webSearch, ragSearch]
+ *   }
+ * })
+ * ```
+ */
+
+/**
+ * Convert tool definitions to tool names for API request
+ *
+ * @internal Used by SDK to serialize tools for API
+ */
+declare function serializeTools(tools: (AgentTool | string)[]): string[];
+
+export { type AISearchOptions, Agent, type AgentBilling, type AgentConfig, type AgentNonStreamMessagesRequest, type AgentNonStreamPromptRequest, type AgentOptions, type AgentRequest, type AgentResponse, type AgentStep, type AgentStreamRequest, type TokenUsage as AgentTokenUsage, type AgentTool, type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, BlinkConnectorError, type BlinkConnectors, BlinkConnectorsImpl, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRAG, BlinkRAGImpl, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkSandbox, BlinkSandboxImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkTokenType, type BlinkUser, type ClientTool, type ConnectorApiKeyRequest, type ConnectorApiKeyResponse, type ConnectorAuthMode, type ConnectorExecuteRequest, type ConnectorExecuteResponse, type ConnectorProvider, type ConnectorStatusResponse, type ContextPolicy, type CreateCollectionOptions, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type GenerateOptions, type ImageGenerationRequest, type ImageGenerationResponse, type JSONSchema, type ListDocumentsOptions, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RAGAISearchResult, type RAGAISearchSource, type RAGCollection, type RAGDocument, type RAGSearchResponse, type RAGSearchResult, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, SANDBOX_TEMPLATES, type Sandbox, type SandboxConnectOptions, SandboxConnectionError, type SandboxCreateOptions, type SandboxTemplate, type SearchOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StopCondition, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type StreamOptions, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenIntrospectionResult, type TokenUsage$1 as TokenUsage, type ToolCall, type ToolResult, type TranscriptionRequest, type TranscriptionResponse, type UIMessage, type UIMessagePart, type UpdateOptions, type UploadOptions, type UpsertOptions, type WaitForReadyOptions, type WebBrowserModule, WebStorageAdapter, type WebhookTool, createClient, dbDelete, dbGet, dbInsert, dbList, dbTools, dbUpdate, fetchUrl, getDefaultStorageAdapter, getHost, globFileSearch, grep, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, listDir, platform, ragSearch, ragTools, readFile, runCode, runTerminalCmd, sandboxTools, searchReplace, serializeTools, stepCountIs, storageCopy, storageDelete, storageDownload, storageList, storageMove, storagePublicUrl, storageTools, storageUpload, webSearch, writeFile };