@omnikit-ai/sdk 2.2.0 → 2.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -10,7 +10,7 @@ export { cleanTokenFromUrl, getAccessToken, isTokenInUrl, removeAccessToken, sav
10
10
  * SECURITY: getAccessToken requires service token authentication.
11
11
  * Only available to backend functions, not frontend code.
12
12
  */
13
- type ConnectorType = 'slack' | 'google_calendar' | 'notion' | 'salesforce';
13
+ type ConnectorType = 'slack' | 'google_calendar' | 'gmail' | 'notion' | 'salesforce';
14
14
  interface ConnectorAccessTokenResponse {
15
15
  success: boolean;
16
16
  access_token: string;
@@ -178,7 +178,30 @@ interface AppSchema {
178
178
  platform_version?: number;
179
179
  }
180
180
  /**
181
- * MongoDB-style list options (Mongoose-compatible)
181
+ * Paginated response for cursor-based pagination (Google/Notion style)
182
+ * Returned by list() and filter() methods
183
+ *
184
+ * @example
185
+ * ```typescript
186
+ * // First page
187
+ * const page1 = await Task.list({}, { limit: 100 });
188
+ * // page1 = { data: [...], hasMore: true, nextCursor: "abc123" }
189
+ *
190
+ * // Next page
191
+ * const page2 = await Task.list({}, { limit: 100, after: page1.nextCursor });
192
+ * // page2 = { data: [...], hasMore: false, nextCursor: null }
193
+ * ```
194
+ */
195
+ interface PaginatedResult<T = CollectionRecord> {
196
+ /** Array of records for this page */
197
+ data: T[];
198
+ /** Whether there are more records after this page */
199
+ hasMore: boolean;
200
+ /** Cursor to pass to 'after' parameter for next page (null if no more pages) */
201
+ nextCursor: string | null;
202
+ }
203
+ /**
204
+ * MongoDB-style list options with cursor-based pagination
182
205
  * Used in the second parameter of list(filter, options)
183
206
  *
184
207
  * @example
@@ -189,17 +212,29 @@ interface AppSchema {
189
212
  * // Sort descending by timestamp
190
213
  * await Entity.list({}, { sort: { timestamp: -1 } })
191
214
  *
192
- * // With pagination
215
+ * // Cursor-based pagination (RECOMMENDED for large datasets)
216
+ * const page1 = await Entity.list({}, { limit: 100 });
217
+ * const page2 = await Entity.list({}, { limit: 100, after: page1.nextCursor });
218
+ *
219
+ * // Legacy offset pagination (still supported)
193
220
  * await Entity.list({}, { sort: { created_at: -1 }, limit: 20, offset: 10 })
194
221
  * ```
195
222
  */
196
223
  interface ListOptions {
197
224
  /** Sort order: { field: 1 } for ascending, { field: -1 } for descending. Also accepts string format: 'field' or '-field' */
198
225
  sort?: Record<string, 1 | -1> | string;
199
- /** Maximum number of results to return */
226
+ /** Maximum number of results to return (default: 100) */
200
227
  limit?: number;
201
- /** Number of results to skip (for pagination) */
228
+ /**
229
+ * Number of results to skip (for offset-based pagination)
230
+ * @deprecated Use 'after' cursor for better performance on large datasets
231
+ */
202
232
  offset?: number;
233
+ /**
234
+ * Cursor for pagination - pass nextCursor from previous response
235
+ * More efficient than offset for large datasets (O(1) vs O(n))
236
+ */
237
+ after?: string;
203
238
  }
204
239
  /**
205
240
  * @deprecated Use separate filter and ListOptions parameters instead
@@ -300,6 +335,80 @@ interface LLMMessage {
300
335
  * - Legacy aliases: 'gemini-flash', 'gemini-pro', 'gemini-pro-3' (for backward compatibility)
301
336
  */
302
337
  type LLMModel = 'gemini-2.5-flash-lite' | 'gemini-2.5-flash' | 'gemini-2.5-pro' | 'gemini-3-flash' | 'gemini-3-pro' | 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
338
+ /**
339
+ * Parameter definition for a tool (OpenAPI/Gemini compatible format)
340
+ */
341
+ interface ToolParameter {
342
+ type: 'object';
343
+ properties: Record<string, {
344
+ type: string;
345
+ description?: string;
346
+ enum?: string[];
347
+ items?: {
348
+ type: string;
349
+ };
350
+ }>;
351
+ required?: string[];
352
+ }
353
+ /**
354
+ * Tool definition for LLM function calling.
355
+ *
356
+ * @example
357
+ * ```typescript
358
+ * const markStepTool: ToolDefinition = {
359
+ * name: 'mark_step_complete',
360
+ * description: 'Mark a workshop step as completed',
361
+ * parameters: {
362
+ * type: 'object',
363
+ * properties: {
364
+ * step_id: { type: 'string', description: 'The step ID' },
365
+ * summary: { type: 'string', description: 'Summary of completion' }
366
+ * },
367
+ * required: ['step_id', 'summary']
368
+ * }
369
+ * };
370
+ * ```
371
+ */
372
+ interface ToolDefinition {
373
+ name: string;
374
+ description: string;
375
+ parameters: ToolParameter;
376
+ }
377
+ /**
378
+ * A tool call returned by the LLM during streaming or in response
379
+ */
380
+ interface ToolCall {
381
+ id: string;
382
+ name: string;
383
+ arguments: Record<string, any>;
384
+ }
385
+ /**
386
+ * Configuration for server-side persistence of LLM results.
387
+ * When provided, the backend automatically saves the result to the specified collection,
388
+ * ensuring data is not lost if the user navigates away before polling completes.
389
+ *
390
+ * @example
391
+ * ```typescript
392
+ * await invokeLLM({
393
+ * prompt: 'Analyze this data...',
394
+ * saveResult: {
395
+ * collection: 'analysis_results',
396
+ * field: 'result',
397
+ * additionalFields: { status: 'completed' }
398
+ * }
399
+ * });
400
+ * ```
401
+ */
402
+ interface SaveResultConfig {
403
+ /** Collection name to save the result to */
404
+ collection: string;
405
+ /** Document ID to update. If omitted, creates a new document */
406
+ documentId?: string;
407
+ /** Field name to save the LLM result to (default: 'content') */
408
+ field?: string;
409
+ /** Additional fields to include when creating/updating the document */
410
+ additionalFields?: Record<string, any>;
411
+ }
303
412
  interface LLMParams {
304
413
  /** Message-based format for advanced use */
305
414
  messages?: LLMMessage[];
@@ -358,6 +467,125 @@ interface LLMParams {
358
467
  * @param error - The error that occurred
359
468
  */
360
469
  onError?: (error: Error) => void;
470
+ /**
471
+ * Tool definitions for function calling.
472
+ * When provided, the LLM can request tool calls which are executed client-side.
473
+ *
474
+ * @example
475
+ * ```typescript
476
+ * await omnikit.services.InvokeLLM({
477
+ * messages: [...],
478
+ * stream: true,
479
+ * tools: [{
480
+ * name: 'mark_step_complete',
481
+ * description: 'Mark a step as completed',
482
+ * parameters: {
483
+ * type: 'object',
484
+ * properties: {
485
+ * step_id: { type: 'string', description: 'Step ID' }
486
+ * },
487
+ * required: ['step_id']
488
+ * }
489
+ * }],
490
+ * onToolCall: async (toolCall) => {
491
+ * if (toolCall.name === 'mark_step_complete') {
492
+ * await markStepComplete(toolCall.arguments.step_id);
493
+ * }
494
+ * }
495
+ * });
496
+ * ```
497
+ */
498
+ tools?: ToolDefinition[];
499
+ /**
500
+ * Callback when LLM requests a tool call (fires during streaming).
501
+ * Handle the tool execution and optionally continue the conversation.
502
+ *
503
+ * @param toolCall - The tool call with id, name, and arguments
504
+ */
505
+ onToolCall?: (toolCall: ToolCall) => void | Promise<void>;
506
+ /**
507
+ * Server-side persistence configuration.
508
+ * When provided, the backend saves the LLM result directly to the app's database,
509
+ * ensuring data is not lost if the user navigates away before polling completes.
510
+ *
511
+ * @example
512
+ * ```typescript
513
+ * await invokeLLM({
514
+ * prompt: 'Analyze this data...',
515
+ * saveResult: {
516
+ * collection: 'analysis_results',
517
+ * field: 'result',
518
+ * additionalFields: { status: 'completed' }
519
+ * }
520
+ * });
521
+ * // Result is saved server-side even if user navigates away!
522
+ * ```
523
+ */
524
+ saveResult?: SaveResultConfig;
525
+ }
526
+ /**
527
+ * Grounding chunk from Google Search results
528
+ */
529
+ interface GroundingChunk {
530
+ /** Web source */
531
+ web?: {
532
+ /** URL of the source */
533
+ uri: string;
534
+ /** Title of the source */
535
+ title: string;
536
+ };
537
+ }
538
+ /**
539
+ * Grounding support linking text segments to sources
540
+ */
541
+ interface GroundingSupport {
542
+ /** Text segment in the response */
543
+ segment?: {
544
+ startIndex: number;
545
+ endIndex: number;
546
+ text: string;
547
+ };
548
+ /** Indices into groundingChunks array */
549
+ groundingChunkIndices?: number[];
550
+ }
551
+ /**
552
+ * Metadata from Google Search grounding
553
+ */
554
+ interface GroundingMetadata {
555
+ /** Search queries that were executed */
556
+ webSearchQueries?: string[];
557
+ /** HTML/CSS for search suggestions widget (per Gemini API ToS) */
558
+ searchEntryPoint?: {
559
+ renderedContent: string;
560
+ };
561
+ /** Source chunks from web search */
562
+ groundingChunks?: GroundingChunk[];
563
+ /** Text-to-source mappings for citations */
564
+ groundingSupports?: GroundingSupport[];
565
+ /** Items array (alternative format) */
566
+ items?: any[];
567
+ }
568
+ /**
569
+ * URL retrieval status
570
+ */
571
+ type UrlRetrievalStatus = 'URL_RETRIEVAL_STATUS_SUCCESS' | 'URL_RETRIEVAL_STATUS_UNSAFE' | 'URL_RETRIEVAL_STATUS_FAILED' | 'URL_RETRIEVAL_STATUS_UNSPECIFIED';
572
+ /**
573
+ * Metadata for a single URL context retrieval
574
+ */
575
+ interface UrlMetadata {
576
+ /** The URL that was retrieved */
577
+ retrieved_url: string;
578
+ /** Status of the retrieval */
579
+ url_retrieval_status: UrlRetrievalStatus;
580
+ }
581
+ /**
582
+ * Metadata from URL context tool
583
+ */
584
+ interface UrlContextMetadata {
585
+ /** Array of URL metadata for each URL processed */
586
+ url_metadata?: UrlMetadata[];
587
+ /** Items array (alternative format) */
588
+ items?: any[];
361
589
  }
362
590
  /**
363
591
  * Result from streaming LLM completion
@@ -378,14 +606,71 @@ interface LLMStreamResult {
378
606
  /** Whether files were in the input */
379
607
  has_files?: boolean;
380
608
  }
609
+ /**
610
+ * Full LLM response (non-streaming)
611
+ */
612
+ interface LLMResponse {
613
+ /** Whether the request was successful */
614
+ success: boolean;
615
+ /** Response content (string or parsed JSON when response_format is used) */
616
+ result: any;
617
+ /** Model that was used */
618
+ model_used: string;
619
+ /** Whether images were in the input */
620
+ has_images?: boolean;
621
+ /** Whether files were in the input */
622
+ has_files?: boolean;
623
+ /** Whether Google Search grounding was used */
624
+ google_search_used?: boolean;
625
+ /** Whether URL context was used */
626
+ url_context_used?: boolean;
627
+ /** Number of continuation requests made for long outputs */
628
+ continuation_count?: number;
629
+ /** Token usage statistics */
630
+ usage?: {
631
+ prompt_tokens: number;
632
+ completion_tokens: number;
633
+ total_tokens: number;
634
+ };
635
+ /**
636
+ * Grounding metadata from Google Search (when google_search: true)
637
+ * Contains search queries, source URLs, and text-to-source mappings for citations
638
+ */
639
+ grounding_metadata?: GroundingMetadata;
640
+ /**
641
+ * URL context metadata (when url_context: true)
642
+ * Contains info about which URLs were retrieved and their status
643
+ */
644
+ url_context_metadata?: UrlContextMetadata;
645
+ /**
646
+ * Tool calls requested by the LLM (when tools are provided).
647
+ * In streaming mode, these are delivered via onToolCall callback.
648
+ * In non-streaming mode, they are included in the response.
649
+ */
650
+ tool_calls?: ToolCall[];
651
+ /**
652
+ * Server-side save result (when saveResult was provided in the request).
653
+ * Contains the collection and document ID where the result was saved.
654
+ */
655
+ saved_to?: {
656
+ collection: string;
657
+ document_id: string;
658
+ };
659
+ /** @deprecated Use google_search_used instead */
660
+ web_search_used?: boolean;
661
+ }
381
662
  /**
382
663
  * SSE event types for LLM streaming
383
664
  */
384
665
  interface LLMStreamEvent {
385
666
  /** Event type */
386
- type: 'token' | 'done' | 'error';
667
+ type: 'token' | 'done' | 'error' | 'tool_call';
387
668
  /** Token content (for type: 'token') */
388
669
  content?: string;
670
+ /** Tool call data (for type: 'tool_call') */
671
+ id?: string;
672
+ name?: string;
673
+ arguments?: Record<string, any>;
389
674
  /** Complete result (for type: 'done') */
390
675
  result?: string;
391
676
  /** Model used (for type: 'done') */
@@ -720,11 +1005,45 @@ interface BuiltInIntegration {
720
1005
  SendSMS(params: SMSParams): Promise<ServiceResponse>;
721
1006
  /**
722
1007
  * Invoke LLM for text/vision/file processing
723
- * @param params - LLM parameters
1008
+ *
1009
+ * Features:
1010
+ * - Multi-modal inputs: text, images, PDFs, videos, audio, YouTube URLs
1011
+ * - Google Search grounding: Enable `google_search: true` for real-time web data
1012
+ * - URL context: Enable `url_context: true` to have the model read URLs in your prompt
1013
+ * - Streaming: Enable `stream: true` with callbacks for real-time token output
1014
+ * - JSON output: Use `response_format: { type: 'json_object' }` for structured responses
1015
+ *
1016
+ * @param params - LLM parameters including messages, google_search, url_context, etc.
724
1017
  * @param options - Async options for handling long-running operations
725
- * @returns Result or AsyncJobCreatedResponse (if async_mode or returnJobId)
1018
+ * @returns LLMResponse with result, grounding_metadata, and url_context_metadata
1019
+ *
1020
+ * @example Basic usage
1021
+ * ```typescript
1022
+ * const response = await InvokeLLM({ prompt: 'Hello, world!' });
1023
+ * console.log(response.result);
1024
+ * ```
1025
+ *
1026
+ * @example With Google Search grounding
1027
+ * ```typescript
1028
+ * const response = await InvokeLLM({
1029
+ * prompt: 'What are the latest AI news?',
1030
+ * google_search: true
1031
+ * });
1032
+ * console.log(response.result);
1033
+ * console.log(response.grounding_metadata?.groundingChunks); // Source URLs
1034
+ * ```
1035
+ *
1036
+ * @example With URL context
1037
+ * ```typescript
1038
+ * const response = await InvokeLLM({
1039
+ * prompt: 'Summarize the content at https://example.com/article',
1040
+ * url_context: true
1041
+ * });
1042
+ * console.log(response.result);
1043
+ * console.log(response.url_context_metadata?.url_metadata); // Retrieval status
1044
+ * ```
726
1045
  */
727
- InvokeLLM(params: LLMParams, options?: AsyncOptions): Promise<ServiceResponse | AsyncJobCreatedResponse>;
1046
+ InvokeLLM(params: LLMParams, options?: AsyncOptions): Promise<LLMResponse | AsyncJobCreatedResponse>;
728
1047
  UploadFile(params: {
729
1048
  file: File;
730
1049
  metadata?: Record<string, any>;
@@ -791,10 +1110,11 @@ interface AuthModule {
791
1110
  */
792
1111
  me(): Promise<UserInfo>;
793
1112
  /**
794
- * Redirect to platform login page
795
- * @param returnPath - Path to return to after login
1113
+ * Redirect to platform login page, or navigate directly if already authenticated.
1114
+ * Smart login: checks auth state first, only shows modal if not logged in.
1115
+ * @param returnPath - Path to return to after login (relative paths are resolved to absolute)
796
1116
  */
797
- login(returnPath?: string): void;
1117
+ login(returnPath?: string): void | Promise<void>;
798
1118
  /**
799
1119
  * Request a passwordless login code to email
800
1120
  * @param email - User email
@@ -1168,6 +1488,156 @@ interface IntegrationPackage {
1168
1488
  interface IntegrationSchema {
1169
1489
  installed_packages: IntegrationPackage[];
1170
1490
  }
1491
+ /**
1492
+ * Message in an assistant thread
1493
+ */
1494
+ interface AssistantMessage {
1495
+ role: 'user' | 'assistant' | 'tool';
1496
+ content: string;
1497
+ }
1498
+ /**
1499
+ * Action call made by the assistant during execution
1500
+ */
1501
+ interface AssistantActionCall {
1502
+ id: string;
1503
+ name: string;
1504
+ arguments: Record<string, any>;
1505
+ status: 'pending' | 'running' | 'completed' | 'failed';
1506
+ output?: any;
1507
+ error?: string;
1508
+ }
1509
+ /**
1510
+ * Parameters for running an assistant
1511
+ */
1512
+ interface AssistantRunParams {
1513
+ /**
1514
+ * Messages to send to the assistant
1515
+ */
1516
+ messages: AssistantMessage[];
1517
+ /**
1518
+ * Optional thread ID to continue an existing conversation.
1519
+ * If not provided, a new thread will be created.
1520
+ */
1521
+ threadId?: string;
1522
+ /**
1523
+ * Callback when a text token is received during streaming
1524
+ */
1525
+ onToken?: (token: string) => void;
1526
+ /**
1527
+ * Callback when an action starts executing
1528
+ */
1529
+ onActionStart?: (action: {
1530
+ id: string;
1531
+ name: string;
1532
+ arguments: Record<string, any>;
1533
+ }) => void;
1534
+ /**
1535
+ * Callback when an action completes successfully
1536
+ */
1537
+ onActionComplete?: (action: {
1538
+ id: string;
1539
+ name: string;
1540
+ output: any;
1541
+ }) => void;
1542
+ /**
1543
+ * Callback when an action fails
1544
+ */
1545
+ onActionFailed?: (action: {
1546
+ id: string;
1547
+ name: string;
1548
+ error: string;
1549
+ }) => void;
1550
+ /**
1551
+ * Callback when the run completes
1552
+ */
1553
+ onComplete?: (result: AssistantRunResult) => void;
1554
+ /**
1555
+ * Callback when an error occurs
1556
+ */
1557
+ onError?: (error: Error) => void;
1558
+ }
1559
+ /**
1560
+ * Result from running an assistant
1561
+ */
1562
+ interface AssistantRunResult {
1563
+ /** Thread ID (for continuing the conversation) */
1564
+ threadId: string;
1565
+ /** Number of messages in the thread */
1566
+ messageCount: number;
1567
+ /** Full response text from the assistant */
1568
+ response: string;
1569
+ }
1570
+ /**
1571
+ * Callbacks for thread subscription via WebSocket
1572
+ */
1573
+ interface ThreadSubscriptionCallbacks {
1574
+ /**
1575
+ * Called when connection is established
1576
+ */
1577
+ onConnected?: (info: {
1578
+ threadId: string;
1579
+ assistantName: string;
1580
+ messageCount: number;
1581
+ }) => void;
1582
+ /**
1583
+ * Called when a text token is received
1584
+ */
1585
+ onToken?: (token: string) => void;
1586
+ /**
1587
+ * Called when an action starts
1588
+ */
1589
+ onActionStart?: (action: {
1590
+ id: string;
1591
+ name: string;
1592
+ arguments: Record<string, any>;
1593
+ }) => void;
1594
+ /**
1595
+ * Called when an action completes
1596
+ */
1597
+ onActionComplete?: (action: {
1598
+ id: string;
1599
+ name: string;
1600
+ output: any;
1601
+ }) => void;
1602
+ /**
1603
+ * Called when an action fails
1604
+ */
1605
+ onActionFailed?: (action: {
1606
+ id: string;
1607
+ name: string;
1608
+ error: string;
1609
+ }) => void;
1610
+ /**
1611
+ * Called when the run completes
1612
+ */
1613
+ onRunComplete?: (result: {
1614
+ threadId: string;
1615
+ messageCount: number;
1616
+ }) => void;
1617
+ /**
1618
+ * Called when an error occurs
1619
+ */
1620
+ onError?: (error: Error) => void;
1621
+ /**
1622
+ * Called when connection closes
1623
+ */
1624
+ onDisconnect?: () => void;
1625
+ }
1626
+ /**
1627
+ * SSE event types for assistant streaming
1628
+ */
1629
+ interface AssistantStreamEvent {
1630
+ type: 'token' | 'action_start' | 'action_complete' | 'action_failed' | 'done' | 'error';
1631
+ content?: string;
1632
+ id?: string;
1633
+ name?: string;
1634
+ arguments?: Record<string, any>;
1635
+ output?: any;
1636
+ error?: string;
1637
+ message?: string;
1638
+ thread_id?: string;
1639
+ message_count?: number;
1640
+ }
1171
1641
  /**
1172
1642
  * Available voice options for Live Voice AI
1173
1643
  */
@@ -1442,6 +1912,13 @@ declare class APIClient implements OmnikitClient {
1442
1912
  * ```
1443
1913
  */
1444
1914
  get connectors(): ConnectorsModule$1;
1915
+ /**
1916
+ * Resolve a return URL to an absolute URL.
1917
+ * Handles relative paths like "/profile" by combining with current location.
1918
+ * This fixes the OAuth redirect bug where relative URLs like "/profile" become
1919
+ * "https://omnikit.ai/profile" instead of "https://omnikit.ai/app-builder/{id}/preview/profile"
1920
+ */
1921
+ private _resolveReturnUrl;
1445
1922
  /**
1446
1923
  * Create auth proxy that auto-initializes
1447
1924
  */
@@ -1584,6 +2061,63 @@ declare class APIClient implements OmnikitClient {
1584
2061
  * @returns A LiveVoiceSession object to control the session
1585
2062
  */
1586
2063
  createLiveVoiceSession(config?: LiveVoiceConfig): LiveVoiceSession;
2064
+ /**
2065
+ * Run an assistant with streaming response.
2066
+ *
2067
+ * Assistants are server-side AI agents with custom instructions and built-in actions.
2068
+ * They can create/update/delete records, send emails, make HTTP requests, etc.
2069
+ *
2070
+ * @example
2071
+ * ```typescript
2072
+ * const result = await omnikit.runAssistant('customer_support', {
2073
+ * messages: [{ role: 'user', content: 'Help me track my order #12345' }],
2074
+ * onToken: (token) => setResponse(prev => prev + token),
2075
+ * onActionStart: (action) => console.log(`Starting: ${action.name}`),
2076
+ * onActionComplete: (action) => console.log(`Completed: ${action.name}`),
2077
+ * });
2078
+ *
2079
+ * // Continue the conversation
2080
+ * await omnikit.runAssistant('customer_support', {
2081
+ * messages: [{ role: 'user', content: 'When will it arrive?' }],
2082
+ * threadId: result.threadId,
2083
+ * onToken: (token) => setResponse(prev => prev + token),
2084
+ * });
2085
+ * ```
2086
+ *
2087
+ * @param assistantName - Name of the assistant to run
2088
+ * @param params - Run parameters including messages and callbacks
2089
+ * @returns Promise resolving to the run result with threadId
2090
+ */
2091
+ runAssistant(assistantName: string, params: AssistantRunParams): Promise<AssistantRunResult>;
2092
+ /**
2093
+ * Subscribe to a thread via WebSocket for real-time updates.
2094
+ *
2095
+ * Use this to watch a thread without triggering a run. Useful for:
2096
+ * - Showing live updates when another client runs the assistant
2097
+ * - Reconnecting after page navigation
2098
+ * - Observing thread activity in real-time
2099
+ *
2100
+ * @example
2101
+ * ```typescript
2102
+ * // Subscribe to a thread
2103
+ * const unsubscribe = omnikit.subscribeToThread(threadId, {
2104
+ * onConnected: (info) => console.log(`Connected to ${info.assistantName}`),
2105
+ * onToken: (token) => setResponse(prev => prev + token),
2106
+ * onActionStart: (action) => console.log(`Action: ${action.name}`),
2107
+ * onActionComplete: (action) => console.log(`Result: ${JSON.stringify(action.output)}`),
2108
+ * onRunComplete: (result) => console.log(`Run complete, ${result.messageCount} messages`),
2109
+ * onError: (error) => console.error(error),
2110
+ * });
2111
+ *
2112
+ * // Later: disconnect
2113
+ * unsubscribe();
2114
+ * ```
2115
+ *
2116
+ * @param threadId - Thread ID to subscribe to
2117
+ * @param callbacks - Event callbacks for real-time updates
2118
+ * @returns Unsubscribe function to close the WebSocket
2119
+ */
2120
+ subscribeToThread(threadId: string, callbacks: ThreadSubscriptionCallbacks): () => void;
1587
2121
  /**
1588
2122
  * Invoke a backend function by name.
1589
2123
  *
@@ -1883,4 +2417,4 @@ declare class Analytics {
1883
2417
  }
1884
2418
  declare function createAnalytics(config: AnalyticsConfig): Analytics;
1885
2419
 
1886
- export { APIClient, Analytics, type AnalyticsConfig, type AppMetadata, type AppSchema, type AsyncJobCreatedResponse, type AsyncJobStatus, type AsyncJobStatusResponse, type AsyncJobType, type AsyncOptions, type AuthModule, type AuthResponse, type BuiltInIntegration, type BulkResult, type CachedMetadata, type CheckJobStatusParams, type CollectionClass, type CollectionDefinition, type CollectionField, type CollectionRecord, type ConnectorAccessTokenResponse, type ConnectorStatusResponse, type ConnectorType, type ConnectorsModule, type EmailParams, type Entity, type EntityClass, type EntityDefinition, type EntityField, type EntityRecord, type EventPayload, type ExtractParams, type ImageParams, type ImportResult, type InitialMetadata, type IntegrationEndpoint, type IntegrationMethod, type IntegrationPackage, type IntegrationSchema, type LLMMessage, type LLMModel, type LLMParams, type LLMStreamEvent, type LLMStreamResult, type ListOptions, type LiveVoiceClientMessage, type LiveVoiceConfig, type LiveVoiceServerMessage, type LiveVoiceSession, LiveVoiceSessionImpl, type LiveVoiceStatus, type LiveVoiceVoice, type OAuthProvider, type OAuthProvidersResponse, type OmnikitClient, type OmnikitConfig, OmnikitError, type QueryOptions, type RequestOptions, type SMSParams, type ServiceDefinition, type ServiceResponse, type ServiceRoleClient, type ServicesSchema, type SpeechParams, type TemplateDefinition, type UserCollectionClass, type UserEntityClass, type UserInfo, type VideoParams, type VideoStatusParams, createAnalytics, createClient, createClientFromRequest, createServerClient };
2420
+ export { APIClient, Analytics, type AnalyticsConfig, type AppMetadata, type AppSchema, type AssistantActionCall, type AssistantMessage, type AssistantRunParams, type AssistantRunResult, type AssistantStreamEvent, type AsyncJobCreatedResponse, type AsyncJobStatus, type AsyncJobStatusResponse, type AsyncJobType, type AsyncOptions, type AuthModule, type AuthResponse, type BuiltInIntegration, type BulkResult, type CachedMetadata, type CheckJobStatusParams, type CollectionClass, type CollectionDefinition, type CollectionField, type CollectionRecord, type ConnectorAccessTokenResponse, type ConnectorStatusResponse, type ConnectorType, type ConnectorsModule, type EmailParams, type Entity, type EntityClass, type EntityDefinition, type EntityField, type EntityRecord, type EventPayload, type ExtractParams, type GroundingChunk, type GroundingMetadata, type GroundingSupport, type ImageParams, type ImportResult, type InitialMetadata, type IntegrationEndpoint, type IntegrationMethod, type IntegrationPackage, type IntegrationSchema, type LLMMessage, type LLMModel, type LLMParams, type LLMResponse, type LLMStreamEvent, type LLMStreamResult, type ListOptions, type LiveVoiceClientMessage, type LiveVoiceConfig, type LiveVoiceServerMessage, type LiveVoiceSession, LiveVoiceSessionImpl, type LiveVoiceStatus, type LiveVoiceVoice, type OAuthProvider, type OAuthProvidersResponse, type OmnikitClient, type OmnikitConfig, OmnikitError, type PaginatedResult, type QueryOptions, type RequestOptions, type SMSParams, type SaveResultConfig, type ServiceDefinition, type ServiceResponse, type ServiceRoleClient, type ServicesSchema, type SpeechParams, type TemplateDefinition, type ThreadSubscriptionCallbacks, type ToolCall, type ToolDefinition, type ToolParameter, type UrlContextMetadata, type UrlMetadata, type UrlRetrievalStatus, type UserCollectionClass, type UserEntityClass, type UserInfo, type VideoParams, type VideoStatusParams, createAnalytics, createClient, createClientFromRequest, createServerClient };