@omnikit-ai/sdk 2.2.0 → 2.2.1

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.mts CHANGED
@@ -10,7 +10,7 @@ export { cleanTokenFromUrl, getAccessToken, isTokenInUrl, removeAccessToken, sav
  * SECURITY: getAccessToken requires service token authentication.
  * Only available to backend functions, not frontend code.
  */
- type ConnectorType = 'slack' | 'google_calendar' | 'notion' | 'salesforce';
+ type ConnectorType = 'slack' | 'google_calendar' | 'gmail' | 'notion' | 'salesforce';
  interface ConnectorAccessTokenResponse {
  success: boolean;
  access_token: string;
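The only functional change in this hunk is the 'gmail' member added to the ConnectorType union. A minimal sketch of backend-side usage under stated assumptions: fetchConnectorToken and its endpoint are hypothetical illustrations, not part of the published SDK; only ConnectorType and ConnectorAccessTokenResponse come from the declarations above.

```typescript
// Types copied from the declarations in the hunk above.
type ConnectorType = 'slack' | 'google_calendar' | 'gmail' | 'notion' | 'salesforce';

interface ConnectorAccessTokenResponse {
  success: boolean;
  access_token: string;
}

// Hypothetical backend-only helper and endpoint, for illustration only: the real
// token lookup is gated behind service-token authentication (see the SECURITY note).
async function fetchConnectorToken(connector: ConnectorType): Promise<ConnectorAccessTokenResponse> {
  const res = await fetch(`/api/connectors/${connector}/token`);
  return (await res.json()) as ConnectorAccessTokenResponse;
}

// 'gmail' is the union member added in 2.2.1, so this now type-checks.
void fetchConnectorToken('gmail');
```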
@@ -300,6 +300,53 @@ interface LLMMessage {
  * - Legacy aliases: 'gemini-flash', 'gemini-pro', 'gemini-pro-3' (for backward compatibility)
  */
  type LLMModel = 'gemini-2.5-flash-lite' | 'gemini-2.5-flash' | 'gemini-2.5-pro' | 'gemini-3-flash' | 'gemini-3-pro' | 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
+ /**
+ * Parameter definition for a tool (OpenAPI/Gemini compatible format)
+ */
+ interface ToolParameter {
+ type: 'object';
+ properties: Record<string, {
+ type: string;
+ description?: string;
+ enum?: string[];
+ items?: {
+ type: string;
+ };
+ }>;
+ required?: string[];
+ }
+ /**
+ * Tool definition for LLM function calling.
+ *
+ * @example
+ * ```typescript
+ * const markStepTool: ToolDefinition = {
+ * name: 'mark_step_complete',
+ * description: 'Mark a workshop step as completed',
+ * parameters: {
+ * type: 'object',
+ * properties: {
+ * step_id: { type: 'string', description: 'The step ID' },
+ * summary: { type: 'string', description: 'Summary of completion' }
+ * },
+ * required: ['step_id', 'summary']
+ * }
+ * };
+ * ```
+ */
+ interface ToolDefinition {
+ name: string;
+ description: string;
+ parameters: ToolParameter;
+ }
+ /**
+ * A tool call returned by the LLM during streaming or in response
+ */
+ interface ToolCall {
+ id: string;
+ name: string;
+ arguments: Record<string, any>;
+ }
  interface LLMParams {
  /** Message-based format for advanced use */
  messages?: LLMMessage[];
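ToolDefinition and ToolCall are plain data shapes, so client code can build a name-keyed dispatcher directly against them. A sketch assuming the types exactly as declared above; markStepComplete is a hypothetical application function, and the tool definition mirrors the @example in the hunk.

```typescript
import type { ToolCall, ToolDefinition } from '@omnikit-ai/sdk';

// Hypothetical application function invoked by the handler below.
async function markStepComplete(stepId: string, summary: string): Promise<void> {
  console.log(`step ${stepId} complete: ${summary}`);
}

// Tool definition mirroring the @example in the declarations above.
export const markStepTool: ToolDefinition = {
  name: 'mark_step_complete',
  description: 'Mark a workshop step as completed',
  parameters: {
    type: 'object',
    properties: {
      step_id: { type: 'string', description: 'The step ID' },
      summary: { type: 'string', description: 'Summary of completion' },
    },
    required: ['step_id', 'summary'],
  },
};

// Name-keyed dispatcher for tool calls returned by the LLM.
const handlers: Record<string, (call: ToolCall) => Promise<void>> = {
  mark_step_complete: (call) =>
    markStepComplete(String(call.arguments.step_id), String(call.arguments.summary)),
};

export async function dispatchToolCall(call: ToolCall): Promise<void> {
  const handler = handlers[call.name];
  if (!handler) throw new Error(`No handler registered for tool "${call.name}"`);
  await handler(call);
}
```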
@@ -358,6 +405,106 @@ interface LLMParams {
  * @param error - The error that occurred
  */
  onError?: (error: Error) => void;
+ /**
+ * Tool definitions for function calling.
+ * When provided, the LLM can request tool calls which are executed client-side.
+ *
+ * @example
+ * ```typescript
+ * await omnikit.services.InvokeLLM({
+ * messages: [...],
+ * stream: true,
+ * tools: [{
+ * name: 'mark_step_complete',
+ * description: 'Mark a step as completed',
+ * parameters: {
+ * type: 'object',
+ * properties: {
+ * step_id: { type: 'string', description: 'Step ID' }
+ * },
+ * required: ['step_id']
+ * }
+ * }],
+ * onToolCall: async (toolCall) => {
+ * if (toolCall.name === 'mark_step_complete') {
+ * await markStepComplete(toolCall.arguments.step_id);
+ * }
+ * }
+ * });
+ * ```
+ */
+ tools?: ToolDefinition[];
+ /**
+ * Callback when LLM requests a tool call (fires during streaming).
+ * Handle the tool execution and optionally continue the conversation.
+ *
+ * @param toolCall - The tool call with id, name, and arguments
+ */
+ onToolCall?: (toolCall: ToolCall) => void | Promise<void>;
+ }
+ /**
+ * Grounding chunk from Google Search results
+ */
+ interface GroundingChunk {
+ /** Web source */
+ web?: {
+ /** URL of the source */
+ uri: string;
+ /** Title of the source */
+ title: string;
+ };
+ }
+ /**
+ * Grounding support linking text segments to sources
+ */
+ interface GroundingSupport {
+ /** Text segment in the response */
+ segment?: {
+ startIndex: number;
+ endIndex: number;
+ text: string;
+ };
+ /** Indices into groundingChunks array */
+ groundingChunkIndices?: number[];
+ }
+ /**
+ * Metadata from Google Search grounding
+ */
+ interface GroundingMetadata {
+ /** Search queries that were executed */
+ webSearchQueries?: string[];
+ /** HTML/CSS for search suggestions widget (per Gemini API ToS) */
+ searchEntryPoint?: {
+ renderedContent: string;
+ };
+ /** Source chunks from web search */
+ groundingChunks?: GroundingChunk[];
+ /** Text-to-source mappings for citations */
+ groundingSupports?: GroundingSupport[];
+ /** Items array (alternative format) */
+ items?: any[];
+ }
+ /**
+ * URL retrieval status
+ */
+ type UrlRetrievalStatus = 'URL_RETRIEVAL_STATUS_SUCCESS' | 'URL_RETRIEVAL_STATUS_UNSAFE' | 'URL_RETRIEVAL_STATUS_FAILED' | 'URL_RETRIEVAL_STATUS_UNSPECIFIED';
+ /**
+ * Metadata for a single URL context retrieval
+ */
+ interface UrlMetadata {
+ /** The URL that was retrieved */
+ retrieved_url: string;
+ /** Status of the retrieval */
+ url_retrieval_status: UrlRetrievalStatus;
+ }
+ /**
+ * Metadata from URL context tool
+ */
+ interface UrlContextMetadata {
+ /** Array of URL metadata for each URL processed */
+ url_metadata?: UrlMetadata[];
+ /** Items array (alternative format) */
+ items?: any[];
  }
  /**
  * Result from streaming LLM completion
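GroundingSupport links text segments to GroundingChunk entries by index, so turning grounding metadata into a citation list is a small mapping step. A sketch over the shapes declared above; Citation is a local helper type, not an SDK export.

```typescript
import type { GroundingMetadata } from '@omnikit-ai/sdk';

// Local helper type for rendering citations; not an SDK export.
interface Citation {
  text: string;
  sources: { uri: string; title: string }[];
}

// Map each grounded text segment to the web sources it was attributed to.
export function extractCitations(metadata: GroundingMetadata | undefined): Citation[] {
  if (!metadata?.groundingSupports || !metadata.groundingChunks) return [];
  const chunks = metadata.groundingChunks;
  return metadata.groundingSupports.flatMap((support) => {
    if (!support.segment) return [];
    const sources = (support.groundingChunkIndices ?? [])
      .map((index) => chunks[index]?.web)
      .filter((web): web is { uri: string; title: string } => Boolean(web));
    return [{ text: support.segment.text, sources }];
  });
}
```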
@@ -378,14 +525,63 @@ interface LLMStreamResult {
  /** Whether files were in the input */
  has_files?: boolean;
  }
+ /**
+ * Full LLM response (non-streaming)
+ */
+ interface LLMResponse {
+ /** Whether the request was successful */
+ success: boolean;
+ /** Response content (string or parsed JSON when response_format is used) */
+ result: any;
+ /** Model that was used */
+ model_used: string;
+ /** Whether images were in the input */
+ has_images?: boolean;
+ /** Whether files were in the input */
+ has_files?: boolean;
+ /** Whether Google Search grounding was used */
+ google_search_used?: boolean;
+ /** Whether URL context was used */
+ url_context_used?: boolean;
+ /** Number of continuation requests made for long outputs */
+ continuation_count?: number;
+ /** Token usage statistics */
+ usage?: {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ };
+ /**
+ * Grounding metadata from Google Search (when google_search: true)
+ * Contains search queries, source URLs, and text-to-source mappings for citations
+ */
+ grounding_metadata?: GroundingMetadata;
+ /**
+ * URL context metadata (when url_context: true)
+ * Contains info about which URLs were retrieved and their status
+ */
+ url_context_metadata?: UrlContextMetadata;
+ /**
+ * Tool calls requested by the LLM (when tools are provided).
+ * In streaming mode, these are delivered via onToolCall callback.
+ * In non-streaming mode, they are included in the response.
+ */
+ tool_calls?: ToolCall[];
+ /** @deprecated Use google_search_used instead */
+ web_search_used?: boolean;
+ }
  /**
  * SSE event types for LLM streaming
  */
  interface LLMStreamEvent {
  /** Event type */
- type: 'token' | 'done' | 'error';
+ type: 'token' | 'done' | 'error' | 'tool_call';
  /** Token content (for type: 'token') */
  content?: string;
+ /** Tool call data (for type: 'tool_call') */
+ id?: string;
+ name?: string;
+ arguments?: Record<string, any>;
  /** Complete result (for type: 'done') */
  result?: string;
  /** Model used (for type: 'done') */
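LLMStreamEvent now includes a 'tool_call' variant whose id, name, and arguments fields sit alongside the token fields, so a stream consumer only needs to branch on type. A sketch assuming events arrive as already-parsed LLMStreamEvent objects; StreamHandlers and the handler wiring are illustrative, not SDK API.

```typescript
import type { LLMStreamEvent, ToolCall } from '@omnikit-ai/sdk';

// Illustrative handler bundle; not part of the SDK surface.
interface StreamHandlers {
  onToken: (text: string) => void;
  onToolCall: (call: ToolCall) => void | Promise<void>;
  onDone: (fullText: string) => void;
  onError: (message: string) => void;
}

// Branch on the SSE event type, including the 'tool_call' variant added in 2.2.1.
export async function handleStreamEvent(event: LLMStreamEvent, h: StreamHandlers): Promise<void> {
  switch (event.type) {
    case 'token':
      if (event.content) h.onToken(event.content);
      break;
    case 'tool_call':
      // id/name/arguments are optional on the event shape, so guard before building a ToolCall.
      if (event.id && event.name) {
        await h.onToolCall({ id: event.id, name: event.name, arguments: event.arguments ?? {} });
      }
      break;
    case 'done':
      h.onDone(event.result ?? '');
      break;
    case 'error':
      h.onError('LLM stream reported an error');
      break;
  }
}
```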
@@ -720,11 +916,45 @@ interface BuiltInIntegration {
  SendSMS(params: SMSParams): Promise<ServiceResponse>;
  /**
  * Invoke LLM for text/vision/file processing
- * @param params - LLM parameters
+ *
+ * Features:
+ * - Multi-modal inputs: text, images, PDFs, videos, audio, YouTube URLs
+ * - Google Search grounding: Enable `google_search: true` for real-time web data
+ * - URL context: Enable `url_context: true` to have the model read URLs in your prompt
+ * - Streaming: Enable `stream: true` with callbacks for real-time token output
+ * - JSON output: Use `response_format: { type: 'json_object' }` for structured responses
+ *
+ * @param params - LLM parameters including messages, google_search, url_context, etc.
  * @param options - Async options for handling long-running operations
- * @returns Result or AsyncJobCreatedResponse (if async_mode or returnJobId)
+ * @returns LLMResponse with result, grounding_metadata, and url_context_metadata
+ *
+ * @example Basic usage
+ * ```typescript
+ * const response = await InvokeLLM({ prompt: 'Hello, world!' });
+ * console.log(response.result);
+ * ```
+ *
+ * @example With Google Search grounding
+ * ```typescript
+ * const response = await InvokeLLM({
+ * prompt: 'What are the latest AI news?',
+ * google_search: true
+ * });
+ * console.log(response.result);
+ * console.log(response.grounding_metadata?.groundingChunks); // Source URLs
+ * ```
+ *
+ * @example With URL context
+ * ```typescript
+ * const response = await InvokeLLM({
+ * prompt: 'Summarize the content at https://example.com/article',
+ * url_context: true
+ * });
+ * console.log(response.result);
+ * console.log(response.url_context_metadata?.url_metadata); // Retrieval status
+ * ```
  */
- InvokeLLM(params: LLMParams, options?: AsyncOptions): Promise<ServiceResponse | AsyncJobCreatedResponse>;
+ InvokeLLM(params: LLMParams, options?: AsyncOptions): Promise<LLMResponse | AsyncJobCreatedResponse>;
  UploadFile(params: {
  file: File;
  metadata?: Record<string, any>;
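In non-streaming calls the new LLMResponse exposes requested tool calls on tool_calls rather than via onToolCall. A sketch under stated assumptions: it uses the prompt shorthand shown in the SDK's own @example blocks, casts away the AsyncJobCreatedResponse branch of the return union, and reuses the hypothetical markStepTool and dispatchToolCall helpers from the earlier sketch.

```typescript
import type { BuiltInIntegration, LLMResponse, ToolCall, ToolDefinition } from '@omnikit-ai/sdk';

// Hypothetical helpers from the dispatcher sketch earlier in this diff.
declare const markStepTool: ToolDefinition;
declare function dispatchToolCall(call: ToolCall): Promise<void>;

// Non-streaming call: requested tool calls come back on response.tool_calls.
export async function askWithTools(services: BuiltInIntegration, prompt: string): Promise<string> {
  const response = await services.InvokeLLM({ prompt, tools: [markStepTool] });

  // InvokeLLM can also return an AsyncJobCreatedResponse when async options are used;
  // this sketch assumes the synchronous LLMResponse branch.
  const llm = response as LLMResponse;

  for (const call of llm.tool_calls ?? []) {
    await dispatchToolCall(call);
  }
  return typeof llm.result === 'string' ? llm.result : JSON.stringify(llm.result);
}
```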
@@ -791,10 +1021,11 @@ interface AuthModule {
  */
  me(): Promise<UserInfo>;
  /**
- * Redirect to platform login page
- * @param returnPath - Path to return to after login
+ * Redirect to platform login page, or navigate directly if already authenticated.
+ * Smart login: checks auth state first, only shows modal if not logged in.
+ * @param returnPath - Path to return to after login (relative paths are resolved to absolute)
  */
- login(returnPath?: string): void;
+ login(returnPath?: string): void | Promise<void>;
  /**
  * Request a passwordless login code to email
  * @param email - User email
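Since login may now return a Promise (the smart-login path checks auth state before redirecting), callers that treated it as synchronous can simply await it; awaiting a plain void return is a no-op, so the change stays backward compatible. A minimal sketch against the AuthModule type above:

```typescript
import type { AuthModule } from '@omnikit-ai/sdk';

// Works with both the old synchronous signature and the new void | Promise<void> one.
export async function ensureLoggedIn(auth: AuthModule, returnPath = '/profile'): Promise<void> {
  await auth.login(returnPath);
}
```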
@@ -1442,6 +1673,13 @@ declare class APIClient implements OmnikitClient {
  * ```
  */
  get connectors(): ConnectorsModule$1;
+ /**
+ * Resolve a return URL to an absolute URL.
+ * Handles relative paths like "/profile" by combining with current location.
+ * This fixes the OAuth redirect bug where relative URLs like "/profile" become
+ * "https://omnikit.ai/profile" instead of "https://omnikit.ai/app-builder/{id}/preview/profile"
+ */
+ private _resolveReturnUrl;
  /**
  * Create auth proxy that auto-initializes
  */
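_resolveReturnUrl is private, so the sketch below is not a call into the SDK; it only illustrates the resolution behavior the comment describes, using the standard URL API: relative return paths are rebased onto the current page path instead of the site origin.

```typescript
// Resolve a return path against the full current URL so "/profile" keeps any
// app-specific prefix instead of jumping back to the site origin.
export function resolveReturnUrl(returnPath: string, currentHref: string): string {
  // Absolute URLs pass through untouched.
  if (/^https?:\/\//i.test(returnPath)) return returnPath;

  const current = new URL(currentHref);
  if (returnPath.startsWith('/')) {
    // Rebase "/profile" onto the current page path, mirroring the OAuth redirect
    // fix described in the comment above.
    const basePath = current.pathname.replace(/\/$/, '');
    return `${current.origin}${basePath}${returnPath}`;
  }
  // Other relative paths resolve against the current URL as usual.
  return new URL(returnPath, currentHref).toString();
}

// resolveReturnUrl('/profile', 'https://omnikit.ai/app-builder/123/preview')
//   => 'https://omnikit.ai/app-builder/123/preview/profile' (hypothetical preview URL shape)
```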
@@ -1883,4 +2121,4 @@ declare class Analytics {
  }
  declare function createAnalytics(config: AnalyticsConfig): Analytics;

- export { APIClient, Analytics, type AnalyticsConfig, type AppMetadata, type AppSchema, type AsyncJobCreatedResponse, type AsyncJobStatus, type AsyncJobStatusResponse, type AsyncJobType, type AsyncOptions, type AuthModule, type AuthResponse, type BuiltInIntegration, type BulkResult, type CachedMetadata, type CheckJobStatusParams, type CollectionClass, type CollectionDefinition, type CollectionField, type CollectionRecord, type ConnectorAccessTokenResponse, type ConnectorStatusResponse, type ConnectorType, type ConnectorsModule, type EmailParams, type Entity, type EntityClass, type EntityDefinition, type EntityField, type EntityRecord, type EventPayload, type ExtractParams, type ImageParams, type ImportResult, type InitialMetadata, type IntegrationEndpoint, type IntegrationMethod, type IntegrationPackage, type IntegrationSchema, type LLMMessage, type LLMModel, type LLMParams, type LLMStreamEvent, type LLMStreamResult, type ListOptions, type LiveVoiceClientMessage, type LiveVoiceConfig, type LiveVoiceServerMessage, type LiveVoiceSession, LiveVoiceSessionImpl, type LiveVoiceStatus, type LiveVoiceVoice, type OAuthProvider, type OAuthProvidersResponse, type OmnikitClient, type OmnikitConfig, OmnikitError, type QueryOptions, type RequestOptions, type SMSParams, type ServiceDefinition, type ServiceResponse, type ServiceRoleClient, type ServicesSchema, type SpeechParams, type TemplateDefinition, type UserCollectionClass, type UserEntityClass, type UserInfo, type VideoParams, type VideoStatusParams, createAnalytics, createClient, createClientFromRequest, createServerClient };
+ export { APIClient, Analytics, type AnalyticsConfig, type AppMetadata, type AppSchema, type AsyncJobCreatedResponse, type AsyncJobStatus, type AsyncJobStatusResponse, type AsyncJobType, type AsyncOptions, type AuthModule, type AuthResponse, type BuiltInIntegration, type BulkResult, type CachedMetadata, type CheckJobStatusParams, type CollectionClass, type CollectionDefinition, type CollectionField, type CollectionRecord, type ConnectorAccessTokenResponse, type ConnectorStatusResponse, type ConnectorType, type ConnectorsModule, type EmailParams, type Entity, type EntityClass, type EntityDefinition, type EntityField, type EntityRecord, type EventPayload, type ExtractParams, type GroundingChunk, type GroundingMetadata, type GroundingSupport, type ImageParams, type ImportResult, type InitialMetadata, type IntegrationEndpoint, type IntegrationMethod, type IntegrationPackage, type IntegrationSchema, type LLMMessage, type LLMModel, type LLMParams, type LLMResponse, type LLMStreamEvent, type LLMStreamResult, type ListOptions, type LiveVoiceClientMessage, type LiveVoiceConfig, type LiveVoiceServerMessage, type LiveVoiceSession, LiveVoiceSessionImpl, type LiveVoiceStatus, type LiveVoiceVoice, type OAuthProvider, type OAuthProvidersResponse, type OmnikitClient, type OmnikitConfig, OmnikitError, type QueryOptions, type RequestOptions, type SMSParams, type ServiceDefinition, type ServiceResponse, type ServiceRoleClient, type ServicesSchema, type SpeechParams, type TemplateDefinition, type ToolCall, type ToolDefinition, type ToolParameter, type UrlContextMetadata, type UrlMetadata, type UrlRetrievalStatus, type UserCollectionClass, type UserEntityClass, type UserInfo, type VideoParams, type VideoStatusParams, createAnalytics, createClient, createClientFromRequest, createServerClient };
package/dist/index.d.ts CHANGED
@@ -10,7 +10,7 @@ export { cleanTokenFromUrl, getAccessToken, isTokenInUrl, removeAccessToken, sav
  * SECURITY: getAccessToken requires service token authentication.
  * Only available to backend functions, not frontend code.
  */
- type ConnectorType = 'slack' | 'google_calendar' | 'notion' | 'salesforce';
+ type ConnectorType = 'slack' | 'google_calendar' | 'gmail' | 'notion' | 'salesforce';
  interface ConnectorAccessTokenResponse {
  success: boolean;
  access_token: string;
@@ -300,6 +300,53 @@ interface LLMMessage {
  * - Legacy aliases: 'gemini-flash', 'gemini-pro', 'gemini-pro-3' (for backward compatibility)
  */
  type LLMModel = 'gemini-2.5-flash-lite' | 'gemini-2.5-flash' | 'gemini-2.5-pro' | 'gemini-3-flash' | 'gemini-3-pro' | 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
+ /**
+ * Parameter definition for a tool (OpenAPI/Gemini compatible format)
+ */
+ interface ToolParameter {
+ type: 'object';
+ properties: Record<string, {
+ type: string;
+ description?: string;
+ enum?: string[];
+ items?: {
+ type: string;
+ };
+ }>;
+ required?: string[];
+ }
+ /**
+ * Tool definition for LLM function calling.
+ *
+ * @example
+ * ```typescript
+ * const markStepTool: ToolDefinition = {
+ * name: 'mark_step_complete',
+ * description: 'Mark a workshop step as completed',
+ * parameters: {
+ * type: 'object',
+ * properties: {
+ * step_id: { type: 'string', description: 'The step ID' },
+ * summary: { type: 'string', description: 'Summary of completion' }
+ * },
+ * required: ['step_id', 'summary']
+ * }
+ * };
+ * ```
+ */
+ interface ToolDefinition {
+ name: string;
+ description: string;
+ parameters: ToolParameter;
+ }
+ /**
+ * A tool call returned by the LLM during streaming or in response
+ */
+ interface ToolCall {
+ id: string;
+ name: string;
+ arguments: Record<string, any>;
+ }
  interface LLMParams {
  /** Message-based format for advanced use */
  messages?: LLMMessage[];
@@ -358,6 +405,106 @@ interface LLMParams {
  * @param error - The error that occurred
  */
  onError?: (error: Error) => void;
+ /**
+ * Tool definitions for function calling.
+ * When provided, the LLM can request tool calls which are executed client-side.
+ *
+ * @example
+ * ```typescript
+ * await omnikit.services.InvokeLLM({
+ * messages: [...],
+ * stream: true,
+ * tools: [{
+ * name: 'mark_step_complete',
+ * description: 'Mark a step as completed',
+ * parameters: {
+ * type: 'object',
+ * properties: {
+ * step_id: { type: 'string', description: 'Step ID' }
+ * },
+ * required: ['step_id']
+ * }
+ * }],
+ * onToolCall: async (toolCall) => {
+ * if (toolCall.name === 'mark_step_complete') {
+ * await markStepComplete(toolCall.arguments.step_id);
+ * }
+ * }
+ * });
+ * ```
+ */
+ tools?: ToolDefinition[];
+ /**
+ * Callback when LLM requests a tool call (fires during streaming).
+ * Handle the tool execution and optionally continue the conversation.
+ *
+ * @param toolCall - The tool call with id, name, and arguments
+ */
+ onToolCall?: (toolCall: ToolCall) => void | Promise<void>;
+ }
+ /**
+ * Grounding chunk from Google Search results
+ */
+ interface GroundingChunk {
+ /** Web source */
+ web?: {
+ /** URL of the source */
+ uri: string;
+ /** Title of the source */
+ title: string;
+ };
+ }
+ /**
+ * Grounding support linking text segments to sources
+ */
+ interface GroundingSupport {
+ /** Text segment in the response */
+ segment?: {
+ startIndex: number;
+ endIndex: number;
+ text: string;
+ };
+ /** Indices into groundingChunks array */
+ groundingChunkIndices?: number[];
+ }
+ /**
+ * Metadata from Google Search grounding
+ */
+ interface GroundingMetadata {
+ /** Search queries that were executed */
+ webSearchQueries?: string[];
+ /** HTML/CSS for search suggestions widget (per Gemini API ToS) */
+ searchEntryPoint?: {
+ renderedContent: string;
+ };
+ /** Source chunks from web search */
+ groundingChunks?: GroundingChunk[];
+ /** Text-to-source mappings for citations */
+ groundingSupports?: GroundingSupport[];
+ /** Items array (alternative format) */
+ items?: any[];
+ }
+ /**
+ * URL retrieval status
+ */
+ type UrlRetrievalStatus = 'URL_RETRIEVAL_STATUS_SUCCESS' | 'URL_RETRIEVAL_STATUS_UNSAFE' | 'URL_RETRIEVAL_STATUS_FAILED' | 'URL_RETRIEVAL_STATUS_UNSPECIFIED';
+ /**
+ * Metadata for a single URL context retrieval
+ */
+ interface UrlMetadata {
+ /** The URL that was retrieved */
+ retrieved_url: string;
+ /** Status of the retrieval */
+ url_retrieval_status: UrlRetrievalStatus;
+ }
+ /**
+ * Metadata from URL context tool
+ */
+ interface UrlContextMetadata {
+ /** Array of URL metadata for each URL processed */
+ url_metadata?: UrlMetadata[];
+ /** Items array (alternative format) */
+ items?: any[];
  }
  /**
  * Result from streaming LLM completion
@@ -378,14 +525,63 @@ interface LLMStreamResult {
  /** Whether files were in the input */
  has_files?: boolean;
  }
+ /**
+ * Full LLM response (non-streaming)
+ */
+ interface LLMResponse {
+ /** Whether the request was successful */
+ success: boolean;
+ /** Response content (string or parsed JSON when response_format is used) */
+ result: any;
+ /** Model that was used */
+ model_used: string;
+ /** Whether images were in the input */
+ has_images?: boolean;
+ /** Whether files were in the input */
+ has_files?: boolean;
+ /** Whether Google Search grounding was used */
+ google_search_used?: boolean;
+ /** Whether URL context was used */
+ url_context_used?: boolean;
+ /** Number of continuation requests made for long outputs */
+ continuation_count?: number;
+ /** Token usage statistics */
+ usage?: {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ };
+ /**
+ * Grounding metadata from Google Search (when google_search: true)
+ * Contains search queries, source URLs, and text-to-source mappings for citations
+ */
+ grounding_metadata?: GroundingMetadata;
+ /**
+ * URL context metadata (when url_context: true)
+ * Contains info about which URLs were retrieved and their status
+ */
+ url_context_metadata?: UrlContextMetadata;
+ /**
+ * Tool calls requested by the LLM (when tools are provided).
+ * In streaming mode, these are delivered via onToolCall callback.
+ * In non-streaming mode, they are included in the response.
+ */
+ tool_calls?: ToolCall[];
+ /** @deprecated Use google_search_used instead */
+ web_search_used?: boolean;
+ }
  /**
  * SSE event types for LLM streaming
  */
  interface LLMStreamEvent {
  /** Event type */
- type: 'token' | 'done' | 'error';
+ type: 'token' | 'done' | 'error' | 'tool_call';
  /** Token content (for type: 'token') */
  content?: string;
+ /** Tool call data (for type: 'tool_call') */
+ id?: string;
+ name?: string;
+ arguments?: Record<string, any>;
  /** Complete result (for type: 'done') */
  result?: string;
  /** Model used (for type: 'done') */
@@ -720,11 +916,45 @@ interface BuiltInIntegration {
  SendSMS(params: SMSParams): Promise<ServiceResponse>;
  /**
  * Invoke LLM for text/vision/file processing
- * @param params - LLM parameters
+ *
+ * Features:
+ * - Multi-modal inputs: text, images, PDFs, videos, audio, YouTube URLs
+ * - Google Search grounding: Enable `google_search: true` for real-time web data
+ * - URL context: Enable `url_context: true` to have the model read URLs in your prompt
+ * - Streaming: Enable `stream: true` with callbacks for real-time token output
+ * - JSON output: Use `response_format: { type: 'json_object' }` for structured responses
+ *
+ * @param params - LLM parameters including messages, google_search, url_context, etc.
  * @param options - Async options for handling long-running operations
- * @returns Result or AsyncJobCreatedResponse (if async_mode or returnJobId)
+ * @returns LLMResponse with result, grounding_metadata, and url_context_metadata
+ *
+ * @example Basic usage
+ * ```typescript
+ * const response = await InvokeLLM({ prompt: 'Hello, world!' });
+ * console.log(response.result);
+ * ```
+ *
+ * @example With Google Search grounding
+ * ```typescript
+ * const response = await InvokeLLM({
+ * prompt: 'What are the latest AI news?',
+ * google_search: true
+ * });
+ * console.log(response.result);
+ * console.log(response.grounding_metadata?.groundingChunks); // Source URLs
+ * ```
+ *
+ * @example With URL context
+ * ```typescript
+ * const response = await InvokeLLM({
+ * prompt: 'Summarize the content at https://example.com/article',
+ * url_context: true
+ * });
+ * console.log(response.result);
+ * console.log(response.url_context_metadata?.url_metadata); // Retrieval status
+ * ```
  */
- InvokeLLM(params: LLMParams, options?: AsyncOptions): Promise<ServiceResponse | AsyncJobCreatedResponse>;
+ InvokeLLM(params: LLMParams, options?: AsyncOptions): Promise<LLMResponse | AsyncJobCreatedResponse>;
  UploadFile(params: {
  file: File;
  metadata?: Record<string, any>;
@@ -791,10 +1021,11 @@ interface AuthModule {
  */
  me(): Promise<UserInfo>;
  /**
- * Redirect to platform login page
- * @param returnPath - Path to return to after login
+ * Redirect to platform login page, or navigate directly if already authenticated.
+ * Smart login: checks auth state first, only shows modal if not logged in.
+ * @param returnPath - Path to return to after login (relative paths are resolved to absolute)
  */
- login(returnPath?: string): void;
+ login(returnPath?: string): void | Promise<void>;
  /**
  * Request a passwordless login code to email
  * @param email - User email
@@ -1442,6 +1673,13 @@ declare class APIClient implements OmnikitClient {
  * ```
  */
  get connectors(): ConnectorsModule$1;
+ /**
+ * Resolve a return URL to an absolute URL.
+ * Handles relative paths like "/profile" by combining with current location.
+ * This fixes the OAuth redirect bug where relative URLs like "/profile" become
+ * "https://omnikit.ai/profile" instead of "https://omnikit.ai/app-builder/{id}/preview/profile"
+ */
+ private _resolveReturnUrl;
  /**
  * Create auth proxy that auto-initializes
  */
@@ -1883,4 +2121,4 @@ declare class Analytics {
  }
  declare function createAnalytics(config: AnalyticsConfig): Analytics;

- export { APIClient, Analytics, type AnalyticsConfig, type AppMetadata, type AppSchema, type AsyncJobCreatedResponse, type AsyncJobStatus, type AsyncJobStatusResponse, type AsyncJobType, type AsyncOptions, type AuthModule, type AuthResponse, type BuiltInIntegration, type BulkResult, type CachedMetadata, type CheckJobStatusParams, type CollectionClass, type CollectionDefinition, type CollectionField, type CollectionRecord, type ConnectorAccessTokenResponse, type ConnectorStatusResponse, type ConnectorType, type ConnectorsModule, type EmailParams, type Entity, type EntityClass, type EntityDefinition, type EntityField, type EntityRecord, type EventPayload, type ExtractParams, type ImageParams, type ImportResult, type InitialMetadata, type IntegrationEndpoint, type IntegrationMethod, type IntegrationPackage, type IntegrationSchema, type LLMMessage, type LLMModel, type LLMParams, type LLMStreamEvent, type LLMStreamResult, type ListOptions, type LiveVoiceClientMessage, type LiveVoiceConfig, type LiveVoiceServerMessage, type LiveVoiceSession, LiveVoiceSessionImpl, type LiveVoiceStatus, type LiveVoiceVoice, type OAuthProvider, type OAuthProvidersResponse, type OmnikitClient, type OmnikitConfig, OmnikitError, type QueryOptions, type RequestOptions, type SMSParams, type ServiceDefinition, type ServiceResponse, type ServiceRoleClient, type ServicesSchema, type SpeechParams, type TemplateDefinition, type UserCollectionClass, type UserEntityClass, type UserInfo, type VideoParams, type VideoStatusParams, createAnalytics, createClient, createClientFromRequest, createServerClient };
+ export { APIClient, Analytics, type AnalyticsConfig, type AppMetadata, type AppSchema, type AsyncJobCreatedResponse, type AsyncJobStatus, type AsyncJobStatusResponse, type AsyncJobType, type AsyncOptions, type AuthModule, type AuthResponse, type BuiltInIntegration, type BulkResult, type CachedMetadata, type CheckJobStatusParams, type CollectionClass, type CollectionDefinition, type CollectionField, type CollectionRecord, type ConnectorAccessTokenResponse, type ConnectorStatusResponse, type ConnectorType, type ConnectorsModule, type EmailParams, type Entity, type EntityClass, type EntityDefinition, type EntityField, type EntityRecord, type EventPayload, type ExtractParams, type GroundingChunk, type GroundingMetadata, type GroundingSupport, type ImageParams, type ImportResult, type InitialMetadata, type IntegrationEndpoint, type IntegrationMethod, type IntegrationPackage, type IntegrationSchema, type LLMMessage, type LLMModel, type LLMParams, type LLMResponse, type LLMStreamEvent, type LLMStreamResult, type ListOptions, type LiveVoiceClientMessage, type LiveVoiceConfig, type LiveVoiceServerMessage, type LiveVoiceSession, LiveVoiceSessionImpl, type LiveVoiceStatus, type LiveVoiceVoice, type OAuthProvider, type OAuthProvidersResponse, type OmnikitClient, type OmnikitConfig, OmnikitError, type QueryOptions, type RequestOptions, type SMSParams, type ServiceDefinition, type ServiceResponse, type ServiceRoleClient, type ServicesSchema, type SpeechParams, type TemplateDefinition, type ToolCall, type ToolDefinition, type ToolParameter, type UrlContextMetadata, type UrlMetadata, type UrlRetrievalStatus, type UserCollectionClass, type UserEntityClass, type UserInfo, type VideoParams, type VideoStatusParams, createAnalytics, createClient, createClientFromRequest, createServerClient };