@yourgpt/copilot-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1225 @@
1
+ /**
2
+ * Tool types for App Context Awareness
3
+ */
4
+ interface ScreenshotOptions {
5
+ /** Target element to capture (defaults to document.body) */
6
+ element?: HTMLElement;
7
+ /** Image quality (0.1-1.0, default 0.8) */
8
+ quality?: number;
9
+ /** Image format */
10
+ format?: "png" | "jpeg" | "webp";
11
+ /** Max width to scale down to */
12
+ maxWidth?: number;
13
+ /** Max height to scale down to */
14
+ maxHeight?: number;
15
+ /** Whether to include cursor */
16
+ includeCursor?: boolean;
17
+ }
18
+ interface ScreenshotResult {
19
+ /** Base64-encoded image data */
20
+ data: string;
21
+ /** Image format */
22
+ format: "png" | "jpeg" | "webp";
23
+ /** Image width */
24
+ width: number;
25
+ /** Image height */
26
+ height: number;
27
+ /** Timestamp of capture */
28
+ timestamp: number;
29
+ }
30
+ type ConsoleLogType = "log" | "info" | "warn" | "error" | "debug";
31
+ interface ConsoleLogEntry {
32
+ /** Type of console method */
33
+ type: ConsoleLogType;
34
+ /** Log message(s) */
35
+ message: string;
36
+ /** Additional arguments passed to console */
37
+ args?: unknown[];
38
+ /** Stack trace (for errors) */
39
+ stack?: string;
40
+ /** Timestamp */
41
+ timestamp: number;
42
+ }
43
+ interface ConsoleLogOptions {
44
+ /** Types of logs to capture */
45
+ types?: ConsoleLogType[];
46
+ /** Maximum number of logs to store */
47
+ limit?: number;
48
+ /** Filter function */
49
+ filter?: (entry: ConsoleLogEntry) => boolean;
50
+ }
51
+ interface ConsoleLogResult {
52
+ /** Captured log entries */
53
+ logs: ConsoleLogEntry[];
54
+ /** Total logs captured (before limit) */
55
+ totalCaptured: number;
56
+ }
57
+ type HttpMethod = "GET" | "POST" | "PUT" | "PATCH" | "DELETE" | "HEAD" | "OPTIONS";
58
+ interface NetworkRequestEntry {
59
+ /** Request URL */
60
+ url: string;
61
+ /** HTTP method */
62
+ method: HttpMethod;
63
+ /** Response status code */
64
+ status: number;
65
+ /** Status text */
66
+ statusText: string;
67
+ /** Whether request failed (non-2xx or error) */
68
+ failed: boolean;
69
+ /** Request headers (sanitized) */
70
+ requestHeaders?: Record<string, string>;
71
+ /** Response headers (sanitized) */
72
+ responseHeaders?: Record<string, string>;
73
+ /** Request body (if captured) */
74
+ requestBody?: unknown;
75
+ /** Response body (if captured and failed) */
76
+ responseBody?: unknown;
77
+ /** Request duration in ms */
78
+ duration: number;
79
+ /** Timestamp of request start */
80
+ timestamp: number;
81
+ /** Error message if request failed */
82
+ error?: string;
83
+ }
84
+ interface NetworkRequestOptions {
85
+ /** Maximum number of requests to store */
86
+ limit?: number;
87
+ /** Only capture failed requests (default: true) */
88
+ failedOnly?: boolean;
89
+ /** HTTP methods to capture */
90
+ methods?: HttpMethod[];
91
+ /** URL patterns to include (regex) */
92
+ includeUrls?: RegExp[];
93
+ /** URL patterns to exclude (regex) */
94
+ excludeUrls?: RegExp[];
95
+ /** Whether to capture request body */
96
+ captureRequestBody?: boolean;
97
+ /** Whether to capture response body */
98
+ captureResponseBody?: boolean;
99
+ /** Max body size to capture (bytes) */
100
+ maxBodySize?: number;
101
+ }
102
+ interface NetworkRequestResult {
103
+ /** Captured network requests */
104
+ requests: NetworkRequestEntry[];
105
+ /** Total requests captured (before limit) */
106
+ totalCaptured: number;
107
+ }
108
+ type ToolType = "screenshot" | "console" | "network";
109
+ interface IntentDetectionResult {
110
+ /** Detected tools that might be helpful */
111
+ suggestedTools: ToolType[];
112
+ /** Confidence score (0-1) for each tool */
113
+ confidence: Record<ToolType, number>;
114
+ /** Keywords that triggered detection */
115
+ matchedKeywords: Record<ToolType, string[]>;
116
+ }
117
+ interface ToolsConfig {
118
+ /** Enable screenshot capture */
119
+ screenshot?: boolean;
120
+ /** Enable console log capture */
121
+ console?: boolean;
122
+ /** Enable network request capture */
123
+ network?: boolean;
124
+ /** Always require user consent before capturing (default: true) */
125
+ requireConsent?: boolean;
126
+ /** Screenshot-specific options */
127
+ screenshotOptions?: ScreenshotOptions;
128
+ /** Console-specific options */
129
+ consoleOptions?: ConsoleLogOptions;
130
+ /** Network-specific options */
131
+ networkOptions?: NetworkRequestOptions;
132
+ }
133
+ interface ToolConsentRequest {
134
+ /** Tools being requested */
135
+ tools: ToolType[];
136
+ /** Reason for request (from intent detection) */
137
+ reason: string;
138
+ /** Keywords that triggered this request */
139
+ keywords: string[];
140
+ }
141
+ interface ToolConsentResponse {
142
+ /** Tools user approved */
143
+ approved: ToolType[];
144
+ /** Tools user denied */
145
+ denied: ToolType[];
146
+ /** Remember preference for session */
147
+ remember?: boolean;
148
+ }
149
+ interface CapturedContext {
150
+ /** Screenshot data (if captured) */
151
+ screenshot?: ScreenshotResult;
152
+ /** Console logs (if captured) */
153
+ consoleLogs?: ConsoleLogResult;
154
+ /** Network requests (if captured) */
155
+ networkRequests?: NetworkRequestResult;
156
+ /** Timestamp of capture */
157
+ timestamp: number;
158
+ }
159
+
160
+ /**
161
+ * Intent Detector
162
+ *
163
+ * Detects user intent from messages to suggest relevant tools.
164
+ * Framework-agnostic implementation using keyword matching.
165
+ */
166
+
167
+ /**
168
+ * Detect user intent from a message
169
+ *
170
+ * @param message - User message to analyze
171
+ * @returns Detection result with suggested tools
172
+ *
173
+ * @example
174
+ * ```typescript
175
+ * const result = detectIntent("I'm seeing an error on my screen");
176
+ * // Returns:
177
+ * // {
178
+ * // suggestedTools: ['screenshot', 'console'],
179
+ * // confidence: { screenshot: 0.6, console: 0.8, network: 0 },
180
+ * // matchedKeywords: { screenshot: ['seeing', 'screen'], console: ['error'] }
181
+ * // }
182
+ * ```
183
+ */
184
+ declare function detectIntent(message: string): IntentDetectionResult;
185
+ /**
186
+ * Check if a message suggests any tools
187
+ */
188
+ declare function hasToolSuggestions(message: string): boolean;
189
+ /**
190
+ * Get the primary suggested tool (highest confidence)
191
+ */
192
+ declare function getPrimaryTool(message: string): ToolType | null;
193
+ /**
194
+ * Generate a reason string for why tools are being suggested
195
+ */
196
+ declare function generateSuggestionReason(result: IntentDetectionResult): string;
197
+ /**
198
+ * Custom keyword configuration
199
+ */
200
+ interface CustomKeywords {
201
+ screenshot?: string[];
202
+ console?: string[];
203
+ network?: string[];
204
+ }
205
+ /**
206
+ * Create a custom intent detector with additional keywords
207
+ */
208
+ declare function createCustomDetector(customKeywords: CustomKeywords): (message: string) => IntentDetectionResult;
209
+
210
+ /**
211
+ * Tool-related types for the agentic loop
212
+ */
213
+ /**
214
+ * Supported AI providers for tool calling
215
+ */
216
+ type AIProvider = "anthropic" | "openai" | "xai" | "grok" | "gemini" | "groq" | "ollama";
217
+ /**
218
+ * Where the tool executes
219
+ */
220
+ type ToolLocation = "server" | "client";
221
+ /**
222
+ * JSON Schema property definition
223
+ */
224
+ interface JSONSchemaProperty {
225
+ type: "string" | "number" | "boolean" | "object" | "array" | "integer" | "null";
226
+ description?: string;
227
+ enum?: (string | number | boolean)[];
228
+ items?: JSONSchemaProperty;
229
+ properties?: Record<string, JSONSchemaProperty>;
230
+ required?: string[];
231
+ default?: unknown;
232
+ minLength?: number;
233
+ maxLength?: number;
234
+ minimum?: number;
235
+ maximum?: number;
236
+ pattern?: string;
237
+ }
238
+ /**
239
+ * JSON Schema for tool input
240
+ */
241
+ interface ToolInputSchema {
242
+ type: "object";
243
+ properties: Record<string, JSONSchemaProperty>;
244
+ required?: string[];
245
+ additionalProperties?: boolean;
246
+ }
247
+ /**
248
+ * Tool execution context
249
+ *
250
+ * Provides runtime information to tool handlers including cancellation signals,
251
+ * request metadata, and custom context data.
252
+ */
253
+ interface ToolContext {
254
+ /** Abort signal for cancellation */
255
+ signal?: AbortSignal;
256
+ /** Thread ID if using threads */
257
+ threadId?: string;
258
+ /** Custom context data passed from runtime config */
259
+ data?: Record<string, unknown>;
260
+ /**
261
+ * Unique ID for this specific tool call.
262
+ * Useful for logging, tracing, and correlating tool executions.
263
+ */
264
+ toolCallId?: string;
265
+ /**
266
+ * Request headers (for auth in server tools).
267
+ * Contains headers from the original HTTP request.
268
+ *
269
+ * @example
270
+ * ```typescript
271
+ * handler: async (params, context) => {
272
+ * const token = context?.headers?.authorization;
273
+ * if (!token) return failure('Authentication required');
274
+ * // ...
275
+ * }
276
+ * ```
277
+ */
278
+ headers?: Record<string, string>;
279
+ /**
280
+ * Full request metadata for server-side tools.
281
+ * Provides access to HTTP method, URL, and headers.
282
+ *
283
+ * @example
284
+ * ```typescript
285
+ * handler: async (params, context) => {
286
+ * console.log(`Tool called from: ${context?.request?.url}`);
287
+ * // Forward auth to internal service
288
+ * const authHeader = context?.request?.headers?.authorization;
289
+ * }
290
+ * ```
291
+ */
292
+ request?: {
293
+ /** HTTP method (GET, POST, etc.) */
294
+ method?: string;
295
+ /** Request URL path */
296
+ url?: string;
297
+ /** Request headers */
298
+ headers?: Record<string, string>;
299
+ };
300
+ }
301
+ /**
302
+ * AI response behavior for tool results.
303
+ *
304
+ * Controls what the AI sees after a tool executes and renders UI.
305
+ *
306
+ * - `'none'`: AI generates minimal response, UI component handles display
307
+ * - `'brief'`: AI gets summary context (via aiContext), gives brief acknowledgment
308
+ * - `'full'`: AI receives full data and responds accordingly (default)
309
+ */
310
+ type AIResponseMode = "none" | "brief" | "full";
311
+ /**
312
+ * Multimodal content for AI to analyze
313
+ */
314
+ type AIContent = {
315
+ type: "image";
316
+ data: string;
317
+ mediaType: string;
318
+ } | {
319
+ type: "text";
320
+ text: string;
321
+ };
322
+ /**
323
+ * Tool response format
324
+ */
325
+ interface ToolResponse<T = unknown> {
326
+ /** Whether the tool succeeded */
327
+ success: boolean;
328
+ /** Human-readable message */
329
+ message?: string;
330
+ /** Error message if failed */
331
+ error?: string;
332
+ /** Result data */
333
+ data?: T;
334
+ /**
335
+ * Override AI context for this specific result.
336
+ * Takes precedence over tool-level aiContext config.
337
+ * If set, this message is sent to AI instead of full result data.
338
+ *
339
+ * @example
340
+ * ```typescript
341
+ * return {
342
+ * success: true,
343
+ * data: sensitiveData,
344
+ * _aiContext: '[Data retrieved - contains sensitive info, displayed to user]'
345
+ * };
346
+ * ```
347
+ */
348
+ _aiContext?: string;
349
+ /**
350
+ * Override AI response mode for this specific result.
351
+ * Takes precedence over tool-level aiResponseMode config.
352
+ */
353
+ _aiResponseMode?: AIResponseMode;
354
+ /**
355
+ * Content for AI to analyze (images, documents, etc.).
356
+ * When present, these are included as multimodal content for AI analysis.
357
+ *
358
+ * @example
359
+ * ```typescript
360
+ * // Screenshot for AI to analyze
361
+ * return {
362
+ * success: true,
363
+ * message: 'Screenshot captured',
364
+ * _aiContent: [{ type: 'image', data: base64, mediaType: 'image/png' }]
365
+ * };
366
+ * ```
367
+ */
368
+ _aiContent?: AIContent[];
369
+ }
370
+ /**
371
+ * Props passed to tool render function
372
+ */
373
+ interface ToolRenderProps<TParams = Record<string, unknown>> {
374
+ /** Current execution status */
375
+ status: "pending" | "executing" | "completed" | "error";
376
+ /** Arguments passed to the tool */
377
+ args: TParams;
378
+ /** Result if completed */
379
+ result?: ToolResponse;
380
+ /** Error if failed */
381
+ error?: string;
382
+ }
383
+ /**
384
+ * Tool definition with JSON Schema
385
+ */
386
+ interface ToolDefinition<TParams = Record<string, unknown>> {
387
+ /** Unique tool name */
388
+ name: string;
389
+ /** Tool description for LLM */
390
+ description: string;
391
+ /** Where the tool executes (server or client) */
392
+ location: ToolLocation;
393
+ /**
394
+ * Human-readable title for UI display.
395
+ * Can be a static string or a function that generates title from args.
396
+ *
397
+ * @example
398
+ * ```typescript
399
+ * title: "Get order details"
400
+ * // or dynamic:
401
+ * title: (args) => `Order #${args.orderId}`
402
+ * ```
403
+ */
404
+ title?: string | ((args: TParams) => string);
405
+ /**
406
+ * Title shown while executing (present tense with "...").
407
+ * If not provided, uses `title` with "..." appended.
408
+ *
409
+ * @example
410
+ * ```typescript
411
+ * executingTitle: (args) => `Fetching order #${args.orderId}...`
412
+ * ```
413
+ */
414
+ executingTitle?: string | ((args: TParams) => string);
415
+ /**
416
+ * Title shown after completion.
417
+ * If not provided, defaults to `title`.
418
+ *
419
+ * @example
420
+ * ```typescript
421
+ * completedTitle: (args) => `Retrieved order #${args.orderId}`
422
+ * ```
423
+ */
424
+ completedTitle?: string | ((args: TParams) => string);
425
+ /** JSON Schema for input parameters */
426
+ inputSchema: ToolInputSchema;
427
+ /** Handler function (optional for client tools registered on server) */
428
+ handler?: (params: TParams, context?: ToolContext) => Promise<ToolResponse> | ToolResponse;
429
+ /** Optional render function for UI */
430
+ render?: (props: ToolRenderProps<TParams>) => unknown;
431
+ /** Whether the tool is available (for conditional registration) */
432
+ available?: boolean;
433
+ /**
434
+ * Require user approval before execution.
435
+ * Can be:
436
+ * - `true`: Always require approval
437
+ * - `false` or `undefined`: No approval needed (default)
438
+ * - `(params) => boolean`: Conditional approval based on input
439
+ *
440
+ * Similar to Vercel AI SDK v6's needsApproval pattern.
441
+ */
442
+ needsApproval?: boolean | ((params: TParams) => boolean | Promise<boolean>);
443
+ /**
444
+ * Custom message shown in the approval UI.
445
+ * Can be a string or a function that generates a message from params.
446
+ * If not provided, a default message with the tool name is shown.
447
+ */
448
+ approvalMessage?: string | ((params: TParams) => string);
449
+ /**
450
+ * How the AI should respond when this tool's result is rendered as UI.
451
+ *
452
+ * - `'none'`: AI generates minimal response ("[Result displayed to user]").
453
+ * Use for tools where UI component fully handles the display (stats cards, etc.)
454
+ *
455
+ * - `'brief'`: AI receives summary context (from aiContext) and gives brief acknowledgment.
456
+ * Use for charts/visualizations where AI should acknowledge but not repeat data.
457
+ *
458
+ * - `'full'`: AI receives complete data and responds accordingly (default).
459
+ * Use for tools where AI should analyze and elaborate on results.
460
+ *
461
+ * @default 'full'
462
+ *
463
+ * @example
464
+ * ```typescript
465
+ * // Chart tool - AI acknowledges without repeating data
466
+ * const chartTool: ToolDefinition = {
467
+ * name: 'get_chart',
468
+ * aiResponseMode: 'brief',
469
+ * aiContext: (result) => `[Chart displayed: ${result.data.title}]`,
470
+ * handler: async () => ({ success: true, data: chartData })
471
+ * };
472
+ * ```
473
+ */
474
+ aiResponseMode?: AIResponseMode;
475
+ /**
476
+ * Context/summary sent to AI instead of (or along with) full result.
477
+ *
478
+ * Used when:
479
+ * - `aiResponseMode: 'brief'` - This becomes the only thing AI sees
480
+ * - `aiResponseMode: 'full'` - This is prepended to full data for context
481
+ *
482
+ * Can be:
483
+ * - `string`: Static message (e.g., "[Weather data displayed]")
484
+ * - `function`: Dynamic based on result (e.g., (result) => `[Chart: ${result.data.title}]`)
485
+ *
486
+ * @example
487
+ * ```typescript
488
+ * // Static context
489
+ * aiContext: '[Analytics chart displayed to user]'
490
+ *
491
+ * // Dynamic context based on result
492
+ * aiContext: (result, args) => {
493
+ * const { title, currentValue } = result.data;
494
+ * return `[Chart displayed: ${title}, showing ${currentValue}]`;
495
+ * }
496
+ * ```
497
+ */
498
+ aiContext?: string | ((result: ToolResponse, args: Record<string, unknown>) => string);
499
+ }
500
+ /**
501
+ * Unified tool call format (internal representation)
502
+ */
503
+ interface UnifiedToolCall {
504
+ /** Unique tool call ID */
505
+ id: string;
506
+ /** Tool name */
507
+ name: string;
508
+ /** Tool input arguments */
509
+ input: Record<string, unknown>;
510
+ }
511
+ /**
512
+ * Unified tool result format
513
+ */
514
+ interface UnifiedToolResult {
515
+ /** Tool call ID this result is for */
516
+ toolCallId: string;
517
+ /** Serialized result content (JSON string) */
518
+ content: string;
519
+ /** Whether the tool succeeded */
520
+ success: boolean;
521
+ /** Error message if failed */
522
+ error?: string;
523
+ }
524
+ /**
525
+ * Tool execution status
526
+ */
527
+ type ToolExecutionStatus = "pending" | "executing" | "completed" | "error";
528
+ /**
529
+ * Tool approval status (for human-in-the-loop)
530
+ *
531
+ * Similar to Vercel AI SDK v6's tool approval pattern.
532
+ */
533
+ type ToolApprovalStatus = "none" | "required" | "approved" | "rejected";
534
+ /**
535
+ * Permission level for tool execution
536
+ *
537
+ * Controls whether approval is needed and how the choice is remembered:
538
+ * - "ask" - Always prompt user (default)
539
+ * - "allow_always" - Auto-approve, persisted to storage
540
+ * - "deny_always" - Auto-reject, persisted to storage
541
+ * - "session" - Auto-approve for current session only
542
+ */
543
+ type PermissionLevel = "ask" | "allow_always" | "deny_always" | "session";
544
+ /**
545
+ * Stored tool permission record
546
+ */
547
+ interface ToolPermission {
548
+ /** Tool name (unique identifier) */
549
+ toolName: string;
550
+ /** Permission level */
551
+ level: PermissionLevel;
552
+ /** When permission was set */
553
+ createdAt: number;
554
+ /** Last time this permission was used */
555
+ lastUsedAt?: number;
556
+ }
557
+ /**
558
+ * Permission storage configuration
559
+ */
560
+ interface PermissionStorageConfig {
561
+ /**
562
+ * Storage type:
563
+ * - "localStorage" - Persists across browser sessions
564
+ * - "sessionStorage" - Clears when tab closes
565
+ * - "memory" - In-memory only (for SSR or testing)
566
+ */
567
+ type: "localStorage" | "sessionStorage" | "memory";
568
+ /** Storage key prefix (default: "yourgpt-permissions") */
569
+ keyPrefix?: string;
570
+ }
571
+ /**
572
+ * Permission storage adapter interface (for custom implementations)
573
+ */
574
+ interface PermissionStorageAdapter {
575
+ /** Get permission for a tool */
576
+ get(toolName: string): Promise<ToolPermission | null>;
577
+ /** Set permission for a tool */
578
+ set(permission: ToolPermission): Promise<void>;
579
+ /** Remove permission for a tool */
580
+ remove(toolName: string): Promise<void>;
581
+ /** Get all permissions */
582
+ getAll(): Promise<ToolPermission[]>;
583
+ /** Clear all permissions */
584
+ clear(): Promise<void>;
585
+ }
586
+ /**
587
+ * Tool execution record (for UI tracking)
588
+ */
589
+ interface ToolExecution {
590
+ /** Tool call ID */
591
+ id: string;
592
+ /** Tool name */
593
+ name: string;
594
+ /** Tool arguments */
595
+ args: Record<string, unknown>;
596
+ /** Execution status */
597
+ status: ToolExecutionStatus;
598
+ /** Result if completed */
599
+ result?: ToolResponse;
600
+ /** Error message if failed */
601
+ error?: string;
602
+ /** Timestamp when execution started */
603
+ timestamp: number;
604
+ /** Duration in ms (set when completed) */
605
+ duration?: number;
606
+ /** Approval status for this execution */
607
+ approvalStatus: ToolApprovalStatus;
608
+ /** Message shown in approval UI (from tool's approvalMessage) */
609
+ approvalMessage?: string;
610
+ /** Timestamp when user responded to approval request */
611
+ approvalTimestamp?: number;
612
+ }
613
+ /**
614
+ * Agentic loop configuration
615
+ */
616
+ interface AgentLoopConfig {
617
+ /** Maximum iterations before stopping (default: 20) */
618
+ maxIterations?: number;
619
+ /** Enable debug logging */
620
+ debug?: boolean;
621
+ /** Whether to enable the agentic loop (default: true) */
622
+ enabled?: boolean;
623
+ }
624
+ /**
625
+ * Agent loop state (for tracking)
626
+ */
627
+ interface AgentLoopState {
628
+ /** Current iteration number */
629
+ iteration: number;
630
+ /** Maximum iterations allowed */
631
+ maxIterations: number;
632
+ /** Whether the loop is currently running */
633
+ running: boolean;
634
+ /** Whether max iterations was reached */
635
+ maxIterationsReached: boolean;
636
+ /** Whether the loop was aborted */
637
+ aborted: boolean;
638
+ }
639
+ /**
640
+ * A set of tools, keyed by tool name
641
+ *
642
+ * @example
643
+ * ```typescript
644
+ * const myTools: ToolSet = {
645
+ * capture_screenshot: screenshotTool,
646
+ * get_weather: weatherTool,
647
+ * };
648
+ * ```
649
+ */
650
+ type ToolSet = Record<string, ToolDefinition>;
651
+ /**
652
+ * Configuration for creating a tool
653
+ */
654
+ interface ToolConfig<TParams = Record<string, unknown>> {
655
+ /** Tool description for LLM */
656
+ description: string;
657
+ /** Where the tool executes (default: 'client') */
658
+ location?: ToolLocation;
659
+ /** Human-readable title for UI display */
660
+ title?: string | ((args: TParams) => string);
661
+ /** Title shown while executing */
662
+ executingTitle?: string | ((args: TParams) => string);
663
+ /** Title shown after completion */
664
+ completedTitle?: string | ((args: TParams) => string);
665
+ /** JSON Schema for input parameters */
666
+ inputSchema?: ToolInputSchema;
667
+ /** Handler function */
668
+ handler?: (params: TParams, context?: ToolContext) => Promise<ToolResponse> | ToolResponse;
669
+ /** Optional render function for UI */
670
+ render?: (props: ToolRenderProps<TParams>) => unknown;
671
+ /** Whether the tool is available */
672
+ available?: boolean;
673
+ /** Require user approval before execution */
674
+ needsApproval?: boolean | ((params: TParams) => boolean | Promise<boolean>);
675
+ /** Custom message shown in the approval UI */
676
+ approvalMessage?: string | ((params: TParams) => string);
677
+ /** AI response mode for this tool (default: 'full') */
678
+ aiResponseMode?: AIResponseMode;
679
+ /** Context/summary sent to AI instead of full result */
680
+ aiContext?: string | ((result: ToolResponse, args: Record<string, unknown>) => string);
681
+ }
682
+ /**
683
+ * Create a tool definition (similar to Vercel AI SDK's tool())
684
+ *
685
+ * @example
686
+ * ```typescript
687
+ * const weatherTool = tool({
688
+ * description: 'Get weather for a location',
689
+ * inputSchema: {
690
+ * type: 'object',
691
+ * properties: {
692
+ * location: { type: 'string', description: 'City name' },
693
+ * },
694
+ * required: ['location'],
695
+ * },
696
+ * handler: async ({ location }) => {
697
+ * const weather = await fetchWeather(location);
698
+ * return success(weather);
699
+ * },
700
+ * });
701
+ * ```
702
+ */
703
+ declare function tool<TParams = Record<string, unknown>>(config: ToolConfig<TParams>): Omit<ToolDefinition<TParams>, "name">;
704
+ /**
705
+ * Convert ToolDefinition to OpenAI tool format
706
+ */
707
+ declare function toolToOpenAIFormat(tool: ToolDefinition): object;
708
+ /**
709
+ * Convert ToolDefinition to Anthropic tool format
710
+ */
711
+ declare function toolToAnthropicFormat(tool: ToolDefinition): object;
712
+ /**
713
+ * Create a tool result response
714
+ */
715
+ declare function createToolResult(toolCallId: string, response: ToolResponse): UnifiedToolResult;
716
+ /**
717
+ * Create a successful tool response
718
+ */
719
+ declare function success<T = unknown>(data?: T, message?: string): ToolResponse<T>;
720
+ /**
721
+ * Create a failed tool response
722
+ */
723
+ declare function failure(error: string): ToolResponse;
724
+
725
+ /**
726
+ * Message roles in a conversation (OpenAI format)
727
+ */
728
+ type MessageRole = "user" | "assistant" | "system" | "tool";
729
+ /**
730
+ * A source document from knowledge base
731
+ */
732
+ interface Source {
733
+ /** Unique identifier */
734
+ id: string;
735
+ /** Source title or filename */
736
+ title: string;
737
+ /** Relevant content snippet */
738
+ content: string;
739
+ /** URL if available */
740
+ url?: string;
741
+ /** Relevance score (0-1) */
742
+ score?: number;
743
+ /** Additional metadata */
744
+ metadata?: Record<string, unknown>;
745
+ }
746
+ /**
747
+ * Tool/function call in OpenAI format
748
+ * Used in assistant messages when AI wants to call tools
749
+ */
750
+ interface ToolCall {
751
+ /** Unique identifier for this call */
752
+ id: string;
753
+ /** Always "function" for OpenAI compatibility */
754
+ type: "function";
755
+ /** Function details */
756
+ function: {
757
+ /** Name of the function/tool */
758
+ name: string;
759
+ /** Arguments as JSON string (OpenAI format) */
760
+ arguments: string;
761
+ };
762
+ }
763
+ /**
764
+ * Attachment in a message (images, files, etc.)
765
+ *
766
+ * Attachments can be stored as:
767
+ * - Base64 data (free tier, embedded in message)
768
+ * - URL (premium cloud storage, lighter payload)
769
+ */
770
+ interface MessageAttachment {
771
+ /** Type of attachment */
772
+ type: "image" | "file" | "audio" | "video";
773
+ /** Base64 data (for embedded attachments) */
774
+ data?: string;
775
+ /** URL for cloud-stored attachments (managed cloud storage) */
776
+ url?: string;
777
+ /** MIME type */
778
+ mimeType: string;
779
+ /** Optional filename */
780
+ filename?: string;
781
+ }
782
+ /**
783
+ * Token usage information
784
+ */
785
+ interface TokenUsage {
786
+ prompt_tokens: number;
787
+ completion_tokens: number;
788
+ total_tokens?: number;
789
+ }
790
+ /**
791
+ * Message metadata (flexible container for provider-specific data)
792
+ */
793
+ interface MessageMetadata {
794
+ /** Extended thinking/reasoning (Claude, DeepSeek) */
795
+ thinking?: string;
796
+ /** Knowledge base sources */
797
+ sources?: Source[];
798
+ /** Attachments (images, files) */
799
+ attachments?: MessageAttachment[];
800
+ /** Model used to generate this message */
801
+ model?: string;
802
+ /** Token usage */
803
+ usage?: TokenUsage;
804
+ /** Any additional data */
805
+ [key: string]: unknown;
806
+ }
807
+ /**
808
+ * A message in the conversation (OpenAI format)
809
+ *
810
+ * This format is compatible with OpenAI's Chat Completions API
811
+ * and can be stored directly in a database (1 row per message).
812
+ *
813
+ * Message types:
814
+ * - user: User's input message
815
+ * - assistant: AI's response (may include tool_calls)
816
+ * - tool: Result of a tool execution (has tool_call_id)
817
+ * - system: System prompt (usually first message)
818
+ *
819
+ * @example
820
+ * // User message
821
+ * { role: "user", content: "What's the weather?" }
822
+ *
823
+ * // Assistant requesting tool
824
+ * { role: "assistant", content: null, tool_calls: [{...}] }
825
+ *
826
+ * // Tool result
827
+ * { role: "tool", content: '{"temp": 72}', tool_call_id: "call_abc" }
828
+ *
829
+ * // Final assistant response
830
+ * { role: "assistant", content: "The temperature is 72°F" }
831
+ */
832
+ interface Message {
833
+ /** Unique identifier */
834
+ id: string;
835
+ /** Thread/conversation ID (for multi-session apps) */
836
+ thread_id?: string;
837
+ /** Role of the message sender */
838
+ role: MessageRole;
839
+ /** Text content (null for tool-calling assistant messages) */
840
+ content: string | null;
841
+ /**
842
+ * Tool calls made by assistant (OpenAI format)
843
+ * Only present when role is "assistant" and AI wants to call tools
844
+ */
845
+ tool_calls?: ToolCall[];
846
+ /**
847
+ * Tool call ID this message is responding to
848
+ * Only present when role is "tool"
849
+ */
850
+ tool_call_id?: string;
851
+ /**
852
+ * Flexible metadata container
853
+ * Contains: thinking, sources, attachments, model, usage, etc.
854
+ */
855
+ metadata?: MessageMetadata;
856
+ /** When the message was created */
857
+ created_at: Date;
858
+ }
859
+ /**
860
+ * Helper to parse tool call arguments
861
+ */
862
+ declare function parseToolCallArgs<T = Record<string, unknown>>(toolCall: ToolCall): T;
863
+ /**
864
+ * Helper to create a tool call
865
+ */
866
+ declare function createToolCall(id: string, name: string, args: Record<string, unknown>): ToolCall;
867
+ /**
868
+ * Create a new message with defaults
869
+ */
870
+ declare function createMessage(partial: Partial<Message> & Pick<Message, "role"> & {
871
+ content?: string | null;
872
+ }): Message;
873
+ /**
874
+ * Create a user message
875
+ */
876
+ declare function createUserMessage(content: string, options?: {
877
+ id?: string;
878
+ thread_id?: string;
879
+ attachments?: MessageAttachment[];
880
+ }): Message;
881
+ /**
882
+ * Create an assistant message
883
+ */
884
+ declare function createAssistantMessage(content: string | null, options?: {
885
+ id?: string;
886
+ thread_id?: string;
887
+ tool_calls?: ToolCall[];
888
+ thinking?: string;
889
+ sources?: Source[];
890
+ model?: string;
891
+ }): Message;
892
+ /**
893
+ * Create a tool result message
894
+ */
895
+ declare function createToolMessage(toolCallId: string, result: {
896
+ success: boolean;
897
+ data?: unknown;
898
+ error?: string;
899
+ message?: string;
900
+ }, options?: {
901
+ id?: string;
902
+ thread_id?: string;
903
+ }): Message;
904
+ /**
905
+ * Check if a message has tool calls
906
+ */
907
+ declare function hasToolCalls(message: Message): boolean;
908
+ /**
909
+ * Check if a message is a tool result
910
+ */
911
+ declare function isToolResult(message: Message): boolean;
912
+
913
+ /**
914
+ * Supported LLM providers
915
+ */
916
+ type LLMProvider = "openai" | "anthropic" | "google" | "groq" | "ollama" | "custom";
917
+ /**
918
+ * LLM configuration
919
+ */
920
+ interface LLMConfig {
921
+ /** LLM provider */
922
+ provider: LLMProvider;
923
+ /** Model name (e.g., 'gpt-4o', 'claude-3-5-sonnet-latest') */
924
+ model?: string;
925
+ /** API key for the provider */
926
+ apiKey?: string;
927
+ /** Base URL for custom/self-hosted models */
928
+ baseUrl?: string;
929
+ /** Temperature (0-2) */
930
+ temperature?: number;
931
+ /** Maximum tokens in response */
932
+ maxTokens?: number;
933
+ /** Top P sampling */
934
+ topP?: number;
935
+ /** Frequency penalty */
936
+ frequencyPenalty?: number;
937
+ /** Presence penalty */
938
+ presencePenalty?: number;
939
+ /** Enable streaming responses (default: true) */
940
+ streaming?: boolean;
941
+ }
942
+ /**
943
+ * Cloud configuration (for managed hosting)
944
+ */
945
+ interface CloudConfig {
946
+ /** API key */
947
+ apiKey: string;
948
+ /** Bot ID */
949
+ botId: string;
950
+ /** Custom API endpoint (optional) */
951
+ endpoint?: string;
952
+ }
953
/**
 * Extension configuration (e.g. a knowledge base plug-in).
 */
interface Extension {
    /** Extension name */
    name: string;
    /** Extension-specific configuration */
    config: Record<string, unknown>;
    /** Optional async initializer, run before the extension is used */
    init?: () => Promise<void>;
}
964
/**
 * Main SDK configuration.
 *
 * Exactly one hosting mode is expected in practice: `config` (self-hosted LLM)
 * or `cloud` (managed hosting) — NOTE(review): not enforced by the types here.
 */
interface CopilotConfig {
    /** LLM configuration (for self-hosted) */
    config?: LLMConfig;
    /** Cloud configuration (for managed hosting) */
    cloud?: CloudConfig;
    /** Runtime URL for self-hosted backend */
    runtimeUrl?: string;
    /** System prompt */
    systemPrompt?: string;
    /** Extensions (like knowledge base) */
    extensions?: Extension[];
    /** Enable debug logging */
    debug?: boolean;
}
981
/**
 * Default model name per provider — maps every {@link LLMProvider}
 * to its default model identifier.
 */
declare const DEFAULT_MODELS: Record<LLMProvider, string>;
985
/**
 * Get the default model for a provider.
 *
 * @param provider - Provider to look up
 * @returns The default model name (see {@link DEFAULT_MODELS})
 */
declare function getDefaultModel(provider: LLMProvider): string;
989
+
990
/**
 * Parameter value types for actions (mirrors JSON Schema primitive kinds).
 */
type ParameterType = "string" | "number" | "boolean" | "object" | "array";
994
/**
 * Action parameter definition — a recursive, JSON-Schema-like description
 * of one parameter accepted by an {@link ActionDefinition}.
 */
interface ActionParameter {
    /** Parameter type */
    type: ParameterType;
    /** Description of the parameter */
    description?: string;
    /** Whether the parameter is required */
    required?: boolean;
    /** Default value */
    default?: unknown;
    /** Allowed values when `type` is "string" */
    enum?: string[];
    /** Nested property schemas when `type` is "object" */
    properties?: Record<string, ActionParameter>;
    /** Element schema when `type` is "array" */
    items?: ActionParameter;
}
1013
/**
 * Action definition — a named operation the copilot can invoke,
 * convertible to an LLM tool via {@link actionToTool}.
 *
 * @typeParam TParams - Shape of the arguments passed to `handler`/`render`
 */
interface ActionDefinition<TParams = Record<string, unknown>> {
    /** Unique name for the action */
    name: string;
    /** Description of what the action does */
    description: string;
    /** Parameter definitions, keyed by parameter name */
    parameters?: Record<string, ActionParameter>;
    /** Handler function; may be sync or async */
    handler: (params: TParams) => unknown | Promise<unknown>;
    /** Optional render function for UI */
    render?: (props: ActionRenderProps<TParams>) => unknown;
}
1028
/**
 * Props passed to an action's render function.
 *
 * @typeParam TParams - Shape of the action's arguments
 */
interface ActionRenderProps<TParams = Record<string, unknown>> {
    /** Current execution status */
    status: "pending" | "executing" | "completed" | "error";
    /** Arguments passed to the action */
    args: TParams;
    /** Result, populated once status is "completed" */
    result?: unknown;
    /** Error message, populated when status is "error" */
    error?: string;
}
1041
/**
 * Convert an action definition to the OpenAI tool format.
 *
 * @param action - Action to convert
 * @returns The OpenAI-compatible tool descriptor object
 */
declare function actionToTool(action: ActionDefinition): object;
1045
+
1046
/**
 * Knowledge Base Types
 *
 * Configuration and types for Knowledge Base (RAG) integration.
 * Currently a placeholder - full implementation coming soon.
 */
/**
 * Supported vector database providers.
 * Use `"custom"` together with {@link KnowledgeBaseConfig.endpoint}.
 */
type KnowledgeBaseProvider = "pinecone" | "qdrant" | "chroma" | "supabase" | "weaviate" | "custom";
1056
/**
 * Knowledge Base configuration for an external vector-database-backed store.
 */
interface KnowledgeBaseConfig {
    /** Unique identifier for this knowledge base */
    id: string;
    /** Display name */
    name?: string;
    /** Vector database provider */
    provider: KnowledgeBaseProvider;
    /** API key for the vector database */
    apiKey?: string;
    /** Index/collection name */
    index?: string;
    /** Namespace within the index */
    namespace?: string;
    /** Custom endpoint URL (for self-hosted or custom providers) */
    endpoint?: string;
    /** Number of results to return (default: 5) */
    topK?: number;
    /** Minimum similarity score threshold (0-1) */
    scoreThreshold?: number;
    /** Whether to include source metadata in results */
    includeMetadata?: boolean;
}
1081
/**
 * A single knowledge base search hit.
 */
interface KnowledgeBaseResult {
    /** Result content/text */
    content: string;
    /** Similarity score (0-1) */
    score: number;
    /** Source metadata */
    metadata?: {
        /** Source document/URL */
        source?: string;
        /** Document title */
        title?: string;
        /** Page number (for PDFs) */
        page?: number;
        /** Chunk index within the source document */
        chunk?: number;
        /** Any additional provider-specific metadata */
        [key: string]: unknown;
    };
}
1103
/**
 * Knowledge base search request.
 */
interface KnowledgeBaseSearchRequest {
    /** Search query */
    query: string;
    /** Knowledge base ID to search */
    knowledgeBaseId: string;
    /** Number of results (overrides {@link KnowledgeBaseConfig.topK}) */
    limit?: number;
    /** Filter by metadata key/value pairs */
    filter?: Record<string, unknown>;
}
1116
/**
 * Knowledge base search response.
 */
interface KnowledgeBaseSearchResponse {
    /** Search results, ordered as returned by the provider */
    results: KnowledgeBaseResult[];
    /** Knowledge base ID that was searched */
    knowledgeBaseId: string;
    /** Query that was searched */
    query: string;
    /** Search duration in milliseconds */
    durationMs?: number;
}
1129
/**
 * Internal Knowledge Base configuration.
 * Used for the managed cloud `searchIndexDocument` API.
 */
interface InternalKnowledgeBaseConfig {
    /** Project UID for the knowledge base */
    projectUid: string;
    /** Auth token for API calls */
    token: string;
    /** App ID (default: "1") */
    appId?: string;
    /** Results limit (default: 5) */
    limit?: number;
    /** Whether the knowledge base is enabled (default: true) */
    enabled?: boolean;
}
1145
/**
 * A single result from the internal (managed cloud) knowledge base.
 */
interface InternalKnowledgeBaseResult {
    /** Document ID */
    id: string;
    /** Document title */
    title?: string;
    /** Matched content snippet */
    content: string;
    /** Relevance score */
    score?: number;
    /** Source URL if available */
    url?: string;
    /** Additional metadata */
    metadata?: Record<string, unknown>;
}
1162
/**
 * Internal knowledge base search response.
 */
interface InternalKnowledgeBaseSearchResponse {
    /** Whether the search was successful */
    success: boolean;
    /** Search results (empty on failure) */
    results: InternalKnowledgeBaseResult[];
    /** Total number of results */
    total?: number;
    /** Error message if `success` is false */
    error?: string;
}
1175
+
1176
/**
 * Thread metadata (for listing threads without loading their messages).
 */
interface Thread {
    /** Unique thread identifier */
    id: string;
    /** Thread title (auto-generated from first message or manual) */
    title?: string;
    /** When the thread was created */
    createdAt: Date;
    /** When the thread was last updated */
    updatedAt: Date;
}
1189
/**
 * Full thread data: metadata plus the messages and sources it contains.
 */
interface ThreadData extends Thread {
    /** Messages in this thread */
    messages: Message[];
    /** Sources from the knowledge base for this thread */
    sources: Source[];
}
1198
/**
 * Persistence storage interface for custom adapters
 * (see {@link PersistenceConfig.customStorage}).
 */
interface ThreadStorageAdapter {
    /** Persist the full set of threads */
    save: (threads: ThreadData[]) => Promise<void>;
    /** Load all threads from storage */
    load: () => Promise<ThreadData[]>;
    /** Clear all threads from storage */
    clear: () => Promise<void>;
}
1209
/**
 * Thread persistence configuration.
 */
interface PersistenceConfig {
    /** Enable persistence (default: false) */
    enabled: boolean;
    /** Storage backend type */
    storage?: "localStorage" | "custom";
    /** Custom storage adapter (required if `storage` is 'custom') */
    customStorage?: ThreadStorageAdapter;
}
1220
/**
 * Generate a thread title from message content.
 *
 * @param content - Message text to derive the title from
 * @returns A thread title derived from `content`
 */
declare function generateThreadTitle(content: string): string;
1224
+
1225
// NOTE(review): minified public re-export map — the single-character aliases
// appear to be bundler-generated (rollup-plugin-dts style); do not edit by hand.
export { type Thread as $, type ToolCall as A, type TokenUsage as B, type ConsoleLogOptions as C, type LLMConfig as D, type CloudConfig as E, type Extension as F, type CopilotConfig as G, type HttpMethod as H, type IntentDetectionResult as I, type JSONSchemaProperty as J, type ActionParameter as K, type LLMProvider as L, type MessageAttachment as M, type NetworkRequestOptions as N, type ActionDefinition as O, type ParameterType as P, type ActionRenderProps as Q, type KnowledgeBaseProvider as R, type ScreenshotOptions as S, type ToolDefinition as T, type KnowledgeBaseConfig as U, type KnowledgeBaseResult as V, type KnowledgeBaseSearchRequest as W, type KnowledgeBaseSearchResponse as X, type InternalKnowledgeBaseConfig as Y, type InternalKnowledgeBaseResult as Z, type InternalKnowledgeBaseSearchResponse as _, type ScreenshotResult as a, type ThreadData as a0, type PersistenceConfig as a1, type ThreadStorageAdapter as a2, type AIProvider as a3, type ToolRenderProps as a4, type ToolConfig as a5, type ToolSet as a6, type UnifiedToolCall as a7, type UnifiedToolResult as a8, type ToolApprovalStatus as a9, failure as aA, type ToolExecution as aa, type AgentLoopConfig as ab, type AgentLoopState as ac, type AIResponseMode as ad, type AIContent as ae, type PermissionLevel as af, type ToolPermission as ag, type PermissionStorageConfig as ah, type PermissionStorageAdapter as ai, generateThreadTitle as aj, createMessage as ak, createUserMessage as al, createAssistantMessage as am, createToolMessage as an, createToolCall as ao, parseToolCallArgs as ap, hasToolCalls as aq, isToolResult as ar, actionToTool as as, getDefaultModel as at, DEFAULT_MODELS as au, tool as av, toolToOpenAIFormat as aw, toolToAnthropicFormat as ax, createToolResult as ay, success as az, type ConsoleLogResult as b, type ConsoleLogEntry as c, type NetworkRequestResult as d, type NetworkRequestEntry as e, type Source as f, type ToolExecutionStatus as g, type ToolResponse as h, type ToolInputSchema as i, type ToolLocation as j, type ToolContext as k, detectIntent as l, hasToolSuggestions as m, getPrimaryTool as n, generateSuggestionReason as o, createCustomDetector as p, type ConsoleLogType as q, type ToolType as r, type ToolsConfig as s, type ToolConsentRequest as t, type ToolConsentResponse as u, type CapturedContext as v, type CustomKeywords as w, type MessageRole as x, type Message as y, type MessageMetadata as z };