@squidcloud/slack-client 1.0.409
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/connectors/slack/common/src/slack-types.d.ts +98 -0
- package/dist/connectors/slack/slack-client/src/index.d.ts +1 -0
- package/dist/connectors/slack/slack-client/src/slack-client.d.ts +35 -0
- package/dist/index.js +1 -0
- package/dist/internal-common/src/public-types/ai-agent.public-types.d.ts +743 -0
- package/dist/internal-common/src/public-types/ai-common.public-types.d.ts +172 -0
- package/dist/internal-common/src/public-types/ai-knowledge-base.public-types.d.ts +387 -0
- package/dist/internal-common/src/public-types/backend.public-types.d.ts +50 -0
- package/dist/internal-common/src/public-types/communication.public-types.d.ts +30 -0
- package/dist/internal-common/src/public-types/extraction.public-types.d.ts +183 -0
- package/dist/internal-common/src/public-types/integration.public-types.d.ts +51 -0
- package/dist/internal-common/src/public-types/job.public-types.d.ts +59 -0
- package/dist/internal-common/src/public-types/secret.public-types.d.ts +51 -0
- package/package.json +28 -0
|
@@ -0,0 +1,743 @@
|
|
|
1
|
+
import { AiAudioCreateSpeechModelName, AiAudioTranscriptionModelName, AiChatModelName, AiEmbeddingsModelName, AiImageModelName, AiProviderType, AiRerankProvider, AnthropicChatModelName, GeminiChatModelName, GrokChatModelName, OpenAiAudioCreateSpeechModelName, OpenAiAudioTranscriptionModelName, OpenAiChatModelName, OpenAiCreateSpeechFormat } from './ai-common.public-types';
|
|
2
|
+
import { AiContextMetadata, AiContextMetadataFilter, AiKnowledgeBaseContextType, AiRagType } from './ai-knowledge-base.public-types';
|
|
3
|
+
import { AiFunctionId, AiFunctionIdWithContext, UserAiChatModelName } from './backend.public-types';
|
|
4
|
+
import { AiAgentId, AiContextId, AiKnowledgeBaseId, AppId, IntegrationId } from './communication.public-types';
|
|
5
|
+
import { DocumentExtractionMethod } from './extraction.public-types';
|
|
6
|
+
import { IntegrationType } from './integration.public-types';
|
|
7
|
+
import { JobId } from './job.public-types';
|
|
8
|
+
import { SecretKey } from './secret.public-types';
|
|
9
|
+
/**
|
|
10
|
+
* @category AI
|
|
11
|
+
*/
|
|
12
|
+
export type AiGenerateImageOptions = DallEOptions | StableDiffusionCoreOptions | FluxOptions;
|
|
13
|
+
/**
|
|
14
|
+
* @category AI
|
|
15
|
+
*/
|
|
16
|
+
export type OpenAiAudioTranscribeOptions = WhisperTranscribeOptions | Gpt4oTranscribeOptions;
|
|
17
|
+
/**
|
|
18
|
+
* @category AI
|
|
19
|
+
*/
|
|
20
|
+
export type AiAudioTranscribeOptions = OpenAiAudioTranscribeOptions;
|
|
21
|
+
/**
|
|
22
|
+
* @category AI
|
|
23
|
+
*/
|
|
24
|
+
export type AiAudioCreateSpeechOptions = OpenAiCreateSpeechOptions;
|
|
25
|
+
/**
|
|
26
|
+
* Base options for generating images with an AI model.
|
|
27
|
+
* @category AI
|
|
28
|
+
*/
|
|
29
|
+
export interface BaseAiGenerateImageOptions {
|
|
30
|
+
/** The name of the AI model to use for image generation. */
|
|
31
|
+
modelName: AiImageModelName;
|
|
32
|
+
}
|
|
33
|
+
/**
|
|
34
|
+
* Base options for transcribing audio with an AI model.
|
|
35
|
+
* @category AI
|
|
36
|
+
*/
|
|
37
|
+
export interface BaseAiAudioTranscribeOptions {
|
|
38
|
+
/** The name of the AI model to use for audio transcription. */
|
|
39
|
+
modelName: AiAudioTranscriptionModelName;
|
|
40
|
+
}
|
|
41
|
+
/**
|
|
42
|
+
* Base options for creating speech with an AI model.
|
|
43
|
+
* @category AI
|
|
44
|
+
*/
|
|
45
|
+
export interface BaseAiAudioCreateSpeechOptions {
|
|
46
|
+
/** The name of the AI model to use for speech creation. */
|
|
47
|
+
modelName: AiAudioCreateSpeechModelName;
|
|
48
|
+
}
|
|
49
|
+
/**
|
|
50
|
+
* Options for generating images using the DALL-E model.
|
|
51
|
+
* @category AI
|
|
52
|
+
*/
|
|
53
|
+
export interface DallEOptions extends BaseAiGenerateImageOptions {
|
|
54
|
+
/** Specifies the DALL-E 3 model for image generation. */
|
|
55
|
+
modelName: 'dall-e-3';
|
|
56
|
+
/** The quality of the generated image; defaults to 'standard'. */
|
|
57
|
+
quality?: 'hd' | 'standard';
|
|
58
|
+
/** The size of the generated image; defaults to '1024x1024'. */
|
|
59
|
+
size?: '1024x1024' | '1792x1024' | '1024x1792';
|
|
60
|
+
/** The number of images to generate; defaults to 1 and limited to 1. */
|
|
61
|
+
numberOfImagesToGenerate?: 1;
|
|
62
|
+
}
|
|
63
|
+
interface BaseOpenAiAudioTranscribeOptions extends BaseAiAudioTranscribeOptions {
|
|
64
|
+
/** Specifies the model for audio transcription. */
|
|
65
|
+
modelName: OpenAiAudioTranscriptionModelName;
|
|
66
|
+
/** The temperature for sampling during transcription; defaults to model-specific value. */
|
|
67
|
+
temperature?: number;
|
|
68
|
+
/** An optional prompt to guide the transcription process. */
|
|
69
|
+
prompt?: string;
|
|
70
|
+
}
|
|
71
|
+
/**
|
|
72
|
+
* Options for transcribing audio using the Whisper model.
|
|
73
|
+
* @category AI
|
|
74
|
+
*/
|
|
75
|
+
export interface WhisperTranscribeOptions extends BaseOpenAiAudioTranscribeOptions {
|
|
76
|
+
/** Specifies the Whisper-1 model for audio transcription. */
|
|
77
|
+
modelName: 'whisper-1';
|
|
78
|
+
/** The format of the transcription response; defaults to 'json'. */
|
|
79
|
+
responseFormat?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
|
|
80
|
+
}
|
|
81
|
+
/**
|
|
82
|
+
* Options for transcribing audio using the GPT-4o model.
|
|
83
|
+
* @category AI
|
|
84
|
+
*/
|
|
85
|
+
export interface Gpt4oTranscribeOptions extends BaseOpenAiAudioTranscribeOptions {
|
|
86
|
+
/** Specifies the Whisper-1 model for audio transcription. */
|
|
87
|
+
modelName: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe';
|
|
88
|
+
/** The format of the transcription response; defaults to 'json'. */
|
|
89
|
+
responseFormat?: 'json';
|
|
90
|
+
}
|
|
91
|
+
/**
|
|
92
|
+
* Options for creating speech using OpenAI's text-to-speech models.
|
|
93
|
+
* @category AI
|
|
94
|
+
*/
|
|
95
|
+
export interface OpenAiCreateSpeechOptions extends BaseAiAudioCreateSpeechOptions {
|
|
96
|
+
/** The OpenAI model to use for speech creation (e.g., 'tts-1' or 'tts-1-hd'). */
|
|
97
|
+
modelName: OpenAiAudioCreateSpeechModelName;
|
|
98
|
+
/** The voice to use for speech synthesis; defaults to model-specific value. */
|
|
99
|
+
voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
|
100
|
+
/** The format of the audio output; defaults to 'mp3'. */
|
|
101
|
+
responseFormat?: OpenAiCreateSpeechFormat;
|
|
102
|
+
/** An optional prompt to guide the speech synthesis process. */
|
|
103
|
+
instructions?: string;
|
|
104
|
+
/** The speed of the speech; defaults to 1.0. */
|
|
105
|
+
speed?: number;
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Options for generating images using the Flux model.
|
|
109
|
+
* @category AI
|
|
110
|
+
*/
|
|
111
|
+
export interface FluxOptions extends BaseAiGenerateImageOptions {
|
|
112
|
+
/** Specifies the Flux Pro 1.1 model for image generation. */
|
|
113
|
+
modelName: 'flux-pro-1.1' | 'flux-kontext-pro';
|
|
114
|
+
/** The width of the generated image, must be a multiple of 32, min 256, max 1440; defaults to 1024. */
|
|
115
|
+
width?: number;
|
|
116
|
+
/** The height of the generated image, must be a multiple of 32, min 256, max 1440; defaults to 768. */
|
|
117
|
+
height?: number;
|
|
118
|
+
/** Whether to enhance the prompt for more creative output; defaults to false. */
|
|
119
|
+
prompt_upsampling?: boolean;
|
|
120
|
+
/** A random seed for reproducible generation, if specified. */
|
|
121
|
+
seed?: number;
|
|
122
|
+
/** The safety tolerance level, from 1 (strict) to 5 (permissive). */
|
|
123
|
+
safety_tolerance?: number;
|
|
124
|
+
}
|
|
125
|
+
/**
|
|
126
|
+
* Options for generating images using the Stable Diffusion Core model.
|
|
127
|
+
* @category AI
|
|
128
|
+
*/
|
|
129
|
+
export interface StableDiffusionCoreOptions extends BaseAiGenerateImageOptions {
|
|
130
|
+
/** Specifies the Stable Diffusion Core model for image generation. */
|
|
131
|
+
modelName: 'stable-diffusion-core';
|
|
132
|
+
/** The aspect ratio of the generated image; defaults to '1:1'. */
|
|
133
|
+
aspectRatio?: '16:9' | '1:1' | '21:9' | '2:3' | '3:2' | '4:5' | '5:4' | '9:16' | '9:21';
|
|
134
|
+
/** An optional negative prompt to guide what to exclude from the image. */
|
|
135
|
+
negativePrompt?: string;
|
|
136
|
+
/** A random seed for reproducible generation, if specified. */
|
|
137
|
+
seed?: number;
|
|
138
|
+
/** A style preset to apply to the generated image, if specified. */
|
|
139
|
+
stylePreset?: 'analog-film' | 'anime' | 'cinematic' | 'comic-book' | 'digital-art' | 'enhance' | 'fantasy-art' | 'isometric' | 'line-art' | 'low-poly' | 'modeling-compound' | 'neon-punk' | 'origami' | 'photographic' | 'pixel-art' | 'tile-texture';
|
|
140
|
+
/** The format of the output image; defaults to 'png'. */
|
|
141
|
+
outputFormat?: 'jpeg' | 'png' | 'webp';
|
|
142
|
+
}
|
|
143
|
+
/**
|
|
144
|
+
* The possible sources for the LLM provider API key.
|
|
145
|
+
* @category AI
|
|
146
|
+
*/
|
|
147
|
+
export type ApiKeySource = 'user' | 'system';
|
|
148
|
+
/**
|
|
149
|
+
* @category AI
|
|
150
|
+
*/
|
|
151
|
+
export type AiAgentResponseFormat = 'text' | 'json_object';
|
|
152
|
+
/**
|
|
153
|
+
* @category AI
|
|
154
|
+
*/
|
|
155
|
+
export type AiFileUrlType = 'image' | 'document';
|
|
156
|
+
/**
|
|
157
|
+
* Represents a URL reference to an AI-processed file, such as an image.
|
|
158
|
+
* @category AI
|
|
159
|
+
*/
|
|
160
|
+
export interface AiFileUrl {
|
|
161
|
+
/** The unique identifier for the file URL for the current request. */
|
|
162
|
+
id: string;
|
|
163
|
+
/** The type of file referenced by the URL (e.g., 'image'). */
|
|
164
|
+
type: AiFileUrlType;
|
|
165
|
+
/** The purpose of the file, indicating how it will be used in AI processing. */
|
|
166
|
+
purpose: AiFilePurpose;
|
|
167
|
+
/** The URL pointing to the file. */
|
|
168
|
+
url: string;
|
|
169
|
+
/** An optional description of the file's content or purpose - sent to the AI. */
|
|
170
|
+
description?: string;
|
|
171
|
+
/** The file name, can be used in the prompt to reference the file. */
|
|
172
|
+
fileName?: string;
|
|
173
|
+
}
|
|
174
|
+
/** The purpose of the AI file, used to determine how the file should be processed or utilized. */
|
|
175
|
+
export type AiFilePurpose = 'context' | 'tools';
|
|
176
|
+
/**
|
|
177
|
+
* Metadata for an AI agent connected to an integration.
|
|
178
|
+
* @category AI
|
|
179
|
+
*/
|
|
180
|
+
export interface AiConnectedIntegrationMetadata<AiConnectedIntegrationOptionsType = unknown> {
|
|
181
|
+
/** The ID of the connected integration. */
|
|
182
|
+
integrationId: IntegrationId;
|
|
183
|
+
/** The type of the connected integration (e.g., API, database). */
|
|
184
|
+
integrationType: IntegrationType;
|
|
185
|
+
/** An optional description of the integration for the parent agent context, used as AI function description. */
|
|
186
|
+
description?: string;
|
|
187
|
+
/** Optional instructions for the connected integration agent, overriding the default if provided. */
|
|
188
|
+
instructions?: string;
|
|
189
|
+
/**
|
|
190
|
+
* Additional options for the connected integration.
|
|
191
|
+
* Squid Core or AI functions in connector packages may use these options to adjust behavior of the integration agent.
|
|
192
|
+
*/
|
|
193
|
+
options?: AiConnectedIntegrationOptionsType;
|
|
194
|
+
}
|
|
195
|
+
/**
|
|
196
|
+
* Metadata for a connected AI agent callable by the current agent.
|
|
197
|
+
* @category AI
|
|
198
|
+
*/
|
|
199
|
+
export interface AiConnectedAgentMetadata {
|
|
200
|
+
/** The ID of the connected AI agent. */
|
|
201
|
+
agentId: AiAgentId;
|
|
202
|
+
/** A description of the connected agent for the parent agent context, used as AI function description. */
|
|
203
|
+
description: string;
|
|
204
|
+
}
|
|
205
|
+
/**
|
|
206
|
+
* Metadata for a connected AI Knowledge Base callable by the current agent.
|
|
207
|
+
* @category AI
|
|
208
|
+
*/
|
|
209
|
+
export interface AiConnectedKnowledgeBaseMetadata {
|
|
210
|
+
/** The ID of the connected AI KnowledgeBase */
|
|
211
|
+
knowledgeBaseId: AiKnowledgeBaseId;
|
|
212
|
+
/** A description of when to use this AiKnowledgeBase */
|
|
213
|
+
description: string;
|
|
214
|
+
}
|
|
215
|
+
/**
|
|
216
|
+
* Quotas for a single AI chat prompt (`ask()` method call).
|
|
217
|
+
* @category AI
|
|
218
|
+
*/
|
|
219
|
+
export interface AiChatPromptQuotas {
|
|
220
|
+
/**
|
|
221
|
+
* Maximum depth of AI call recursion allowed.
|
|
222
|
+
* Recursion occurs when one AI agent calls another.
|
|
223
|
+
* Note that, for simplicity of implementation, this option guarantees only the maximum recursion depth,
|
|
224
|
+
* not the total number of nested AI calls.
|
|
225
|
+
* Default: 5.
|
|
226
|
+
*/
|
|
227
|
+
maxAiCallStackSize: number;
|
|
228
|
+
}
|
|
229
|
+
/**
|
|
230
|
+
* Options for AI agent execution plan, allowing the agent to plan what functionality to use (connected agents,
|
|
231
|
+
* connected, integrations, or functions).
|
|
232
|
+
*/
|
|
233
|
+
export interface AiAgentExecutionPlanOptions {
|
|
234
|
+
/** Whether to enable an execution plan for the agent. */
|
|
235
|
+
enabled: boolean;
|
|
236
|
+
/** The model to use for the execution plan - defaults to the model used by the agent. */
|
|
237
|
+
model?: AiChatModelName;
|
|
238
|
+
/** In case the model supports reasoning, this will control the level of effort - defaults to `high`. */
|
|
239
|
+
reasoningEffort?: AiReasoningEffort;
|
|
240
|
+
}
|
|
241
|
+
/**
|
|
242
|
+
* Options for AI agent memory management.
|
|
243
|
+
* @category AI
|
|
244
|
+
*/
|
|
245
|
+
export interface AiAgentMemoryOptions {
|
|
246
|
+
/**
|
|
247
|
+
* The memory mode:
|
|
248
|
+
* - 'none': No memory is used, the agent does not remember past interactions.
|
|
249
|
+
* - 'read-only': The agent can read from memory but cannot write to it.
|
|
250
|
+
* - 'read-write': The agent can both read from and write to memory.
|
|
251
|
+
*/
|
|
252
|
+
memoryMode: 'none' | 'read-only' | 'read-write';
|
|
253
|
+
/**
|
|
254
|
+
* A unique ID to store the chat memory.
|
|
255
|
+
*
|
|
256
|
+
* The full chat memory key is constructed as a combination of 'appId', 'agentId', and 'memoryId'.
|
|
257
|
+
* If not provided the Squid client instance ID used as a memoryId.
|
|
258
|
+
*
|
|
259
|
+
* Important: the memory ID should be treated with the same security measures as Access Token because it unblocks
|
|
260
|
+
* direct access to the agent chat history. A good practice is to use a non-trivial and a unique value.
|
|
261
|
+
*/
|
|
262
|
+
memoryId?: string;
|
|
263
|
+
/**
|
|
264
|
+
* Overrides the expiration for the whole chat history.
|
|
265
|
+
* If not provided, the expiration will not be changed.
|
|
266
|
+
*
|
|
267
|
+
* Default: the last provided expiration by user or 1 day if
|
|
268
|
+
* the user didn't provide any expiration.
|
|
269
|
+
*/
|
|
270
|
+
expirationMinutes?: number;
|
|
271
|
+
}
|
|
272
|
+
/**
|
|
273
|
+
* The base AI agent chat options, should not be used directly.
|
|
274
|
+
* @category AI
|
|
275
|
+
*/
|
|
276
|
+
export interface BaseAiChatOptions {
|
|
277
|
+
/**
|
|
278
|
+
* The maximum number of input tokens that Squid can use when making the request to the AI model.
|
|
279
|
+
* Defaults to the max tokens the model can accept.
|
|
280
|
+
*/
|
|
281
|
+
maxTokens?: number;
|
|
282
|
+
/**
|
|
283
|
+
* The maximum number of tokens the model should output.
|
|
284
|
+
* Passed directly to the AI model. Can be used to control the output verbosity.
|
|
285
|
+
*/
|
|
286
|
+
maxOutputTokens?: number;
|
|
287
|
+
/** The context ID to use for the request. If not provided, the agent's default context will be used. */
|
|
288
|
+
memoryOptions?: AiAgentMemoryOptions;
|
|
289
|
+
/** Whether to disable the whole context for the request. Default to false. */
|
|
290
|
+
disableContext?: boolean;
|
|
291
|
+
/** Rewrite prompt for RAG - defaults to false */
|
|
292
|
+
enablePromptRewriteForRag?: boolean;
|
|
293
|
+
/** Whether to include references from the source context in the response. Default to false. */
|
|
294
|
+
includeReference?: boolean;
|
|
295
|
+
/** The format of the response from the AI model. Note that not all models support JSON format. Default to 'text'. */
|
|
296
|
+
responseFormat?: AiAgentResponseFormat;
|
|
297
|
+
/** Whether to response in a "smooth typing" way, beneficial when the chat result is displayed in a UI. Default to true. */
|
|
298
|
+
smoothTyping?: boolean;
|
|
299
|
+
/** Global context passed to the agent and all AI functions of the agent. */
|
|
300
|
+
agentContext?: Record<string, unknown>;
|
|
301
|
+
/**
|
|
302
|
+
* Functions to expose to the AI.
|
|
303
|
+
* Either a function name or a name with an extra function context passed only to this function.
|
|
304
|
+
* The parameter values must be valid serializable JSON values.
|
|
305
|
+
* Overrides the stored value.
|
|
306
|
+
*/
|
|
307
|
+
functions?: Array<AiFunctionId | AiFunctionIdWithContext>;
|
|
308
|
+
/** Instructions to include with the prompt. */
|
|
309
|
+
instructions?: string;
|
|
310
|
+
/** A set of filters that will limit the context the AI can access.
|
|
311
|
+
* @deprecated use `contextMetadataFilterForKnowledgeBase` instead.
|
|
312
|
+
*/
|
|
313
|
+
contextMetadataFilter?: AiContextMetadataFilter;
|
|
314
|
+
/** A set of filters that will limit the context the AI can access. */
|
|
315
|
+
contextMetadataFilterForKnowledgeBase?: Record<AiKnowledgeBaseId, AiContextMetadataFilter>;
|
|
316
|
+
/** The options to use for the response in voice. */
|
|
317
|
+
voiceOptions?: AiAudioCreateSpeechOptions;
|
|
318
|
+
/** List of connected AI agents can be called by the current agent. Overrides the stored value. */
|
|
319
|
+
connectedAgents?: Array<AiConnectedAgentMetadata>;
|
|
320
|
+
/** List of connected AI agents can be called by the current agent. Overrides the stored value. */
|
|
321
|
+
connectedIntegrations?: Array<AiConnectedIntegrationMetadata>;
|
|
322
|
+
/** List of connected AiKnowlegeBases that can be called by the current agent */
|
|
323
|
+
connectedKnowledgeBases?: Array<AiConnectedKnowledgeBaseMetadata>;
|
|
324
|
+
/** Current budget for nested or recursive AI chat calls per single prompt. */
|
|
325
|
+
quotas?: AiChatPromptQuotas;
|
|
326
|
+
/** Include metadata in the context */
|
|
327
|
+
includeMetadata?: boolean;
|
|
328
|
+
/** The temperature to use when sampling from the model. Default to 0.5. */
|
|
329
|
+
temperature?: number;
|
|
330
|
+
/** Preset instruction options that can be toggled on */
|
|
331
|
+
guardrails?: GuardrailsOptions;
|
|
332
|
+
/** The LLM model to use. */
|
|
333
|
+
model?: AiChatModelName;
|
|
334
|
+
/** Which provider's reranker to use for reranking the context. Defaults to 'cohere'. */
|
|
335
|
+
rerankProvider?: AiRerankProvider;
|
|
336
|
+
/**
|
|
337
|
+
* Options for AI agent execution plan, allowing the agent to perform an execution plan before invoking
|
|
338
|
+
* connected agents, connected integrations, or functions.
|
|
339
|
+
*/
|
|
340
|
+
executionPlanOptions?: AiAgentExecutionPlanOptions;
|
|
341
|
+
/** An array of file URLs to include in the chat context. */
|
|
342
|
+
fileUrls?: Array<AiFileUrl>;
|
|
343
|
+
/**
|
|
344
|
+
* The level of reasoning effort to apply; defaults to model-specific value.
|
|
345
|
+
* Effective only for models with reasoning.
|
|
346
|
+
*/
|
|
347
|
+
reasoningEffort?: AiReasoningEffort;
|
|
348
|
+
/**
|
|
349
|
+
* Controls response length and detail level.
|
|
350
|
+
* Use `low` for brief responses, `medium` for balanced detail, or `high` for comprehensive explanations.
|
|
351
|
+
* Default: 'medium'.
|
|
352
|
+
*
|
|
353
|
+
* Note: this parameter is only supported by OpenAI plain text responses and is ignored for others.
|
|
354
|
+
* For other providers ask about verbosity in prompt and using `maxOutputTokens`.
|
|
355
|
+
*/
|
|
356
|
+
verbosity?: AiVerbosityLevel;
|
|
357
|
+
/**
|
|
358
|
+
* Enable LLMs built-in code interpreter for executing Python code.
|
|
359
|
+
* - 'none': Code interpreter is disabled (default).
|
|
360
|
+
* - 'llm': Use LLM's native code interpreter to run Python code.
|
|
361
|
+
*
|
|
362
|
+
* Note: Only supported by OpenAI and Gemini models. Ignored for other providers.
|
|
363
|
+
*/
|
|
364
|
+
useCodeInterpreter?: 'none' | 'llm';
|
|
365
|
+
}
|
|
366
|
+
/**
|
|
367
|
+
* Chat options specific to Gemini models, extending base options.
|
|
368
|
+
* @category AI
|
|
369
|
+
*/
|
|
370
|
+
export interface GeminiChatOptions extends BaseAiChatOptions {
|
|
371
|
+
/** The Gemini model to use for the chat. */
|
|
372
|
+
model?: GeminiChatModelName;
|
|
373
|
+
/** Enables grounding with web search for more informed responses; defaults to false. */
|
|
374
|
+
groundingWithWebSearch?: boolean;
|
|
375
|
+
}
|
|
376
|
+
/**
|
|
377
|
+
* Chat options specific to Grok models, extending base options.
|
|
378
|
+
* @category AI
|
|
379
|
+
*/
|
|
380
|
+
export interface GrokChatOptions extends BaseAiChatOptions {
|
|
381
|
+
/** The Grok model to use for the chat. */
|
|
382
|
+
model?: GrokChatModelName;
|
|
383
|
+
}
|
|
384
|
+
/**
|
|
385
|
+
* Chat options specific to OpenAI models, extending base options.
|
|
386
|
+
* @category AI
|
|
387
|
+
*/
|
|
388
|
+
export interface OpenAiChatOptions extends BaseAiChatOptions {
|
|
389
|
+
/** The OpenAI model to use for the chat. */
|
|
390
|
+
model?: OpenAiChatModelName;
|
|
391
|
+
}
|
|
392
|
+
/**
|
|
393
|
+
* AI reasoning effort: Unsupported values are mapped to the closest supported option.
|
|
394
|
+
* @category AI
|
|
395
|
+
*/
|
|
396
|
+
export type AiReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
|
|
397
|
+
/**
|
|
398
|
+
* Controls response length and detail level.
|
|
399
|
+
* Use `low` for brief responses,`medium` for balanced detail, or `high` for comprehensive explanations.
|
|
400
|
+
* Default: 'medium'.
|
|
401
|
+
*/
|
|
402
|
+
export type AiVerbosityLevel = 'low' | 'medium' | 'high';
|
|
403
|
+
/**
|
|
404
|
+
* Chat options specific to Anthropic models, extending base options.
|
|
405
|
+
* @category AI
|
|
406
|
+
*/
|
|
407
|
+
export interface AnthropicChatOptions extends BaseAiChatOptions {
|
|
408
|
+
/** The Anthropic model to use for the chat. */
|
|
409
|
+
model?: AnthropicChatModelName;
|
|
410
|
+
}
|
|
411
|
+
/**
|
|
412
|
+
* The generic options type. When no generic is provided,
|
|
413
|
+
* the type is inferred from the provided overrideModel (or falls back to BaseAiAgentChatOptions).
|
|
414
|
+
* @category AI
|
|
415
|
+
*/
|
|
416
|
+
export type AiChatOptions<T extends AiChatModelName | undefined = undefined> = T extends undefined ? BaseAiChatOptions | GeminiChatOptions | OpenAiChatOptions | AnthropicChatOptions : T extends GeminiChatModelName ? GeminiChatOptions : T extends OpenAiChatModelName ? OpenAiChatOptions : T extends AnthropicChatModelName ? AnthropicChatOptions : T extends GrokChatModelName ? GrokChatOptions : never;
|
|
417
|
+
/**
|
|
418
|
+
* @category AI
|
|
419
|
+
*/
|
|
420
|
+
export type AllAiAgentChatOptions = {
|
|
421
|
+
[K in keyof BaseAiChatOptions | keyof GeminiChatOptions | keyof GrokChatOptions | keyof OpenAiChatOptions | keyof AnthropicChatOptions]?: (K extends keyof BaseAiChatOptions ? BaseAiChatOptions[K] : never) | (K extends keyof GeminiChatOptions ? GeminiChatOptions[K] : never) | (K extends keyof GrokChatOptions ? GrokChatOptions[K] : never) | (K extends keyof OpenAiChatOptions ? OpenAiChatOptions[K] : never) | (K extends keyof AnthropicChatOptions ? AnthropicChatOptions[K] : never);
|
|
422
|
+
};
|
|
423
|
+
/**
|
|
424
|
+
* A definition of an AI agent with its properties and default chat options.
|
|
425
|
+
* @category AI
|
|
426
|
+
*/
|
|
427
|
+
export interface AiAgent<T extends AiChatModelName | undefined = undefined> {
|
|
428
|
+
/** The unique identifier of the AI agent. */
|
|
429
|
+
id: AiAgentId;
|
|
430
|
+
/** The date and time the agent was created. */
|
|
431
|
+
createdAt: Date;
|
|
432
|
+
/** The date and time the agent was last updated. */
|
|
433
|
+
updatedAt: Date;
|
|
434
|
+
/** An optional description of the agent's purpose or capabilities. */
|
|
435
|
+
description?: string;
|
|
436
|
+
/** Indicates whether the agent is publicly accessible; defaults to false. */
|
|
437
|
+
isPublic?: boolean;
|
|
438
|
+
/** Enables audit logging for the agent's activities if true; defaults to false. */
|
|
439
|
+
auditLog?: boolean;
|
|
440
|
+
/** The default chat options for the agent, overridable by the user during use. */
|
|
441
|
+
options: AiChatOptions<T>;
|
|
442
|
+
/** The embedding model name used by the agent. */
|
|
443
|
+
embeddingModelName: AiEmbeddingsModelName;
|
|
444
|
+
}
|
|
445
|
+
/**
|
|
446
|
+
* @category AI
|
|
447
|
+
*/
|
|
448
|
+
export type UpsertAgentRequest = Omit<AiAgent, 'createdAt' | 'updatedAt' | 'options' | 'embeddingModelName'> & {
|
|
449
|
+
options?: AiChatOptions;
|
|
450
|
+
embeddingModelName?: AiEmbeddingsModelName;
|
|
451
|
+
};
|
|
452
|
+
/**
|
|
453
|
+
* A status message from an AI agent operation.
|
|
454
|
+
* @category AI
|
|
455
|
+
*/
|
|
456
|
+
export interface AiStatusMessage {
|
|
457
|
+
/** ID of the status update message. */
|
|
458
|
+
messageId: string;
|
|
459
|
+
/** The ID of the agent generating the status message. */
|
|
460
|
+
agentId: AiAgentId;
|
|
461
|
+
/** An optional chat ID associated with the status message. */
|
|
462
|
+
chatId?: string;
|
|
463
|
+
/** The title or summary of the status message. */
|
|
464
|
+
title: string;
|
|
465
|
+
/** The Job ID associated with the status message. */
|
|
466
|
+
jobId: JobId;
|
|
467
|
+
/** Optional tags providing additional metadata about the status. */
|
|
468
|
+
tags?: Record<string, any>;
|
|
469
|
+
}
|
|
470
|
+
/** List of all chat history (memory) sources. */
|
|
471
|
+
export declare const AI_CHAT_MESSAGE_SOURCE: readonly ["user", "ai"];
|
|
472
|
+
/** Source of the chat history entry: either an AI response or a user. */
|
|
473
|
+
export type AiChatMessageSource = (typeof AI_CHAT_MESSAGE_SOURCE)[number];
|
|
474
|
+
/** A history entry for a chat. */
|
|
475
|
+
export interface AiChatMessage {
|
|
476
|
+
/** ID of the entry. Unique per agent. */
|
|
477
|
+
id: string;
|
|
478
|
+
/** Agent's application. */
|
|
479
|
+
appId: AppId;
|
|
480
|
+
/** The ID of the agent that owns the history. */
|
|
481
|
+
agentId: AiAgentId;
|
|
482
|
+
/** A memory (history) ID associated with the chat conversation. */
|
|
483
|
+
memoryId: string;
|
|
484
|
+
/** The source of the message: a user or an AI. */
|
|
485
|
+
source: AiChatMessageSource;
|
|
486
|
+
/** The text of the entry: a user's prompt or an AI-generated response. */
|
|
487
|
+
message: string;
|
|
488
|
+
/** Time the entry is created. Unix time millis. */
|
|
489
|
+
timestamp: number;
|
|
490
|
+
}
|
|
491
|
+
/**
|
|
492
|
+
* Name of the tag that contains ID of the parent message.
|
|
493
|
+
* When the tag is present, the current message should be considered in the context of the parent message.
|
|
494
|
+
* Example: the message can contain a function call result.
|
|
495
|
+
*/
|
|
496
|
+
export declare const AI_STATUS_MESSAGE_PARENT_MESSAGE_ID_TAG = "parent";
|
|
497
|
+
/** The tag contains a response of the AI tool call. */
|
|
498
|
+
export declare const AI_STATUS_MESSAGE_RESULT_TAG = "result";
|
|
499
|
+
/**
|
|
500
|
+
* The options for the AI agent search method.
|
|
501
|
+
* @category AI
|
|
502
|
+
*/
|
|
503
|
+
export interface AiSearchOptions {
|
|
504
|
+
/** The prompt to search for */
|
|
505
|
+
prompt: string;
|
|
506
|
+
/** DEPRECATED: A set of filters that will limit the context the AI can access. */
|
|
507
|
+
contextMetadataFilter?: AiContextMetadataFilter;
|
|
508
|
+
/** A set of filters that will limit the context the AI can access. */
|
|
509
|
+
contextMetadataFilterForKnowledgeBase?: Record<AiKnowledgeBaseId, AiContextMetadataFilter>;
|
|
510
|
+
/** The maximum number of results to return */
|
|
511
|
+
limit?: number;
|
|
512
|
+
/** Which provider's reranker to use for reranking the context. Defaults to 'cohere'. */
|
|
513
|
+
rerankProvider?: AiRerankProvider;
|
|
514
|
+
}
|
|
515
|
+
/**
|
|
516
|
+
* A single chunk of data returned from an AI search operation.
|
|
517
|
+
* @category AI
|
|
518
|
+
*/
|
|
519
|
+
export interface AiSearchResultChunk {
|
|
520
|
+
/** The data content of the search result chunk. */
|
|
521
|
+
data: string;
|
|
522
|
+
/** Optional metadata associated with the chunk. */
|
|
523
|
+
metadata?: AiContextMetadata;
|
|
524
|
+
/** The relevance score of the chunk, indicating match quality. */
|
|
525
|
+
score: number;
|
|
526
|
+
}
|
|
527
|
+
/**
|
|
528
|
+
* Represents an AI agent's context entry with metadata and content.
|
|
529
|
+
* @category AI
|
|
530
|
+
*/
|
|
531
|
+
export interface AiAgentContext {
|
|
532
|
+
/** The unique identifier of the context entry. */
|
|
533
|
+
id: AiContextId;
|
|
534
|
+
/** The date and time the context was created. */
|
|
535
|
+
createdAt: Date;
|
|
536
|
+
/** The date and time the context was last updated. */
|
|
537
|
+
updatedAt: Date;
|
|
538
|
+
/** The ID of the agent owning this context. */
|
|
539
|
+
agentId: AiAgentId;
|
|
540
|
+
/** The type of context (e.g., 'text' or 'file'). */
|
|
541
|
+
type: AiKnowledgeBaseContextType;
|
|
542
|
+
/** A title describing the context content. */
|
|
543
|
+
title: string;
|
|
544
|
+
/** The text content of the context. */
|
|
545
|
+
text: string;
|
|
546
|
+
/** Indicates whether the context is a preview; defaults to false. */
|
|
547
|
+
preview: boolean;
|
|
548
|
+
/** The size of the context content in bytes. */
|
|
549
|
+
sizeBytes: number;
|
|
550
|
+
/** Metadata associated with the context. */
|
|
551
|
+
metadata: AiContextMetadata;
|
|
552
|
+
}
|
|
553
|
+
/**
|
|
554
|
+
* @category AI
|
|
555
|
+
*/
|
|
556
|
+
export type AgentContextRequest = TextContextRequest | FileContextRequest;
|
|
557
|
+
interface BaseContextRequest {
|
|
558
|
+
contextId: string;
|
|
559
|
+
type: AiKnowledgeBaseContextType;
|
|
560
|
+
metadata?: AiContextMetadata;
|
|
561
|
+
}
|
|
562
|
+
/**
 * Common fields for the status of an individual context item after an
 * upsert attempt (covers both the success and the error outcome).
 * @category AI
 */
export interface BaseUpsertContextStatus {
    /** Whether the context upsert succeeded ('success') or failed ('error'). */
    status: 'success' | 'error';
    /** The unique identifier of the context item. */
    contextId: string;
    /** The name of the context item, typically the title or filename. */
    name: string;
}
/**
 * Status of an individual context item after successfully upserting it.
 * @category AI
 */
export interface UpsertContextStatusSuccess extends BaseUpsertContextStatus {
    /** Discriminant: the upsert succeeded. */
    status: 'success';
}
/**
 * Status of an individual context item after it failed to upsert.
 *
 * Contains the error message for why the upsert failed.
 * @category AI
 */
export interface UpsertContextStatusError extends BaseUpsertContextStatus {
    /** Discriminant: the upsert failed. */
    status: 'error';
    /** Reason the upsert failed. */
    errorMessage: string;
}
/**
 * Status of an individual context item after attempting to upsert it.
 * @category AI
 */
export type UpsertContextStatus = UpsertContextStatusSuccess | UpsertContextStatusError;
/**
|
|
600
|
+
* Response structure for upserting context.
|
|
601
|
+
* @category AI
|
|
602
|
+
*/
|
|
603
|
+
export interface UpsertAgentContextResponse {
|
|
604
|
+
/** Upsert failure that occurred for the items sent in the request. */
|
|
605
|
+
failure?: UpsertContextStatusError;
|
|
606
|
+
}
|
|
607
|
+
/**
|
|
608
|
+
* Response structure for upserting contexts.
|
|
609
|
+
* @category AI
|
|
610
|
+
*/
|
|
611
|
+
export interface UpsertAgentContextsResponse {
|
|
612
|
+
/** List of the upsert failures that occurred for the items sent in the request. */
|
|
613
|
+
failures: Array<UpsertContextStatusError>;
|
|
614
|
+
}
|
|
615
|
+
/**
 * Options for upserting text content into the AI agent's context.
 *
 * Currently adds nothing beyond {@link BaseAiContextOptions}; it exists as an
 * extension point for future text-specific options.
 * @category AI
 */
export interface AiContextTextOptions extends BaseAiContextOptions {
}
/**
 * Options for upserting file content into the AI agent's context.
 *
 * Currently adds nothing beyond {@link BaseAiContextOptions}; it exists as an
 * extension point for future file-specific options.
 * @category AI
 */
export interface AiContextFileOptions extends BaseAiContextOptions {
}
/**
 * Request structure for adding text-based context to an AI agent.
 * @category AI
 */
export interface TextContextRequest extends BaseContextRequest {
    /** Discriminant: specifies the context type as 'text'. */
    type: 'text';
    /** A title for the text context. */
    title: string;
    /** The text content to add to the context. */
    text: string;
    /** General options for how to process the text. */
    options?: AiContextTextOptions;
}
/**
 * Request structure for adding file-based context to an AI agent.
 * @category AI
 */
export interface FileContextRequest extends BaseContextRequest {
    /** Discriminant: specifies the context type as 'file'. */
    type: 'file';
    /** Whether to extract images from the file; defaults to false. */
    extractImages?: boolean;
    /** The minimum size, in pixels, for extracted images, if applicable. */
    imageMinSizePixels?: number;
    /** The AI model to use for extraction, if specified. */
    extractionModel?: AiChatModelName;
    /** General options for how to process the file. */
    options?: AiContextFileOptions;
    /** The preferred method for extracting data from the document. */
    preferredExtractionMethod?: DocumentExtractionMethod;
    /**
     * Whether Squid keeps or discards the original file.
     *
     * Keeping the original file allows reprocessing and the ability for the
     * user to download it later.
     *
     * Defaults to false (the original file is kept).
     */
    discardOriginalFile?: boolean;
}
/**
 * Base options for how the content being upserted is processed.
 * @category AI
 */
export type BaseAiContextOptions = {
    /** The type of RAG (retrieval-augmented generation) to use for the content. */
    ragType?: AiRagType;
    /** Amount of chunk overlap, in characters. */
    chunkOverlap?: number;
};
/**
 * The keys of the boolean guardrail flags in {@link GuardrailsOptions}
 * (the free-form `custom` instruction is not included).
 * @category AI
 */
export type GuardrailKeysType = 'disableProfanity' | 'offTopicAnswers' | 'professionalTone' | 'disablePii';
/**
 * Options for applying guardrails to AI responses to enforce specific constraints.
 * @category AI
 */
export type GuardrailsOptions = {
    /** Disables profanity in the AI response if true; defaults to false. */
    disableProfanity?: boolean;
    /** Prevents off-topic answers if true; defaults to false. */
    offTopicAnswers?: boolean;
    /** Enforces a professional tone in the response if true; defaults to false. */
    professionalTone?: boolean;
    /** Disables inclusion of personally identifiable information if true; defaults to false. */
    disablePii?: boolean;
    /** A custom guardrail instruction, if specified. */
    custom?: string;
};
/**
 * Response structure for transcribing audio and asking an AI agent a question.
 * @category AI
 */
export interface AiTranscribeAndAskResponse {
    /** The AI agent's response to the transcribed prompt. */
    responseString: string;
    /** The transcribed text from the audio input. */
    transcribedPrompt: string;
    /** The annotations associated with the AI response, if any. */
    annotations?: Record<string, AiAnnotation>;
}
/**
 * Per-application AI settings.
 * @category AI
 */
export interface ApplicationAiSettings {
    /** Maps AI provider name to the API key secret name that must be used for any request made by the application. */
    apiKeys: Partial<Record<AiProviderType, SecretKey>>;
}
/**
 * Options for user-provided LLM models.
 * @category AI
 */
export interface UserAiChatOptions extends BaseAiChatOptions {
    /** The LLM model to use for the chat query. */
    model: UserAiChatModelName;
}
/**
 * Response structure for asking a user-provided LLM a question.
 * @category AI
 */
export interface UserAiAskResponse {
    /** The response from the AI agent, if one was produced. */
    response?: string;
    /** The number of input tokens used in the request. */
    inputTokens?: number;
    /** The number of output tokens generated by the AI agent. */
    outputTokens?: number;
}
/**
 * An annotation attached to an AI response. Currently the only variant is
 * {@link AiFileAnnotation}.
 * @category AI
 */
export type AiAnnotation = AiFileAnnotation;
/**
 * An annotation for a file in the AI response.
 * @category AI
 */
export interface AiFileAnnotation {
    /** Discriminant: the annotation type, always 'file'. */
    type: 'file';
    /** The AI file with the URL to the file. */
    aiFileUrl: AiFileUrl;
}
export {};