@letta-ai/letta-client 1.0.0-alpha.15 → 1.0.0-alpha.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +41 -0
- package/client.d.mts +22 -22
- package/client.d.mts.map +1 -1
- package/client.d.ts +22 -22
- package/client.d.ts.map +1 -1
- package/client.js.map +1 -1
- package/client.mjs.map +1 -1
- package/package.json +1 -1
- package/resources/agents/agents.d.mts +1098 -136
- package/resources/agents/agents.d.mts.map +1 -1
- package/resources/agents/agents.d.ts +1098 -136
- package/resources/agents/agents.d.ts.map +1 -1
- package/resources/agents/agents.js +0 -6
- package/resources/agents/agents.js.map +1 -1
- package/resources/agents/agents.mjs +1 -7
- package/resources/agents/agents.mjs.map +1 -1
- package/resources/agents/blocks.d.mts +1 -15
- package/resources/agents/blocks.d.mts.map +1 -1
- package/resources/agents/blocks.d.ts +1 -15
- package/resources/agents/blocks.d.ts.map +1 -1
- package/resources/agents/folders.d.mts +47 -76
- package/resources/agents/folders.d.mts.map +1 -1
- package/resources/agents/folders.d.ts +47 -76
- package/resources/agents/folders.d.ts.map +1 -1
- package/resources/agents/folders.js +5 -1
- package/resources/agents/folders.js.map +1 -1
- package/resources/agents/folders.mjs +5 -1
- package/resources/agents/folders.mjs.map +1 -1
- package/resources/agents/index.d.mts +4 -4
- package/resources/agents/index.d.mts.map +1 -1
- package/resources/agents/index.d.ts +4 -4
- package/resources/agents/index.d.ts.map +1 -1
- package/resources/agents/index.js.map +1 -1
- package/resources/agents/index.mjs.map +1 -1
- package/resources/agents/messages.d.mts +87 -28
- package/resources/agents/messages.d.mts.map +1 -1
- package/resources/agents/messages.d.ts +87 -28
- package/resources/agents/messages.d.ts.map +1 -1
- package/resources/agents/messages.js +5 -5
- package/resources/agents/messages.js.map +1 -1
- package/resources/agents/messages.mjs +5 -5
- package/resources/agents/messages.mjs.map +1 -1
- package/resources/agents/tools.d.mts +6 -27
- package/resources/agents/tools.d.mts.map +1 -1
- package/resources/agents/tools.d.ts +6 -27
- package/resources/agents/tools.d.ts.map +1 -1
- package/resources/agents/tools.js +5 -1
- package/resources/agents/tools.js.map +1 -1
- package/resources/agents/tools.mjs +5 -1
- package/resources/agents/tools.mjs.map +1 -1
- package/resources/archives.d.mts +9 -37
- package/resources/archives.d.mts.map +1 -1
- package/resources/archives.d.ts +9 -37
- package/resources/archives.d.ts.map +1 -1
- package/resources/archives.js +2 -1
- package/resources/archives.js.map +1 -1
- package/resources/archives.mjs +2 -1
- package/resources/archives.mjs.map +1 -1
- package/resources/batches/batches.d.mts +8 -30
- package/resources/batches/batches.d.mts.map +1 -1
- package/resources/batches/batches.d.ts +8 -30
- package/resources/batches/batches.d.ts.map +1 -1
- package/resources/batches/batches.js +2 -1
- package/resources/batches/batches.js.map +1 -1
- package/resources/batches/batches.mjs +2 -1
- package/resources/batches/batches.mjs.map +1 -1
- package/resources/batches/index.d.mts +2 -2
- package/resources/batches/index.d.mts.map +1 -1
- package/resources/batches/index.d.ts +2 -2
- package/resources/batches/index.d.ts.map +1 -1
- package/resources/batches/index.js.map +1 -1
- package/resources/batches/index.mjs.map +1 -1
- package/resources/batches/messages.d.mts +6 -30
- package/resources/batches/messages.d.mts.map +1 -1
- package/resources/batches/messages.d.ts +6 -30
- package/resources/batches/messages.d.ts.map +1 -1
- package/resources/batches/messages.js +2 -1
- package/resources/batches/messages.js.map +1 -1
- package/resources/batches/messages.mjs +2 -1
- package/resources/batches/messages.mjs.map +1 -1
- package/resources/blocks/blocks.d.mts +1 -6
- package/resources/blocks/blocks.d.mts.map +1 -1
- package/resources/blocks/blocks.d.ts +1 -6
- package/resources/blocks/blocks.d.ts.map +1 -1
- package/resources/blocks/blocks.js +0 -6
- package/resources/blocks/blocks.js.map +1 -1
- package/resources/blocks/blocks.mjs +0 -6
- package/resources/blocks/blocks.mjs.map +1 -1
- package/resources/blocks/index.d.mts +1 -1
- package/resources/blocks/index.d.mts.map +1 -1
- package/resources/blocks/index.d.ts +1 -1
- package/resources/blocks/index.d.ts.map +1 -1
- package/resources/blocks/index.js.map +1 -1
- package/resources/blocks/index.mjs.map +1 -1
- package/resources/folders/files.d.mts +73 -97
- package/resources/folders/files.d.mts.map +1 -1
- package/resources/folders/files.d.ts +73 -97
- package/resources/folders/files.d.ts.map +1 -1
- package/resources/folders/files.js +5 -1
- package/resources/folders/files.js.map +1 -1
- package/resources/folders/files.mjs +5 -1
- package/resources/folders/files.mjs.map +1 -1
- package/resources/folders/folders.d.mts +11 -44
- package/resources/folders/folders.d.mts.map +1 -1
- package/resources/folders/folders.d.ts +11 -44
- package/resources/folders/folders.d.ts.map +1 -1
- package/resources/folders/folders.js +2 -7
- package/resources/folders/folders.js.map +1 -1
- package/resources/folders/folders.mjs +2 -7
- package/resources/folders/folders.mjs.map +1 -1
- package/resources/folders/index.d.mts +2 -2
- package/resources/folders/index.d.mts.map +1 -1
- package/resources/folders/index.d.ts +2 -2
- package/resources/folders/index.d.ts.map +1 -1
- package/resources/folders/index.js.map +1 -1
- package/resources/folders/index.mjs.map +1 -1
- package/resources/groups/groups.d.mts +1 -6
- package/resources/groups/groups.d.mts.map +1 -1
- package/resources/groups/groups.d.ts +1 -6
- package/resources/groups/groups.d.ts.map +1 -1
- package/resources/groups/groups.js +0 -6
- package/resources/groups/groups.js.map +1 -1
- package/resources/groups/groups.mjs +0 -6
- package/resources/groups/groups.mjs.map +1 -1
- package/resources/groups/index.d.mts +1 -1
- package/resources/groups/index.d.mts.map +1 -1
- package/resources/groups/index.d.ts +1 -1
- package/resources/groups/index.d.ts.map +1 -1
- package/resources/groups/index.js.map +1 -1
- package/resources/groups/index.mjs.map +1 -1
- package/resources/groups/messages.d.mts +9 -3
- package/resources/groups/messages.d.mts.map +1 -1
- package/resources/groups/messages.d.ts +9 -3
- package/resources/groups/messages.d.ts.map +1 -1
- package/resources/identities/identities.d.mts +9 -36
- package/resources/identities/identities.d.mts.map +1 -1
- package/resources/identities/identities.d.ts +9 -36
- package/resources/identities/identities.d.ts.map +1 -1
- package/resources/identities/identities.js +2 -7
- package/resources/identities/identities.js.map +1 -1
- package/resources/identities/identities.mjs +2 -7
- package/resources/identities/identities.mjs.map +1 -1
- package/resources/identities/index.d.mts +1 -1
- package/resources/identities/index.d.mts.map +1 -1
- package/resources/identities/index.d.ts +1 -1
- package/resources/identities/index.d.ts.map +1 -1
- package/resources/identities/index.js.map +1 -1
- package/resources/identities/index.mjs.map +1 -1
- package/resources/index.d.mts +11 -11
- package/resources/index.d.mts.map +1 -1
- package/resources/index.d.ts +11 -11
- package/resources/index.d.ts.map +1 -1
- package/resources/index.js.map +1 -1
- package/resources/index.mjs.map +1 -1
- package/resources/mcp-servers/index.d.mts +2 -3
- package/resources/mcp-servers/index.d.mts.map +1 -1
- package/resources/mcp-servers/index.d.ts +2 -3
- package/resources/mcp-servers/index.d.ts.map +1 -1
- package/resources/mcp-servers/index.js +1 -3
- package/resources/mcp-servers/index.js.map +1 -1
- package/resources/mcp-servers/index.mjs +1 -2
- package/resources/mcp-servers/index.mjs.map +1 -1
- package/resources/mcp-servers/mcp-servers.d.mts +292 -378
- package/resources/mcp-servers/mcp-servers.d.mts.map +1 -1
- package/resources/mcp-servers/mcp-servers.d.ts +292 -378
- package/resources/mcp-servers/mcp-servers.d.ts.map +1 -1
- package/resources/mcp-servers/mcp-servers.js +17 -4
- package/resources/mcp-servers/mcp-servers.js.map +1 -1
- package/resources/mcp-servers/mcp-servers.mjs +17 -4
- package/resources/mcp-servers/mcp-servers.mjs.map +1 -1
- package/resources/mcp-servers/tools.d.mts +3 -41
- package/resources/mcp-servers/tools.d.mts.map +1 -1
- package/resources/mcp-servers/tools.d.ts +3 -41
- package/resources/mcp-servers/tools.d.ts.map +1 -1
- package/resources/mcp-servers/tools.js.map +1 -1
- package/resources/mcp-servers/tools.mjs.map +1 -1
- package/resources/models/embeddings.d.mts +6 -2
- package/resources/models/embeddings.d.mts.map +1 -1
- package/resources/models/embeddings.d.ts +6 -2
- package/resources/models/embeddings.d.ts.map +1 -1
- package/resources/models/embeddings.js +5 -1
- package/resources/models/embeddings.js.map +1 -1
- package/resources/models/embeddings.mjs +5 -1
- package/resources/models/embeddings.mjs.map +1 -1
- package/resources/models/index.d.mts +1 -1
- package/resources/models/index.d.mts.map +1 -1
- package/resources/models/index.d.ts +1 -1
- package/resources/models/index.d.ts.map +1 -1
- package/resources/models/index.js.map +1 -1
- package/resources/models/index.mjs.map +1 -1
- package/resources/models/models.d.mts +174 -3
- package/resources/models/models.d.mts.map +1 -1
- package/resources/models/models.d.ts +174 -3
- package/resources/models/models.d.ts.map +1 -1
- package/resources/models/models.js +5 -1
- package/resources/models/models.js.map +1 -1
- package/resources/models/models.mjs +5 -1
- package/resources/models/models.mjs.map +1 -1
- package/resources/runs/index.d.mts +1 -1
- package/resources/runs/index.d.mts.map +1 -1
- package/resources/runs/index.d.ts +1 -1
- package/resources/runs/index.d.ts.map +1 -1
- package/resources/runs/index.js.map +1 -1
- package/resources/runs/index.mjs.map +1 -1
- package/resources/runs/runs.d.mts +6 -32
- package/resources/runs/runs.d.mts.map +1 -1
- package/resources/runs/runs.d.ts +6 -32
- package/resources/runs/runs.d.ts.map +1 -1
- package/resources/runs/runs.js +2 -1
- package/resources/runs/runs.js.map +1 -1
- package/resources/runs/runs.mjs +2 -1
- package/resources/runs/runs.mjs.map +1 -1
- package/resources/tools.d.mts +6 -68
- package/resources/tools.d.mts.map +1 -1
- package/resources/tools.d.ts +6 -68
- package/resources/tools.d.ts.map +1 -1
- package/resources/tools.js +2 -7
- package/resources/tools.js.map +1 -1
- package/resources/tools.mjs +2 -7
- package/resources/tools.mjs.map +1 -1
- package/src/client.ts +41 -28
- package/src/resources/agents/agents.ts +1364 -77
- package/src/resources/agents/blocks.ts +1 -15
- package/src/resources/agents/folders.ts +55 -85
- package/src/resources/agents/index.ts +5 -2
- package/src/resources/agents/messages.ts +123 -28
- package/src/resources/agents/tools.ts +10 -34
- package/src/resources/archives.ts +12 -45
- package/src/resources/batches/batches.ts +10 -41
- package/src/resources/batches/index.ts +2 -2
- package/src/resources/batches/messages.ts +12 -37
- package/src/resources/blocks/blocks.ts +0 -10
- package/src/resources/blocks/index.ts +0 -1
- package/src/resources/folders/files.ts +88 -113
- package/src/resources/folders/folders.ts +14 -55
- package/src/resources/folders/index.ts +2 -2
- package/src/resources/groups/groups.ts +0 -10
- package/src/resources/groups/index.ts +0 -1
- package/src/resources/groups/messages.ts +10 -3
- package/src/resources/identities/identities.ts +12 -49
- package/src/resources/identities/index.ts +1 -2
- package/src/resources/index.ts +21 -14
- package/src/resources/mcp-servers/index.ts +14 -9
- package/src/resources/mcp-servers/mcp-servers.ts +303 -402
- package/src/resources/mcp-servers/tools.ts +6 -47
- package/src/resources/models/embeddings.ts +6 -2
- package/src/resources/models/index.ts +2 -0
- package/src/resources/models/models.ts +254 -2
- package/src/resources/runs/index.ts +1 -1
- package/src/resources/runs/runs.ts +11 -44
- package/src/resources/tools.ts +8 -92
- package/src/version.ts +1 -1
- package/version.d.mts +1 -1
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/version.mjs +1 -1
- package/resources/mcp-servers/refresh.d.mts +0 -24
- package/resources/mcp-servers/refresh.d.mts.map +0 -1
- package/resources/mcp-servers/refresh.d.ts +0 -24
- package/resources/mcp-servers/refresh.d.ts.map +0 -1
- package/resources/mcp-servers/refresh.js +0 -27
- package/resources/mcp-servers/refresh.js.map +0 -1
- package/resources/mcp-servers/refresh.mjs +0 -23
- package/resources/mcp-servers/refresh.mjs.map +0 -1
- package/src/resources/mcp-servers/refresh.ts +0 -43
@@ -7,17 +7,18 @@ import { Block, BlockAttachParams, BlockDetachParams, BlockListParams, BlockModi
  import * as FilesAPI from "./files.mjs";
  import { FileCloseAllResponse, FileCloseParams, FileCloseResponse, FileListParams, FileListResponse, FileListResponsesNextFilesPage, FileOpenParams, FileOpenResponse, Files } from "./files.mjs";
  import * as FoldersAPI from "./folders.mjs";
- import { FolderAttachParams, FolderDetachParams, FolderListParams, FolderListResponse, Folders } from "./folders.mjs";
+ import { FolderAttachParams, FolderDetachParams, FolderListParams, FolderListResponse, FolderListResponsesArrayPage, Folders } from "./folders.mjs";
  import * as GroupsAPI from "./groups.mjs";
  import { GroupListParams, Groups } from "./groups.mjs";
  import * as MessagesAPI from "./messages.mjs";
- import { ApprovalCreate, ApprovalRequestMessage, ApprovalResponseMessage, AssistantMessage, EventMessage, HiddenReasoningMessage, ImageContent, JobStatus, JobType, LettaAssistantMessageContentUnion, LettaMessageUnion, LettaMessageUnionsArrayPage, LettaRequest, LettaResponse, LettaStreamingRequest, LettaStreamingResponse, LettaUserMessageContentUnion, Message, MessageCancelParams, MessageCancelResponse, MessageListParams, MessageModifyParams, MessageModifyResponse, MessageResetParams, MessageRole, MessageSendAsyncParams, MessageSendParams, MessageStreamParams, MessageType, Messages, OmittedReasoningContent, ReasoningContent, ReasoningMessage, RedactedReasoningContent, Run, SummaryMessage, SystemMessage, TextContent, ToolCall, ToolCallContent, ToolCallDelta, ToolCallMessage, ToolReturn, ToolReturnContent, UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage, UserMessage } from "./messages.mjs";
+ import { ApprovalCreate, ApprovalRequestMessage, ApprovalResponseMessage, AssistantMessage, EventMessage, HiddenReasoningMessage, ImageContent, JobStatus, JobType, LettaAssistantMessageContentUnion, LettaMessageUnion, LettaMessageUnionsArrayPage, LettaRequest, LettaResponse, LettaStreamingRequest, LettaStreamingResponse, LettaUserMessageContentUnion, Message, MessageCancelParams, MessageCancelResponse, MessageListParams, MessageModifyParams, MessageModifyResponse, MessageResetParams, MessageRole, MessageSendAsyncParams, MessageSendParams, MessageSendParamsNonStreaming, MessageSendParamsStreaming, MessageStreamParams, MessageType, Messages, OmittedReasoningContent, ReasoningContent, ReasoningMessage, RedactedReasoningContent, Run, SummaryMessage, SystemMessage, TextContent, ToolCall, ToolCallContent, ToolCallDelta, ToolCallMessage, ToolReturn, ToolReturnContent, UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage, UserMessage } from "./messages.mjs";
  import * as AgentsToolsAPI from "./tools.mjs";
- import { ToolAttachParams, ToolDetachParams, ToolListParams,
+ import { ToolAttachParams, ToolDetachParams, ToolListParams, ToolUpdateApprovalParams, Tools } from "./tools.mjs";
  import * as BlocksBlocksAPI from "../blocks/blocks.mjs";
  import * as GroupsGroupsAPI from "../groups/groups.mjs";
  import * as IdentitiesAPI from "../identities/identities.mjs";
  import * as ModelsAPI from "../models/models.mjs";
+ import * as RunsAPI from "../runs/runs.mjs";
  import { APIPromise } from "../../core/api-promise.mjs";
  import { ArrayPage, type ArrayPageParams, PagePromise } from "../../core/pagination.mjs";
  import { type Uploadable } from "../../core/uploads.mjs";
@@ -45,10 +46,6 @@ export declare class Agents extends APIResource {
   * Delete an agent.
   */
  delete(agentID: string, options?: RequestOptions): APIPromise<unknown>;
- /**
-  * Get the total number of agents.
-  */
- count(options?: RequestOptions): APIPromise<AgentCountResponse>;
  /**
   * Export the serialized JSON representation of an agent, formatted with
   * indentation.
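Note that `count()` and the `AgentCountResponse` type are removed in this hunk. A minimal migration sketch, assuming the default client export is named `LettaClient` and that `agents.list()` still returns an async-iterable page (as the `ArrayPage`/`ArrayPageParams` imports above suggest); both names should be verified against the installed package:

import LettaClient from '@letta-ai/letta-client';

const client = new LettaClient();

// Hypothetical replacement for the removed agents.count():
// iterate the paginated list and tally the results client-side.
let total = 0;
for await (const _agent of client.agents.list()) {
  total += 1;
}
console.log(`total agents: ${total}`);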
@@ -111,16 +108,6 @@ export interface AgentEnvironmentVariable {
   * Representation of an agent's state. This is the state of the agent at a given
   * time, and is persisted in the DB backend. The state has all the information
   * needed to recreate a persisted agent.
-  *
-  * Parameters: id (str): The unique identifier of the agent. name (str): The name
-  * of the agent (must be unique to the user). created_at (datetime): The datetime
-  * the agent was created. message_ids (List[str]): The ids of the messages in the
-  * agent's in-context memory. memory (Memory): The in-context memory of the agent.
-  * tools (List[str]): The tools used by the agent. This includes any memory editing
-  * functions specified in `memory`. system (str): The system prompt used by the
-  * agent. llm_config (LLMConfig): The LLM configuration used by the agent.
-  * embedding_config (EmbeddingConfig): The embedding configuration used by the
-  * agent.
   */
  export interface AgentState {
  /**
@@ -136,15 +123,18 @@ export interface AgentState {
   */
  blocks: Array<BlocksAPI.Block>;
  /**
-  *
+  * @deprecated Deprecated: Use `embedding` field instead. The embedding
+  * configuration used by the agent.
   */
  embedding_config: ModelsAPI.EmbeddingConfig;
  /**
-  * The LLM configuration used by
+  * @deprecated Deprecated: Use `model` field instead. The LLM configuration used by
+  * the agent.
   */
  llm_config: ModelsAPI.LlmConfig;
  /**
-  * @deprecated The in-context memory of the
+  * @deprecated Deprecated: Use `blocks` field instead. The in-context memory of the
+  * agent.
   */
  memory: AgentState.Memory;
  /**
@@ -187,6 +177,10 @@ export interface AgentState {
   * The description of the agent.
   */
  description?: string | null;
+ /**
+  * The embedding model handle used by the agent (format: provider/model-name).
+  */
+ embedding?: string | null;
  /**
   * If set to True, memory management will move to a background agent thread.
   */
@@ -204,7 +198,8 @@ export interface AgentState {
   */
  identities?: Array<IdentitiesAPI.Identity>;
  /**
-  * @deprecated
+  * @deprecated Deprecated: Use `identities` field instead. The ids of the
+  * identities associated with this agent.
   */
  identity_ids?: Array<string>;
  /**
@@ -215,6 +210,10 @@ export interface AgentState {
   * The duration in milliseconds of the agent's last run.
   */
  last_run_duration_ms?: number | null;
+ /**
+  * The stop reason from the agent's last run.
+  */
+ last_stop_reason?: RunsAPI.StopReasonType | null;
  /**
   * The id of the user that made this object.
   */
@@ -245,7 +244,16 @@ export interface AgentState {
  [key: string]: unknown;
  } | null;
  /**
-  *
+  * The model handle used by the agent (format: provider/model-name).
+  */
+ model?: string | null;
+ /**
+  * The model settings used by the agent.
+  */
+ model_settings?: AgentState.OpenAIModelSettings | AgentState.AnthropicModelSettings | AgentState.GoogleAIModelSettings | AgentState.GoogleVertexModelSettings | AgentState.AzureModelSettings | AgentState.XaiModelSettings | AgentState.GroqModelSettings | AgentState.DeepseekModelSettings | AgentState.TogetherModelSettings | AgentState.BedrockModelSettings | null;
+ /**
+  * @deprecated Deprecated: Use `managed_group` field instead. The multi-agent group
+  * that this agent manages.
   */
  multi_agent_group?: GroupsGroupsAPI.Group | null;
  /**
@@ -258,7 +266,7 @@ export interface AgentState {
   */
  project_id?: string | null;
  /**
-  * The response format used by the agent
+  * The response format used by the agent
   */
  response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
  /**
@@ -288,7 +296,8 @@ export interface AgentState {
  }
  export declare namespace AgentState {
  /**
-  * @deprecated The in-context memory of the
+  * @deprecated Deprecated: Use `blocks` field instead. The in-context memory of the
+  * agent.
   */
  interface Memory {
  /**
@@ -404,15 +413,14 @@ export declare namespace AgentState {
  }
  }
  /**
-  * Representation of a source, which is a collection of
-  *
-  * Parameters: id (str): The ID of the source name (str): The name of the source.
-  * embedding_config (EmbeddingConfig): The embedding configuration used by the
-  * source. user_id (str): The ID of the user that created the source. metadata
-  * (dict): Metadata associated with the source. description (str): The description
-  * of the source.
+  * (Deprecated: Use Folder) Representation of a source, which is a collection of
+  * files and passages.
   */
  interface Source {
+ /**
+  * The human-friendly ID of the Source
+  */
+ id: string;
  /**
   * The embedding configuration used by the source.
   */
@@ -421,10 +429,6 @@ export declare namespace AgentState {
   * The name of the source.
   */
  name: string;
- /**
-  * The human-friendly ID of the Source
-  */
- id?: string;
  /**
   * The timestamp when the source was created.
   */
@@ -460,6 +464,316 @@ export declare namespace AgentState {
   */
  vector_db_provider?: ArchivesAPI.VectorDBProvider;
  }
+ interface OpenAIModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'openai';
+ /**
+  * The reasoning configuration for the model.
+  */
+ reasoning?: OpenAIModelSettings.Reasoning;
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ namespace OpenAIModelSettings {
+ /**
+  * The reasoning configuration for the model.
+  */
+ interface Reasoning {
+ /**
+  * The reasoning effort to use when generating text reasoning models
+  */
+ reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
+ }
+ }
+ interface AnthropicModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'anthropic';
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ /**
+  * The thinking configuration for the model.
+  */
+ thinking?: AnthropicModelSettings.Thinking;
+ /**
+  * Soft control for how verbose model output should be, used for GPT-5 models.
+  */
+ verbosity?: 'low' | 'medium' | 'high' | null;
+ }
+ namespace AnthropicModelSettings {
+ /**
+  * The thinking configuration for the model.
+  */
+ interface Thinking {
+ /**
+  * The maximum number of tokens the model can use for extended thinking.
+  */
+ budget_tokens?: number;
+ /**
+  * The type of thinking to use.
+  */
+ type?: 'enabled' | 'disabled';
+ }
+ }
+ interface GoogleAIModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'google_ai';
+ /**
+  * The response schema for the model.
+  */
+ response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ /**
+  * The thinking configuration for the model.
+  */
+ thinking_config?: GoogleAIModelSettings.ThinkingConfig;
+ }
+ namespace GoogleAIModelSettings {
+ /**
+  * The thinking configuration for the model.
+  */
+ interface ThinkingConfig {
+ /**
+  * Whether to include thoughts in the model's response.
+  */
+ include_thoughts?: boolean;
+ /**
+  * The thinking budget for the model.
+  */
+ thinking_budget?: number;
+ }
+ }
+ interface GoogleVertexModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'google_vertex';
+ /**
+  * The response schema for the model.
+  */
+ response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ /**
+  * The thinking configuration for the model.
+  */
+ thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
+ }
+ namespace GoogleVertexModelSettings {
+ /**
+  * The thinking configuration for the model.
+  */
+ interface ThinkingConfig {
+ /**
+  * Whether to include thoughts in the model's response.
+  */
+ include_thoughts?: boolean;
+ /**
+  * The thinking budget for the model.
+  */
+ thinking_budget?: number;
+ }
+ }
+ /**
+  * Azure OpenAI model configuration (OpenAI-compatible).
+  */
+ interface AzureModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'azure';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * xAI model configuration (OpenAI-compatible).
+  */
+ interface XaiModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'xai';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * Groq model configuration (OpenAI-compatible).
+  */
+ interface GroqModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'groq';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * Deepseek model configuration (OpenAI-compatible).
+  */
+ interface DeepseekModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'deepseek';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * Together AI model configuration (OpenAI-compatible).
+  */
+ interface TogetherModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'together';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * AWS Bedrock model configuration.
+  */
+ interface BedrockModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'bedrock';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
  }
  /**
   * Enum to represent the type of agent.
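The `model_settings` union added to `AgentState` above is discriminated by the `provider` literal, so it can be narrowed with a plain switch. A small type-level sketch using only the fields introduced in this hunk; the deep import path mirrors the file layout shown in this diff and may differ from the package's public entry point:

import type { AgentState } from '@letta-ai/letta-client/resources/agents/agents';

function describeModelSettings(settings: NonNullable<AgentState['model_settings']>): string {
  switch (settings.provider) {
    case 'openai':
      // Only the OpenAI settings carry a nested reasoning config.
      return `openai, reasoning effort: ${settings.reasoning?.reasoning_effort ?? 'default'}`;
    case 'anthropic':
      // Anthropic settings expose extended-thinking controls instead.
      return `anthropic, thinking: ${settings.thinking?.type ?? 'disabled'}`;
    default:
      // Every member of the union shares temperature and max_output_tokens.
      return `${settings.provider ?? 'unknown provider'}, temperature: ${settings.temperature ?? 'default'}`;
  }
}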
@@ -738,7 +1052,6 @@ export interface TextResponseFormat {
  type?: 'text';
  }
  export type AgentDeleteResponse = unknown;
- export type AgentCountResponse = number;
  export type AgentExportFileResponse = string;
  /**
   * Response model for imported agents
@@ -755,7 +1068,7 @@ export interface AgentCreateParams {
   */
  agent_type?: AgentType;
  /**
-  * The base template id of the agent.
+  * @deprecated Deprecated: No longer used. The base template id of the agent.
   */
  base_template_id?: string | null;
  /**
@@ -771,12 +1084,12 @@ export interface AgentCreateParams {
   */
  description?: string | null;
  /**
-  * The embedding
-  * provider/model-name.
+  * The embedding model handle used by the agent (format: provider/model-name).
   */
  embedding?: string | null;
  /**
-  * The embedding chunk size used by the
+  * @deprecated Deprecated: No longer used. The embedding chunk size used by the
+  * agent.
   */
  embedding_chunk_size?: number | null;
  /**
@@ -784,6 +1097,7 @@ export interface AgentCreateParams {
   */
  embedding_config?: ModelsAPI.EmbeddingConfig | null;
  /**
+  * @deprecated Deprecated: Use `model` field to configure reasoning instead.
   * Whether to enable internal extended thinking step for a reasoner model.
   */
  enable_reasoner?: boolean | null;
@@ -792,11 +1106,13 @@ export interface AgentCreateParams {
   */
  enable_sleeptime?: boolean | null;
  /**
-  * Deprecated: please use the 'create agents from a template' endpoint
+  * @deprecated Deprecated: please use the 'create agents from a template' endpoint
+  * instead.
   */
  from_template?: string | null;
  /**
-  * If set to True, the agent will be
+  * @deprecated Deprecated: No longer used. If set to True, the agent will be
+  * hidden.
   */
  hidden?: boolean | null;
  /**
@@ -813,8 +1129,8 @@ export interface AgentCreateParams {
   */
  include_base_tools?: boolean;
  /**
-  * If true, automatically creates and attaches a default data source
-  * agent.
+  * @deprecated If true, automatically creates and attaches a default data source
+  * for this agent.
   */
  include_default_source?: boolean;
  /**
@@ -836,13 +1152,13 @@ export interface AgentCreateParams {
   */
  max_files_open?: number | null;
  /**
-  *
-  *
+  * @deprecated Deprecated: Use `model` field to configure reasoning tokens instead.
+  * The maximum number of tokens to generate for reasoning step.
   */
  max_reasoning_tokens?: number | null;
  /**
-  *
-  *
+  * @deprecated Deprecated: Use `model` field to configure max output tokens
+  * instead. The maximum number of tokens to generate, including reasoning step.
   */
  max_tokens?: number | null;
  /**
@@ -850,7 +1166,8 @@ export interface AgentCreateParams {
   */
  memory_blocks?: Array<BlocksBlocksAPI.CreateBlock> | null;
  /**
-  *
+  * @deprecated Deprecated: Only relevant for creating agents from a template. Use
+  * the 'create agents from a template' endpoint instead.
   */
  memory_variables?: {
  [key: string]: string;
@@ -868,16 +1185,20 @@ export interface AgentCreateParams {
  [key: string]: unknown;
  } | null;
  /**
-  * The
-  * provider/model-name, as an alternative to specifying llm_config.
+  * The model handle for the agent to use (format: provider/model-name).
   */
  model?: string | null;
+ /**
+  * The model settings for the agent.
+  */
+ model_settings?: AgentCreateParams.OpenAIModelSettings | AgentCreateParams.AnthropicModelSettings | AgentCreateParams.GoogleAIModelSettings | AgentCreateParams.GoogleVertexModelSettings | AgentCreateParams.AzureModelSettings | AgentCreateParams.XaiModelSettings | AgentCreateParams.GroqModelSettings | AgentCreateParams.DeepseekModelSettings | AgentCreateParams.TogetherModelSettings | AgentCreateParams.BedrockModelSettings | null;
  /**
   * The name of the agent.
   */
  name?: string;
  /**
-  *
+  * @deprecated Deprecated: Use `model` field to configure parallel tool calls
+  * instead. If set to True, enables parallel tool calling.
   */
  parallel_tool_calls?: boolean | null;
  /**
@@ -887,15 +1208,17 @@ export interface AgentCreateParams {
  per_file_view_window_char_limit?: number | null;
  /**
   * @deprecated Deprecated: Project should now be passed via the X-Project header
-  * instead of in the request body. If using the
-  * x_project
+  * instead of in the request body. If using the SDK, this can be done via the
+  * x_project parameter.
   */
  project?: string | null;
  /**
-  * The id of the project the agent belongs
+  * @deprecated Deprecated: No longer used. The id of the project the agent belongs
+  * to.
   */
  project_id?: string | null;
  /**
+  * @deprecated Deprecated: Use `model` field to configure reasoning instead.
   * Whether to enable reasoning for this agent.
   */
  reasoning?: boolean | null;
@@ -922,11 +1245,12 @@ export interface AgentCreateParams {
   */
  tags?: Array<string> | null;
  /**
-  * Deprecated: No longer used
+  * @deprecated Deprecated: No longer used.
   */
  template?: boolean;
  /**
-  * The id of the template the agent belongs
+  * @deprecated Deprecated: No longer used. The id of the template the agent belongs
+  * to.
   */
  template_id?: string | null;
  /**
@@ -934,7 +1258,8 @@ export interface AgentCreateParams {
   */
  timezone?: string | null;
  /**
-  * Deprecated:
+  * @deprecated Deprecated: Use `secrets` field instead. Environment variables for
+  * tool execution.
   */
  tool_exec_environment_variables?: {
  [key: string]: string;
@@ -952,73 +1277,389 @@ export interface AgentCreateParams {
   */
  tools?: Array<string> | null;
  }
- export
- …
+ export declare namespace AgentCreateParams {
+ interface OpenAIModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'openai';
+ /**
+  * The reasoning configuration for the model.
+  */
+ reasoning?: OpenAIModelSettings.Reasoning;
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ namespace OpenAIModelSettings {
+ /**
+  * The reasoning configuration for the model.
+  */
+ interface Reasoning {
+ /**
+  * The reasoning effort to use when generating text reasoning models
+  */
+ reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
+ }
+ }
+ interface AnthropicModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'anthropic';
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ /**
+  * The thinking configuration for the model.
+  */
+ thinking?: AnthropicModelSettings.Thinking;
+ /**
+  * Soft control for how verbose model output should be, used for GPT-5 models.
+  */
+ verbosity?: 'low' | 'medium' | 'high' | null;
+ }
+ namespace AnthropicModelSettings {
+ /**
+  * The thinking configuration for the model.
+  */
+ interface Thinking {
+ /**
+  * The maximum number of tokens the model can use for extended thinking.
+  */
+ budget_tokens?: number;
+ /**
+  * The type of thinking to use.
+  */
+ type?: 'enabled' | 'disabled';
+ }
+ }
+ interface GoogleAIModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'google_ai';
+ /**
+  * The response schema for the model.
+  */
+ response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ /**
+  * The thinking configuration for the model.
+  */
+ thinking_config?: GoogleAIModelSettings.ThinkingConfig;
+ }
+ namespace GoogleAIModelSettings {
+ /**
+  * The thinking configuration for the model.
+  */
+ interface ThinkingConfig {
+ /**
+  * Whether to include thoughts in the model's response.
+  */
+ include_thoughts?: boolean;
+ /**
+  * The thinking budget for the model.
+  */
+ thinking_budget?: number;
+ }
+ }
+ interface GoogleVertexModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'google_vertex';
+ /**
+  * The response schema for the model.
+  */
+ response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ /**
+  * The thinking configuration for the model.
+  */
+ thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
+ }
+ namespace GoogleVertexModelSettings {
+ /**
+  * The thinking configuration for the model.
+  */
+ interface ThinkingConfig {
+ /**
+  * Whether to include thoughts in the model's response.
+  */
+ include_thoughts?: boolean;
+ /**
+  * The thinking budget for the model.
+  */
+ thinking_budget?: number;
+ }
+ }
+ /**
+  * Azure OpenAI model configuration (OpenAI-compatible).
+  */
+ interface AzureModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'azure';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * xAI model configuration (OpenAI-compatible).
+  */
+ interface XaiModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'xai';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * Groq model configuration (OpenAI-compatible).
+  */
+ interface GroqModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'groq';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * Deepseek model configuration (OpenAI-compatible).
+  */
+ interface DeepseekModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'deepseek';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * Together AI model configuration (OpenAI-compatible).
+  */
+ interface TogetherModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'together';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ /**
+  * AWS Bedrock model configuration.
+  */
+ interface BedrockModelSettings {
+ /**
+  * The maximum number of tokens the model can generate.
+  */
+ max_output_tokens?: number;
+ /**
+  * Whether to enable parallel tool calling.
+  */
+ parallel_tool_calls?: boolean;
+ /**
+  * The provider of the model.
+  */
+ provider?: 'bedrock';
+ /**
+  * The response format for the model.
+  */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+  * The temperature of the model.
+  */
+ temperature?: number;
+ }
+ }
+ export interface AgentRetrieveParams {
+ /**
+  * Specify which relational fields to include in the response. No relationships are
+  * included by default.
+  */
+ include?: Array<'agent.blocks' | 'agent.identities' | 'agent.managed_group' | 'agent.secrets' | 'agent.sources' | 'agent.tags' | 'agent.tools'>;
+ /**
+  * Specify which relational fields (e.g., 'tools', 'sources', 'memory') to include
+  * in the response. If not provided, all relationships are loaded by default. Using
+  * this can optimize performance by reducing unnecessary joins.This is a legacy
+  * parameter, and no longer supported after 1.0.0 SDK versions.
+  */
+ include_relationships?: Array<string> | null;
+ }
+ export interface AgentListParams extends ArrayPageParams {
+ /**
+  * @deprecated Whether to sort agents oldest to newest (True) or newest to oldest
+  * (False, default)
+  */
+ ascending?: boolean;
+ /**
+  * Search agents by base template ID
+  */
+ base_template_id?: string | null;
+ /**
+  * Search agents by identifier keys
+  */
+ identifier_keys?: Array<string> | null;
+ /**
+  * Search agents by identity ID
+  */
+ identity_id?: string | null;
+ /**
+  * Specify which relational fields to include in the response. No relationships are
+  * included by default.
+  */
+ include?: Array<'agent.blocks' | 'agent.identities' | 'agent.managed_group' | 'agent.secrets' | 'agent.sources' | 'agent.tags' | 'agent.tools'>;
+ /**
+  * Specify which relational fields (e.g., 'tools', 'sources', 'memory') to include
+  * in the response. If not provided, all relationships are loaded by default. Using
+  * this can optimize performance by reducing unnecessary joins.This is a legacy
+  * parameter, and no longer supported after 1.0.0 SDK versions.
+  */
+ include_relationships?: Array<string> | null;
+ /**
+  * Filter agents by their last stop reason.
+  */
+ last_stop_reason?: RunsAPI.StopReasonType | null;
+ /**
+  * If True, only returns agents that match ALL given tags. Otherwise, return agents
+  * that have ANY of the passed-in tags.
+  */
+ match_all_tags?: boolean;
+ /**
+  * Name of the agent
+  */
+ name?: string | null;
+ /**
+  * Search agents by project ID - this will default to your default project on cloud
+  */
+ project_id?: string | null;
+ /**
+  * Search agents by name
+  */
+ query_text?: string | null;
+ /**
+  * @deprecated Field to sort by. Options: 'created_at' (default),
+  * 'last_run_completion'
+  */
+ sort_by?: string | null;
+ /**
   * List of tags to filter agents by
   */
  tags?: Array<string> | null;
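A short usage sketch tying the new create-time and retrieve-time options together. The resource method names (`agents.create`, `agents.retrieve`), the client export name, and the concrete model handle are assumptions based on the SDK's conventions and the handle format documented above (provider/model-name); only the parameter shapes come from this diff:

import LettaClient from '@letta-ai/letta-client';

const client = new LettaClient();

// Create an agent using the new `model` handle plus provider-specific `model_settings`.
const agent = await client.agents.create({
  name: 'support-agent',
  model: 'anthropic/claude-sonnet-4', // placeholder handle in provider/model-name format
  model_settings: {
    provider: 'anthropic',
    temperature: 0.2,
    thinking: { type: 'enabled', budget_tokens: 2048 },
  },
});

// AgentRetrieveParams.include selects relational fields explicitly;
// the legacy include_relationships parameter is flagged as unsupported after 1.0.0.
const detailed = await client.agents.retrieve(agent.id, {
  include: ['agent.blocks', 'agent.tools'],
});
console.log(detailed.name);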
@@ -1099,8 +1740,7 @@ export interface AgentModifyParams {
   */
  description?: string | null;
  /**
-  * The embedding
-  * provider/model-name.
+  * The embedding model handle used by the agent (format: provider/model-name).
   */
  embedding?: string | null;
  /**
@@ -1127,6 +1767,10 @@ export interface AgentModifyParams {
|
|
|
1127
1767
|
* The duration in milliseconds of the agent's last run.
|
|
1128
1768
|
*/
|
|
1129
1769
|
last_run_duration_ms?: number | null;
|
|
1770
|
+
/**
|
|
1771
|
+
* The stop reason from the agent's last run.
|
|
1772
|
+
*/
|
|
1773
|
+
last_stop_reason?: RunsAPI.StopReasonType | null;
|
|
1130
1774
|
/**
|
|
1131
1775
|
* Configuration for Language Model (LLM) connection and generation parameters.
|
|
1132
1776
|
*/
|
|
@@ -1137,8 +1781,8 @@ export interface AgentModifyParams {
|
|
|
1137
1781
|
*/
|
|
1138
1782
|
max_files_open?: number | null;
|
|
1139
1783
|
/**
|
|
1140
|
-
*
|
|
1141
|
-
*
|
|
1784
|
+
* @deprecated Deprecated: Use `model` field to configure max output tokens
|
|
1785
|
+
* instead. The maximum number of tokens to generate, including reasoning step.
|
|
1142
1786
|
*/
|
|
1143
1787
|
max_tokens?: number | null;
|
|
1144
1788
|
/**
|
|
@@ -1158,16 +1802,20 @@ export interface AgentModifyParams {
|
|
|
1158
1802
|
[key: string]: unknown;
|
|
1159
1803
|
} | null;
|
|
1160
1804
|
/**
|
|
1161
|
-
* The
|
|
1162
|
-
* provider/model-name, as an alternative to specifying llm_config.
|
|
1805
|
+
* The model handle used by the agent (format: provider/model-name).
|
|
1163
1806
|
*/
|
|
1164
1807
|
model?: string | null;
|
|
1808
|
+
/**
|
|
1809
|
+
* The model settings for the agent.
|
|
1810
|
+
*/
|
|
1811
|
+
model_settings?: AgentModifyParams.OpenAIModelSettings | AgentModifyParams.AnthropicModelSettings | AgentModifyParams.GoogleAIModelSettings | AgentModifyParams.GoogleVertexModelSettings | AgentModifyParams.AzureModelSettings | AgentModifyParams.XaiModelSettings | AgentModifyParams.GroqModelSettings | AgentModifyParams.DeepseekModelSettings | AgentModifyParams.TogetherModelSettings | AgentModifyParams.BedrockModelSettings | null;
|
|
1165
1812
|
/**
|
|
1166
1813
|
* The name of the agent.
|
|
1167
1814
|
*/
|
|
1168
1815
|
name?: string | null;
|
|
1169
1816
|
/**
|
|
1170
|
-
*
|
|
1817
|
+
* @deprecated Deprecated: Use `model` field to configure parallel tool calls
|
|
1818
|
+
* instead. If set to True, enables parallel tool calling.
|
|
1171
1819
|
*/
|
|
1172
1820
|
parallel_tool_calls?: boolean | null;
|
|
1173
1821
|
/**
|
|
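With the reworded docs, `model` and `embedding` on AgentModifyParams are plain provider/model-name handles, and the new `model_settings` union carries provider-specific generation options. A hedged sketch of such a payload; the import subpath, the concrete handles, and the `modify` call are assumptions, while the field names come from the declarations above:

```ts
import type { AgentModifyParams } from '@letta-ai/letta-client/resources/agents/agents'; // subpath export assumed

// Provider/model-name handles plus OpenAI-specific settings; all values are illustrative.
const params: AgentModifyParams = {
  model: 'openai/gpt-4o-mini',
  embedding: 'openai/text-embedding-3-small',
  model_settings: {
    provider: 'openai',
    max_output_tokens: 2048, // replaces the deprecated top-level max_tokens
    parallel_tool_calls: true, // replaces the deprecated top-level parallel_tool_calls
    temperature: 0.2,
    reasoning: { reasoning_effort: 'low' },
  },
};

// e.g. await client.agents.modify('agent_id_here', params); // method name assumed
```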
@@ -1180,10 +1828,12 @@ export interface AgentModifyParams {
  */
  project_id?: string | null;
  /**
+ * @deprecated Deprecated: Use `model` field to configure reasoning instead.
  * Whether to enable reasoning for this agent.
  */
  reasoning?: boolean | null;
  /**
+ * @deprecated Deprecated: Use `model` field to configure response format instead.
  * The response format for the agent.
  */
  response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
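Both `reasoning` and `response_format` on AgentModifyParams now carry `@deprecated` tags pointing at model-level configuration. Read together with the `model_settings` union above, the same options would move into the provider settings block, for example (assuming `TextResponseFormat` keeps a `{ type: 'text' }` discriminant, which is not shown in this diff):

```ts
// Instead of the deprecated top-level response_format / reasoning fields:
const modelSettings = {
  provider: 'openai' as const,
  response_format: { type: 'text' as const }, // TextResponseFormat shape assumed
  reasoning: { reasoning_effort: 'medium' as const },
};
```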
@@ -1228,13 +1878,325 @@ export interface AgentModifyParams {
  */
  tool_rules?: Array<ChildToolRule | InitToolRule | TerminalToolRule | ConditionalToolRule | ContinueToolRule | RequiredBeforeExitToolRule | MaxCountPerStepToolRule | ParentToolRule | RequiresApprovalToolRule> | null;
  }
+ export declare namespace AgentModifyParams {
+ interface OpenAIModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'openai';
+ /**
+ * The reasoning configuration for the model.
+ */
+ reasoning?: OpenAIModelSettings.Reasoning;
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ namespace OpenAIModelSettings {
+ /**
+ * The reasoning configuration for the model.
+ */
+ interface Reasoning {
+ /**
+ * The reasoning effort to use when generating text reasoning models
+ */
+ reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
+ }
+ }
+ interface AnthropicModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'anthropic';
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ /**
+ * The thinking configuration for the model.
+ */
+ thinking?: AnthropicModelSettings.Thinking;
+ /**
+ * Soft control for how verbose model output should be, used for GPT-5 models.
+ */
+ verbosity?: 'low' | 'medium' | 'high' | null;
+ }
+ namespace AnthropicModelSettings {
+ /**
+ * The thinking configuration for the model.
+ */
+ interface Thinking {
+ /**
+ * The maximum number of tokens the model can use for extended thinking.
+ */
+ budget_tokens?: number;
+ /**
+ * The type of thinking to use.
+ */
+ type?: 'enabled' | 'disabled';
+ }
+ }
+ interface GoogleAIModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'google_ai';
+ /**
+ * The response schema for the model.
+ */
+ response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ /**
+ * The thinking configuration for the model.
+ */
+ thinking_config?: GoogleAIModelSettings.ThinkingConfig;
+ }
+ namespace GoogleAIModelSettings {
+ /**
+ * The thinking configuration for the model.
+ */
+ interface ThinkingConfig {
+ /**
+ * Whether to include thoughts in the model's response.
+ */
+ include_thoughts?: boolean;
+ /**
+ * The thinking budget for the model.
+ */
+ thinking_budget?: number;
+ }
+ }
+ interface GoogleVertexModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'google_vertex';
+ /**
+ * The response schema for the model.
+ */
+ response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ /**
+ * The thinking configuration for the model.
+ */
+ thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
+ }
+ namespace GoogleVertexModelSettings {
+ /**
+ * The thinking configuration for the model.
+ */
+ interface ThinkingConfig {
+ /**
+ * Whether to include thoughts in the model's response.
+ */
+ include_thoughts?: boolean;
+ /**
+ * The thinking budget for the model.
+ */
+ thinking_budget?: number;
+ }
+ }
+ /**
+ * Azure OpenAI model configuration (OpenAI-compatible).
+ */
+ interface AzureModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'azure';
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ /**
+ * xAI model configuration (OpenAI-compatible).
+ */
+ interface XaiModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'xai';
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ /**
+ * Groq model configuration (OpenAI-compatible).
+ */
+ interface GroqModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'groq';
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ /**
+ * Deepseek model configuration (OpenAI-compatible).
+ */
+ interface DeepseekModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'deepseek';
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ /**
+ * Together AI model configuration (OpenAI-compatible).
+ */
+ interface TogetherModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'together';
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ /**
+ * AWS Bedrock model configuration.
+ */
+ interface BedrockModelSettings {
+ /**
+ * The maximum number of tokens the model can generate.
+ */
+ max_output_tokens?: number;
+ /**
+ * Whether to enable parallel tool calling.
+ */
+ parallel_tool_calls?: boolean;
+ /**
+ * The provider of the model.
+ */
+ provider?: 'bedrock';
+ /**
+ * The response format for the model.
+ */
+ response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+ /**
+ * The temperature of the model.
+ */
+ temperature?: number;
+ }
+ }
  export declare namespace Agents {
- export { type AgentEnvironmentVariable as AgentEnvironmentVariable, type AgentState as AgentState, type AgentType as AgentType, type ChildToolRule as ChildToolRule, type ConditionalToolRule as ConditionalToolRule, type ContinueToolRule as ContinueToolRule, type InitToolRule as InitToolRule, type JsonObjectResponseFormat as JsonObjectResponseFormat, type JsonSchemaResponseFormat as JsonSchemaResponseFormat, type LettaMessageContentUnion as LettaMessageContentUnion, type MaxCountPerStepToolRule as MaxCountPerStepToolRule, type MessageCreate as MessageCreate, type ParentToolRule as ParentToolRule, type RequiredBeforeExitToolRule as RequiredBeforeExitToolRule, type RequiresApprovalToolRule as RequiresApprovalToolRule, type TerminalToolRule as TerminalToolRule, type TextResponseFormat as TextResponseFormat, type AgentDeleteResponse as AgentDeleteResponse, type
- export { Tools as Tools, type
- export { Folders as Folders, type FolderListResponse as FolderListResponse, type FolderListParams as FolderListParams, type FolderAttachParams as FolderAttachParams, type FolderDetachParams as FolderDetachParams, };
+ export { type AgentEnvironmentVariable as AgentEnvironmentVariable, type AgentState as AgentState, type AgentType as AgentType, type ChildToolRule as ChildToolRule, type ConditionalToolRule as ConditionalToolRule, type ContinueToolRule as ContinueToolRule, type InitToolRule as InitToolRule, type JsonObjectResponseFormat as JsonObjectResponseFormat, type JsonSchemaResponseFormat as JsonSchemaResponseFormat, type LettaMessageContentUnion as LettaMessageContentUnion, type MaxCountPerStepToolRule as MaxCountPerStepToolRule, type MessageCreate as MessageCreate, type ParentToolRule as ParentToolRule, type RequiredBeforeExitToolRule as RequiredBeforeExitToolRule, type RequiresApprovalToolRule as RequiresApprovalToolRule, type TerminalToolRule as TerminalToolRule, type TextResponseFormat as TextResponseFormat, type AgentDeleteResponse as AgentDeleteResponse, type AgentExportFileResponse as AgentExportFileResponse, type AgentImportFileResponse as AgentImportFileResponse, type AgentStatesArrayPage as AgentStatesArrayPage, type AgentCreateParams as AgentCreateParams, type AgentRetrieveParams as AgentRetrieveParams, type AgentListParams as AgentListParams, type AgentExportFileParams as AgentExportFileParams, type AgentImportFileParams as AgentImportFileParams, type AgentModifyParams as AgentModifyParams, };
+ export { Tools as Tools, type ToolListParams as ToolListParams, type ToolAttachParams as ToolAttachParams, type ToolDetachParams as ToolDetachParams, type ToolUpdateApprovalParams as ToolUpdateApprovalParams, };
+ export { Folders as Folders, type FolderListResponse as FolderListResponse, type FolderListResponsesArrayPage as FolderListResponsesArrayPage, type FolderListParams as FolderListParams, type FolderAttachParams as FolderAttachParams, type FolderDetachParams as FolderDetachParams, };
  export { Files as Files, type FileListResponse as FileListResponse, type FileCloseResponse as FileCloseResponse, type FileCloseAllResponse as FileCloseAllResponse, type FileOpenResponse as FileOpenResponse, type FileListResponsesNextFilesPage as FileListResponsesNextFilesPage, type FileListParams as FileListParams, type FileCloseParams as FileCloseParams, type FileOpenParams as FileOpenParams, };
  export { Blocks as Blocks, type Block as Block, type BlockModify as BlockModify, type BlockRetrieveParams as BlockRetrieveParams, type BlockListParams as BlockListParams, type BlockAttachParams as BlockAttachParams, type BlockDetachParams as BlockDetachParams, type BlockModifyParams as BlockModifyParams, };
  export { Groups as Groups, type GroupListParams as GroupListParams };
- export { Messages as Messages, type ApprovalCreate as ApprovalCreate, type ApprovalRequestMessage as ApprovalRequestMessage, type ApprovalResponseMessage as ApprovalResponseMessage, type AssistantMessage as AssistantMessage, type EventMessage as EventMessage, type HiddenReasoningMessage as HiddenReasoningMessage, type ImageContent as ImageContent, type JobStatus as JobStatus, type JobType as JobType, type LettaAssistantMessageContentUnion as LettaAssistantMessageContentUnion, type LettaMessageUnion as LettaMessageUnion, type LettaRequest as LettaRequest, type LettaResponse as LettaResponse, type LettaStreamingRequest as LettaStreamingRequest, type LettaStreamingResponse as LettaStreamingResponse, type LettaUserMessageContentUnion as LettaUserMessageContentUnion, type Message as Message, type MessageRole as MessageRole, type MessageType as MessageType, type OmittedReasoningContent as OmittedReasoningContent, type ReasoningContent as ReasoningContent, type ReasoningMessage as ReasoningMessage, type RedactedReasoningContent as RedactedReasoningContent, type Run as Run, type SummaryMessage as SummaryMessage, type SystemMessage as SystemMessage, type TextContent as TextContent, type ToolCall as ToolCall, type ToolCallContent as ToolCallContent, type ToolCallDelta as ToolCallDelta, type ToolCallMessage as ToolCallMessage, type ToolReturn as ToolReturn, type ToolReturnContent as ToolReturnContent, type UpdateAssistantMessage as UpdateAssistantMessage, type UpdateReasoningMessage as UpdateReasoningMessage, type UpdateSystemMessage as UpdateSystemMessage, type UpdateUserMessage as UpdateUserMessage, type UserMessage as UserMessage, type MessageCancelResponse as MessageCancelResponse, type MessageModifyResponse as MessageModifyResponse, type LettaMessageUnionsArrayPage as LettaMessageUnionsArrayPage, type MessageListParams as MessageListParams, type MessageCancelParams as MessageCancelParams, type MessageModifyParams as MessageModifyParams, type MessageResetParams as MessageResetParams, type MessageSendParams as MessageSendParams, type MessageSendAsyncParams as MessageSendAsyncParams, type MessageStreamParams as MessageStreamParams, };
+ export { Messages as Messages, type ApprovalCreate as ApprovalCreate, type ApprovalRequestMessage as ApprovalRequestMessage, type ApprovalResponseMessage as ApprovalResponseMessage, type AssistantMessage as AssistantMessage, type EventMessage as EventMessage, type HiddenReasoningMessage as HiddenReasoningMessage, type ImageContent as ImageContent, type JobStatus as JobStatus, type JobType as JobType, type LettaAssistantMessageContentUnion as LettaAssistantMessageContentUnion, type LettaMessageUnion as LettaMessageUnion, type LettaRequest as LettaRequest, type LettaResponse as LettaResponse, type LettaStreamingRequest as LettaStreamingRequest, type LettaStreamingResponse as LettaStreamingResponse, type LettaUserMessageContentUnion as LettaUserMessageContentUnion, type Message as Message, type MessageRole as MessageRole, type MessageType as MessageType, type OmittedReasoningContent as OmittedReasoningContent, type ReasoningContent as ReasoningContent, type ReasoningMessage as ReasoningMessage, type RedactedReasoningContent as RedactedReasoningContent, type Run as Run, type SummaryMessage as SummaryMessage, type SystemMessage as SystemMessage, type TextContent as TextContent, type ToolCall as ToolCall, type ToolCallContent as ToolCallContent, type ToolCallDelta as ToolCallDelta, type ToolCallMessage as ToolCallMessage, type ToolReturn as ToolReturn, type ToolReturnContent as ToolReturnContent, type UpdateAssistantMessage as UpdateAssistantMessage, type UpdateReasoningMessage as UpdateReasoningMessage, type UpdateSystemMessage as UpdateSystemMessage, type UpdateUserMessage as UpdateUserMessage, type UserMessage as UserMessage, type MessageCancelResponse as MessageCancelResponse, type MessageModifyResponse as MessageModifyResponse, type LettaMessageUnionsArrayPage as LettaMessageUnionsArrayPage, type MessageListParams as MessageListParams, type MessageCancelParams as MessageCancelParams, type MessageModifyParams as MessageModifyParams, type MessageResetParams as MessageResetParams, type MessageSendParams as MessageSendParams, type MessageSendParamsNonStreaming as MessageSendParamsNonStreaming, type MessageSendParamsStreaming as MessageSendParamsStreaming, type MessageSendAsyncParams as MessageSendAsyncParams, type MessageStreamParams as MessageStreamParams, };
  }
  //# sourceMappingURL=agents.d.mts.map
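Each interface in the new `AgentModifyParams` namespace mirrors its provider's native knobs: the OpenAI-compatible providers expose `response_format`, Anthropic exposes a `thinking` budget and `verbosity`, and the Google providers expose `thinking_config`. A small comparison sketch using only field names from the declarations above (the numeric values are illustrative):

```ts
// Anthropic: extended thinking with an explicit token budget.
const anthropicSettings = {
  provider: 'anthropic' as const,
  max_output_tokens: 4096,
  thinking: { type: 'enabled' as const, budget_tokens: 2048 },
};

// Google AI: surface the model's thoughts and cap the thinking budget.
const googleSettings = {
  provider: 'google_ai' as const,
  thinking_config: { include_thoughts: true, thinking_budget: 1024 },
};
```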