@letta-ai/letta-client 1.0.0-alpha.17 → 1.0.0-alpha.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. package/CHANGELOG.md +35 -0
  2. package/README.md +5 -15
  3. package/client.d.mts +20 -20
  4. package/client.d.mts.map +1 -1
  5. package/client.d.ts +20 -20
  6. package/client.d.ts.map +1 -1
  7. package/client.js +29 -23
  8. package/client.js.map +1 -1
  9. package/client.mjs +29 -23
  10. package/client.mjs.map +1 -1
  11. package/package.json +1 -1
  12. package/resources/agents/agents.d.mts +319 -935
  13. package/resources/agents/agents.d.mts.map +1 -1
  14. package/resources/agents/agents.d.ts +319 -935
  15. package/resources/agents/agents.d.ts.map +1 -1
  16. package/resources/agents/agents.js +12 -4
  17. package/resources/agents/agents.js.map +1 -1
  18. package/resources/agents/agents.mjs +12 -4
  19. package/resources/agents/agents.mjs.map +1 -1
  20. package/resources/agents/archives.d.mts +31 -0
  21. package/resources/agents/archives.d.mts.map +1 -0
  22. package/resources/agents/archives.d.ts +31 -0
  23. package/resources/agents/archives.d.ts.map +1 -0
  24. package/resources/agents/archives.js +24 -0
  25. package/resources/agents/archives.js.map +1 -0
  26. package/resources/agents/archives.mjs +20 -0
  27. package/resources/agents/archives.mjs.map +1 -0
  28. package/resources/agents/folders.d.mts +1 -1
  29. package/resources/agents/folders.d.mts.map +1 -1
  30. package/resources/agents/folders.d.ts +1 -1
  31. package/resources/agents/folders.d.ts.map +1 -1
  32. package/resources/agents/identities.d.mts +31 -0
  33. package/resources/agents/identities.d.mts.map +1 -0
  34. package/resources/agents/identities.d.ts +31 -0
  35. package/resources/agents/identities.d.ts.map +1 -0
  36. package/resources/agents/identities.js +24 -0
  37. package/resources/agents/identities.js.map +1 -0
  38. package/resources/agents/identities.mjs +20 -0
  39. package/resources/agents/identities.mjs.map +1 -0
  40. package/resources/agents/index.d.mts +4 -2
  41. package/resources/agents/index.d.mts.map +1 -1
  42. package/resources/agents/index.d.ts +4 -2
  43. package/resources/agents/index.d.ts.map +1 -1
  44. package/resources/agents/index.js +5 -1
  45. package/resources/agents/index.js.map +1 -1
  46. package/resources/agents/index.mjs +2 -0
  47. package/resources/agents/index.mjs.map +1 -1
  48. package/resources/agents/messages.d.mts +244 -284
  49. package/resources/agents/messages.d.mts.map +1 -1
  50. package/resources/agents/messages.d.ts +244 -284
  51. package/resources/agents/messages.d.ts.map +1 -1
  52. package/resources/agents/messages.js.map +1 -1
  53. package/resources/agents/messages.mjs.map +1 -1
  54. package/resources/archives/archives.d.mts +111 -0
  55. package/resources/archives/archives.d.mts.map +1 -0
  56. package/resources/archives/archives.d.ts +111 -0
  57. package/resources/archives/archives.d.ts.map +1 -0
  58. package/resources/archives/archives.js +50 -0
  59. package/resources/archives/archives.js.map +1 -0
  60. package/resources/archives/archives.mjs +45 -0
  61. package/resources/archives/archives.mjs.map +1 -0
  62. package/resources/archives/index.d.mts +3 -0
  63. package/resources/archives/index.d.mts.map +1 -0
  64. package/resources/archives/index.d.ts +3 -0
  65. package/resources/archives/index.d.ts.map +1 -0
  66. package/resources/archives/index.js +9 -0
  67. package/resources/archives/index.js.map +1 -0
  68. package/resources/archives/index.mjs +4 -0
  69. package/resources/archives/index.mjs.map +1 -0
  70. package/resources/archives/passages.d.mts +112 -0
  71. package/resources/archives/passages.d.mts.map +1 -0
  72. package/resources/archives/passages.d.ts +112 -0
  73. package/resources/archives/passages.d.ts.map +1 -0
  74. package/resources/archives/passages.js +32 -0
  75. package/resources/archives/passages.js.map +1 -0
  76. package/resources/archives/passages.mjs +28 -0
  77. package/resources/archives/passages.mjs.map +1 -0
  78. package/resources/archives.d.mts +1 -106
  79. package/resources/archives.d.mts.map +1 -1
  80. package/resources/archives.d.ts +1 -106
  81. package/resources/archives.d.ts.map +1 -1
  82. package/resources/archives.js +2 -38
  83. package/resources/archives.js.map +1 -1
  84. package/resources/archives.mjs +1 -36
  85. package/resources/archives.mjs.map +1 -1
  86. package/resources/batches/messages.d.mts +3 -3
  87. package/resources/batches/messages.d.mts.map +1 -1
  88. package/resources/batches/messages.d.ts +3 -3
  89. package/resources/batches/messages.d.ts.map +1 -1
  90. package/resources/batches/messages.js.map +1 -1
  91. package/resources/batches/messages.mjs.map +1 -1
  92. package/resources/groups/messages.d.mts +3 -3
  93. package/resources/groups/messages.d.mts.map +1 -1
  94. package/resources/groups/messages.d.ts +3 -3
  95. package/resources/groups/messages.d.ts.map +1 -1
  96. package/resources/groups/messages.js +4 -1
  97. package/resources/groups/messages.js.map +1 -1
  98. package/resources/groups/messages.mjs +4 -1
  99. package/resources/groups/messages.mjs.map +1 -1
  100. package/resources/index.d.mts +2 -2
  101. package/resources/index.d.mts.map +1 -1
  102. package/resources/index.d.ts +2 -2
  103. package/resources/index.d.ts.map +1 -1
  104. package/resources/index.js +1 -1
  105. package/resources/index.js.map +1 -1
  106. package/resources/index.mjs +1 -1
  107. package/resources/index.mjs.map +1 -1
  108. package/resources/runs/messages.d.mts +3 -3
  109. package/resources/runs/messages.d.mts.map +1 -1
  110. package/resources/runs/messages.d.ts +3 -3
  111. package/resources/runs/messages.d.ts.map +1 -1
  112. package/resources/runs/messages.js +4 -1
  113. package/resources/runs/messages.js.map +1 -1
  114. package/resources/runs/messages.mjs +4 -1
  115. package/resources/runs/messages.mjs.map +1 -1
  116. package/resources/steps/steps.d.mts +1 -1
  117. package/resources/steps/steps.d.mts.map +1 -1
  118. package/resources/steps/steps.d.ts +1 -1
  119. package/resources/steps/steps.d.ts.map +1 -1
  120. package/src/client.ts +156 -130
  121. package/src/resources/agents/agents.ts +721 -1527
  122. package/src/resources/agents/archives.ts +51 -0
  123. package/src/resources/agents/folders.ts +1 -1
  124. package/src/resources/agents/identities.ts +51 -0
  125. package/src/resources/agents/index.ts +28 -3
  126. package/src/resources/agents/messages.ts +377 -424
  127. package/src/resources/archives/archives.ts +165 -0
  128. package/src/resources/archives/index.ts +17 -0
  129. package/src/resources/archives/passages.ts +149 -0
  130. package/src/resources/archives.ts +1 -150
  131. package/src/resources/batches/messages.ts +4 -4
  132. package/src/resources/groups/messages.ts +7 -8
  133. package/src/resources/index.ts +11 -1
  134. package/src/resources/runs/messages.ts +7 -8
  135. package/src/resources/steps/steps.ts +1 -1
  136. package/src/version.ts +1 -1
  137. package/version.d.mts +1 -1
  138. package/version.d.ts +1 -1
  139. package/version.js +1 -1
  140. package/version.mjs +1 -1
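
The diff body below is package/resources/agents/agents.d.ts (item 14; the trailing sourceMappingURL comment and the +319/-935 count match). Its headline change is that agents gain archives and identities sub-resources, mirroring the new files under package/resources/agents/. A minimal usage sketch, assuming attach/detach methods whose call shape is only inferred from the exported ArchiveAttachParams and IdentityAttachParams types (the real signatures live in archives.d.ts and identities.d.ts, which are not expanded in this diff):

import Letta from '@letta-ai/letta-client';

const client = new Letta(); // auth/config options omitted

async function attachAgentResources(agentId: string, archiveId: string, identityId: string) {
  // Hypothetical call shapes: inferred from the ArchiveAttachParams /
  // IdentityAttachParams exports added in this release; verify against
  // archives.d.ts / identities.d.ts before relying on them.
  const archive = await client.agents.archives.attach(archiveId, { agent_id: agentId });
  const identity = await client.agents.identities.attach(identityId, { agent_id: agentId });
  return { archive, identity };
}
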
@@ -1,7 +1,8 @@
  import { APIResource } from "../../core/resource.js";
  import * as AgentsAPI from "./agents.js";
- import * as ArchivesAPI from "../archives.js";
  import * as ToolsAPI from "../tools.js";
+ import * as ArchivesAPI from "./archives.js";
+ import { ArchiveAttachParams, ArchiveAttachResponse, ArchiveDetachParams, ArchiveDetachResponse, Archives } from "./archives.js";
  import * as BlocksAPI from "./blocks.js";
  import { Block, BlockAttachParams, BlockDetachParams, BlockListParams, BlockModify, BlockModifyParams, BlockRetrieveParams, Blocks } from "./blocks.js";
  import * as FilesAPI from "./files.js";
@@ -10,13 +11,16 @@ import * as FoldersAPI from "./folders.js";
  import { FolderAttachParams, FolderDetachParams, FolderListParams, FolderListResponse, FolderListResponsesArrayPage, Folders } from "./folders.js";
  import * as GroupsAPI from "./groups.js";
  import { GroupListParams, Groups } from "./groups.js";
+ import * as IdentitiesAPI from "./identities.js";
+ import { Identities, IdentityAttachParams, IdentityAttachResponse, IdentityDetachParams, IdentityDetachResponse } from "./identities.js";
  import * as MessagesAPI from "./messages.js";
- import { ApprovalCreate, ApprovalRequestMessage, ApprovalResponseMessage, AssistantMessage, EventMessage, HiddenReasoningMessage, ImageContent, JobStatus, JobType, LettaAssistantMessageContentUnion, LettaMessageUnion, LettaMessageUnionsArrayPage, LettaRequest, LettaResponse, LettaStreamingRequest, LettaStreamingResponse, LettaUserMessageContentUnion, Message, MessageCancelParams, MessageCancelResponse, MessageListParams, MessageModifyParams, MessageModifyResponse, MessageResetParams, MessageRole, MessageSendAsyncParams, MessageSendParams, MessageSendParamsNonStreaming, MessageSendParamsStreaming, MessageStreamParams, MessageType, Messages, OmittedReasoningContent, ReasoningContent, ReasoningMessage, RedactedReasoningContent, Run, SummaryMessage, SystemMessage, TextContent, ToolCall, ToolCallContent, ToolCallDelta, ToolCallMessage, ToolReturn, ToolReturnContent, UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage, UserMessage } from "./messages.js";
+ import { ApprovalCreate, ApprovalRequestMessage, ApprovalResponseMessage, ApprovalReturn, AssistantMessage, EventMessage, HiddenReasoningMessage, ImageContent, InternalMessage, JobStatus, JobType, LettaAssistantMessageContentUnion, LettaRequest, LettaResponse, LettaStreamingRequest, LettaStreamingResponse, LettaUserMessageContentUnion, Message, MessageCancelParams, MessageCancelResponse, MessageListParams, MessageModifyParams, MessageModifyResponse, MessageResetParams, MessageRole, MessageSendAsyncParams, MessageSendParams, MessageSendParamsNonStreaming, MessageSendParamsStreaming, MessageStreamParams, MessageType, Messages, MessagesArrayPage, OmittedReasoningContent, ReasoningContent, ReasoningMessage, RedactedReasoningContent, Run, SummaryMessage, SystemMessage, TextContent, ToolCall, ToolCallContent, ToolCallDelta, ToolCallMessage, ToolReturn, ToolReturnContent, UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage, UserMessage } from "./messages.js";
  import * as AgentsToolsAPI from "./tools.js";
  import { ToolAttachParams, ToolDetachParams, ToolListParams, ToolUpdateApprovalParams, Tools } from "./tools.js";
+ import * as ArchivesArchivesAPI from "../archives/archives.js";
  import * as BlocksBlocksAPI from "../blocks/blocks.js";
  import * as GroupsGroupsAPI from "../groups/groups.js";
- import * as IdentitiesAPI from "../identities/identities.js";
+ import * as IdentitiesIdentitiesAPI from "../identities/identities.js";
  import * as ModelsAPI from "../models/models.js";
  import * as RunsAPI from "../runs/runs.js";
  import { APIPromise } from "../../core/api-promise.js";
@@ -24,12 +28,14 @@ import { ArrayPage, type ArrayPageParams, PagePromise } from "../../core/paginat
  import { type Uploadable } from "../../core/uploads.js";
  import { RequestOptions } from "../../internal/request-options.js";
  export declare class Agents extends APIResource {
+ messages: MessagesAPI.Messages;
+ blocks: BlocksAPI.Blocks;
  tools: AgentsToolsAPI.Tools;
  folders: FoldersAPI.Folders;
  files: FilesAPI.Files;
- blocks: BlocksAPI.Blocks;
  groups: GroupsAPI.Groups;
- messages: MessagesAPI.Messages;
+ archives: ArchivesAPI.Archives;
+ identities: IdentitiesAPI.Identities;
  /**
  * Create an agent.
  */
@@ -196,7 +202,7 @@ export interface AgentState {
  /**
  * The identities associated with this agent.
  */
- identities?: Array<IdentitiesAPI.Identity>;
+ identities?: Array<IdentitiesIdentitiesAPI.Identity>;
  /**
  * @deprecated Deprecated: Use `identities` field instead. The ids of the
  * identities associated with this agent.
@@ -250,7 +256,7 @@ export interface AgentState {
  /**
  * The model settings used by the agent.
  */
- model_settings?: AgentState.OpenAIModelSettings | AgentState.AnthropicModelSettings | AgentState.GoogleAIModelSettings | AgentState.GoogleVertexModelSettings | AgentState.AzureModelSettings | AgentState.XaiModelSettings | AgentState.GroqModelSettings | AgentState.DeepseekModelSettings | AgentState.TogetherModelSettings | AgentState.BedrockModelSettings | null;
+ model_settings?: OpenAIModelSettings | AnthropicModelSettings | GoogleAIModelSettings | GoogleVertexModelSettings | AzureModelSettings | XaiModelSettings | GroqModelSettings | DeepseekModelSettings | TogetherModelSettings | BedrockModelSettings | null;
  /**
  * @deprecated Deprecated: Use `managed_group` field instead. The multi-agent group
  * that this agent manages.
@@ -462,323 +468,104 @@ export declare namespace AgentState {
462
468
  /**
463
469
  * The vector database provider used for this source's passages
464
470
  */
465
- vector_db_provider?: ArchivesAPI.VectorDBProvider;
466
- }
467
- interface OpenAIModelSettings {
468
- /**
469
- * The maximum number of tokens the model can generate.
470
- */
471
- max_output_tokens?: number;
472
- /**
473
- * Whether to enable parallel tool calling.
474
- */
475
- parallel_tool_calls?: boolean;
476
- /**
477
- * The provider of the model.
478
- */
479
- provider?: 'openai';
480
- /**
481
- * The reasoning configuration for the model.
482
- */
483
- reasoning?: OpenAIModelSettings.Reasoning;
484
- /**
485
- * The response format for the model.
486
- */
487
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
488
- /**
489
- * The temperature of the model.
490
- */
491
- temperature?: number;
492
- }
493
- namespace OpenAIModelSettings {
494
- /**
495
- * The reasoning configuration for the model.
496
- */
497
- interface Reasoning {
498
- /**
499
- * The reasoning effort to use when generating text reasoning models
500
- */
501
- reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
502
- }
503
- }
504
- interface AnthropicModelSettings {
505
- /**
506
- * The maximum number of tokens the model can generate.
507
- */
508
- max_output_tokens?: number;
509
- /**
510
- * Whether to enable parallel tool calling.
511
- */
512
- parallel_tool_calls?: boolean;
513
- /**
514
- * The provider of the model.
515
- */
516
- provider?: 'anthropic';
517
- /**
518
- * The temperature of the model.
519
- */
520
- temperature?: number;
521
- /**
522
- * The thinking configuration for the model.
523
- */
524
- thinking?: AnthropicModelSettings.Thinking;
525
- /**
526
- * Soft control for how verbose model output should be, used for GPT-5 models.
527
- */
528
- verbosity?: 'low' | 'medium' | 'high' | null;
529
- }
530
- namespace AnthropicModelSettings {
531
- /**
532
- * The thinking configuration for the model.
533
- */
534
- interface Thinking {
535
- /**
536
- * The maximum number of tokens the model can use for extended thinking.
537
- */
538
- budget_tokens?: number;
539
- /**
540
- * The type of thinking to use.
541
- */
542
- type?: 'enabled' | 'disabled';
543
- }
544
- }
545
- interface GoogleAIModelSettings {
546
- /**
547
- * The maximum number of tokens the model can generate.
548
- */
549
- max_output_tokens?: number;
550
- /**
551
- * Whether to enable parallel tool calling.
552
- */
553
- parallel_tool_calls?: boolean;
554
- /**
555
- * The provider of the model.
556
- */
557
- provider?: 'google_ai';
558
- /**
559
- * The response schema for the model.
560
- */
561
- response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
562
- /**
563
- * The temperature of the model.
564
- */
565
- temperature?: number;
566
- /**
567
- * The thinking configuration for the model.
568
- */
569
- thinking_config?: GoogleAIModelSettings.ThinkingConfig;
570
- }
571
- namespace GoogleAIModelSettings {
572
- /**
573
- * The thinking configuration for the model.
574
- */
575
- interface ThinkingConfig {
576
- /**
577
- * Whether to include thoughts in the model's response.
578
- */
579
- include_thoughts?: boolean;
580
- /**
581
- * The thinking budget for the model.
582
- */
583
- thinking_budget?: number;
584
- }
585
- }
586
- interface GoogleVertexModelSettings {
587
- /**
588
- * The maximum number of tokens the model can generate.
589
- */
590
- max_output_tokens?: number;
591
- /**
592
- * Whether to enable parallel tool calling.
593
- */
594
- parallel_tool_calls?: boolean;
595
- /**
596
- * The provider of the model.
597
- */
598
- provider?: 'google_vertex';
599
- /**
600
- * The response schema for the model.
601
- */
602
- response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
603
- /**
604
- * The temperature of the model.
605
- */
606
- temperature?: number;
607
- /**
608
- * The thinking configuration for the model.
609
- */
610
- thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
611
- }
612
- namespace GoogleVertexModelSettings {
613
- /**
614
- * The thinking configuration for the model.
615
- */
616
- interface ThinkingConfig {
617
- /**
618
- * Whether to include thoughts in the model's response.
619
- */
620
- include_thoughts?: boolean;
621
- /**
622
- * The thinking budget for the model.
623
- */
624
- thinking_budget?: number;
625
- }
471
+ vector_db_provider?: ArchivesArchivesAPI.VectorDBProvider;
626
472
  }
473
+ }
474
+ /**
475
+ * Enum to represent the type of agent.
476
+ */
477
+ export type AgentType = 'memgpt_agent' | 'memgpt_v2_agent' | 'letta_v1_agent' | 'react_agent' | 'workflow_agent' | 'split_thread_agent' | 'sleeptime_agent' | 'voice_convo_agent' | 'voice_sleeptime_agent';
478
+ export interface AnthropicModelSettings {
627
479
  /**
628
- * Azure OpenAI model configuration (OpenAI-compatible).
480
+ * The maximum number of tokens the model can generate.
629
481
  */
630
- interface AzureModelSettings {
631
- /**
632
- * The maximum number of tokens the model can generate.
633
- */
634
- max_output_tokens?: number;
635
- /**
636
- * Whether to enable parallel tool calling.
637
- */
638
- parallel_tool_calls?: boolean;
639
- /**
640
- * The provider of the model.
641
- */
642
- provider?: 'azure';
643
- /**
644
- * The response format for the model.
645
- */
646
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
647
- /**
648
- * The temperature of the model.
649
- */
650
- temperature?: number;
651
- }
482
+ max_output_tokens?: number;
652
483
  /**
653
- * xAI model configuration (OpenAI-compatible).
484
+ * Whether to enable parallel tool calling.
654
485
  */
655
- interface XaiModelSettings {
656
- /**
657
- * The maximum number of tokens the model can generate.
658
- */
659
- max_output_tokens?: number;
660
- /**
661
- * Whether to enable parallel tool calling.
662
- */
663
- parallel_tool_calls?: boolean;
664
- /**
665
- * The provider of the model.
666
- */
667
- provider?: 'xai';
668
- /**
669
- * The response format for the model.
670
- */
671
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
672
- /**
673
- * The temperature of the model.
674
- */
675
- temperature?: number;
676
- }
486
+ parallel_tool_calls?: boolean;
677
487
  /**
678
- * Groq model configuration (OpenAI-compatible).
488
+ * The type of the provider.
679
489
  */
680
- interface GroqModelSettings {
681
- /**
682
- * The maximum number of tokens the model can generate.
683
- */
684
- max_output_tokens?: number;
685
- /**
686
- * Whether to enable parallel tool calling.
687
- */
688
- parallel_tool_calls?: boolean;
689
- /**
690
- * The provider of the model.
691
- */
692
- provider?: 'groq';
693
- /**
694
- * The response format for the model.
695
- */
696
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
697
- /**
698
- * The temperature of the model.
699
- */
700
- temperature?: number;
701
- }
490
+ provider_type?: 'anthropic';
702
491
  /**
703
- * Deepseek model configuration (OpenAI-compatible).
492
+ * The temperature of the model.
704
493
  */
705
- interface DeepseekModelSettings {
706
- /**
707
- * The maximum number of tokens the model can generate.
708
- */
709
- max_output_tokens?: number;
710
- /**
711
- * Whether to enable parallel tool calling.
712
- */
713
- parallel_tool_calls?: boolean;
714
- /**
715
- * The provider of the model.
716
- */
717
- provider?: 'deepseek';
718
- /**
719
- * The response format for the model.
720
- */
721
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
722
- /**
723
- * The temperature of the model.
724
- */
725
- temperature?: number;
726
- }
494
+ temperature?: number;
727
495
  /**
728
- * Together AI model configuration (OpenAI-compatible).
496
+ * The thinking configuration for the model.
729
497
  */
730
- interface TogetherModelSettings {
731
- /**
732
- * The maximum number of tokens the model can generate.
733
- */
734
- max_output_tokens?: number;
735
- /**
736
- * Whether to enable parallel tool calling.
737
- */
738
- parallel_tool_calls?: boolean;
739
- /**
740
- * The provider of the model.
741
- */
742
- provider?: 'together';
743
- /**
744
- * The response format for the model.
745
- */
746
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
747
- /**
748
- * The temperature of the model.
749
- */
750
- temperature?: number;
751
- }
498
+ thinking?: AnthropicModelSettings.Thinking;
752
499
  /**
753
- * AWS Bedrock model configuration.
500
+ * Soft control for how verbose model output should be, used for GPT-5 models.
754
501
  */
755
- interface BedrockModelSettings {
756
- /**
757
- * The maximum number of tokens the model can generate.
758
- */
759
- max_output_tokens?: number;
760
- /**
761
- * Whether to enable parallel tool calling.
762
- */
763
- parallel_tool_calls?: boolean;
764
- /**
765
- * The provider of the model.
766
- */
767
- provider?: 'bedrock';
502
+ verbosity?: 'low' | 'medium' | 'high' | null;
503
+ }
504
+ export declare namespace AnthropicModelSettings {
505
+ /**
506
+ * The thinking configuration for the model.
507
+ */
508
+ interface Thinking {
768
509
  /**
769
- * The response format for the model.
510
+ * The maximum number of tokens the model can use for extended thinking.
770
511
  */
771
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
512
+ budget_tokens?: number;
772
513
  /**
773
- * The temperature of the model.
514
+ * The type of thinking to use.
774
515
  */
775
- temperature?: number;
516
+ type?: 'enabled' | 'disabled';
776
517
  }
777
518
  }
778
519
  /**
779
- * Enum to represent the type of agent.
520
+ * Azure OpenAI model configuration (OpenAI-compatible).
780
521
  */
781
- export type AgentType = 'memgpt_agent' | 'memgpt_v2_agent' | 'letta_v1_agent' | 'react_agent' | 'workflow_agent' | 'split_thread_agent' | 'sleeptime_agent' | 'voice_convo_agent' | 'voice_sleeptime_agent';
522
+ export interface AzureModelSettings {
523
+ /**
524
+ * The maximum number of tokens the model can generate.
525
+ */
526
+ max_output_tokens?: number;
527
+ /**
528
+ * Whether to enable parallel tool calling.
529
+ */
530
+ parallel_tool_calls?: boolean;
531
+ /**
532
+ * The type of the provider.
533
+ */
534
+ provider_type?: 'azure';
535
+ /**
536
+ * The response format for the model.
537
+ */
538
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
539
+ /**
540
+ * The temperature of the model.
541
+ */
542
+ temperature?: number;
543
+ }
544
+ /**
545
+ * AWS Bedrock model configuration.
546
+ */
547
+ export interface BedrockModelSettings {
548
+ /**
549
+ * The maximum number of tokens the model can generate.
550
+ */
551
+ max_output_tokens?: number;
552
+ /**
553
+ * Whether to enable parallel tool calling.
554
+ */
555
+ parallel_tool_calls?: boolean;
556
+ /**
557
+ * The type of the provider.
558
+ */
559
+ provider_type?: 'bedrock';
560
+ /**
561
+ * The response format for the model.
562
+ */
563
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
564
+ /**
565
+ * The temperature of the model.
566
+ */
567
+ temperature?: number;
568
+ }
782
569
  /**
783
570
  * A ToolRule represents a tool that can be invoked by the agent.
784
571
  */
@@ -867,6 +654,138 @@ export interface ContinueToolRule {
867
654
  prompt_template?: string | null;
868
655
  type?: 'continue_loop';
869
656
  }
657
+ /**
658
+ * Deepseek model configuration (OpenAI-compatible).
659
+ */
660
+ export interface DeepseekModelSettings {
661
+ /**
662
+ * The maximum number of tokens the model can generate.
663
+ */
664
+ max_output_tokens?: number;
665
+ /**
666
+ * Whether to enable parallel tool calling.
667
+ */
668
+ parallel_tool_calls?: boolean;
669
+ /**
670
+ * The type of the provider.
671
+ */
672
+ provider_type?: 'deepseek';
673
+ /**
674
+ * The response format for the model.
675
+ */
676
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
677
+ /**
678
+ * The temperature of the model.
679
+ */
680
+ temperature?: number;
681
+ }
682
+ export interface GoogleAIModelSettings {
683
+ /**
684
+ * The maximum number of tokens the model can generate.
685
+ */
686
+ max_output_tokens?: number;
687
+ /**
688
+ * Whether to enable parallel tool calling.
689
+ */
690
+ parallel_tool_calls?: boolean;
691
+ /**
692
+ * The type of the provider.
693
+ */
694
+ provider_type?: 'google_ai';
695
+ /**
696
+ * The response schema for the model.
697
+ */
698
+ response_schema?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
699
+ /**
700
+ * The temperature of the model.
701
+ */
702
+ temperature?: number;
703
+ /**
704
+ * The thinking configuration for the model.
705
+ */
706
+ thinking_config?: GoogleAIModelSettings.ThinkingConfig;
707
+ }
708
+ export declare namespace GoogleAIModelSettings {
709
+ /**
710
+ * The thinking configuration for the model.
711
+ */
712
+ interface ThinkingConfig {
713
+ /**
714
+ * Whether to include thoughts in the model's response.
715
+ */
716
+ include_thoughts?: boolean;
717
+ /**
718
+ * The thinking budget for the model.
719
+ */
720
+ thinking_budget?: number;
721
+ }
722
+ }
723
+ export interface GoogleVertexModelSettings {
724
+ /**
725
+ * The maximum number of tokens the model can generate.
726
+ */
727
+ max_output_tokens?: number;
728
+ /**
729
+ * Whether to enable parallel tool calling.
730
+ */
731
+ parallel_tool_calls?: boolean;
732
+ /**
733
+ * The type of the provider.
734
+ */
735
+ provider_type?: 'google_vertex';
736
+ /**
737
+ * The response schema for the model.
738
+ */
739
+ response_schema?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
740
+ /**
741
+ * The temperature of the model.
742
+ */
743
+ temperature?: number;
744
+ /**
745
+ * The thinking configuration for the model.
746
+ */
747
+ thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
748
+ }
749
+ export declare namespace GoogleVertexModelSettings {
750
+ /**
751
+ * The thinking configuration for the model.
752
+ */
753
+ interface ThinkingConfig {
754
+ /**
755
+ * Whether to include thoughts in the model's response.
756
+ */
757
+ include_thoughts?: boolean;
758
+ /**
759
+ * The thinking budget for the model.
760
+ */
761
+ thinking_budget?: number;
762
+ }
763
+ }
764
+ /**
765
+ * Groq model configuration (OpenAI-compatible).
766
+ */
767
+ export interface GroqModelSettings {
768
+ /**
769
+ * The maximum number of tokens the model can generate.
770
+ */
771
+ max_output_tokens?: number;
772
+ /**
773
+ * Whether to enable parallel tool calling.
774
+ */
775
+ parallel_tool_calls?: boolean;
776
+ /**
777
+ * The type of the provider.
778
+ */
779
+ provider_type?: 'groq';
780
+ /**
781
+ * The response format for the model.
782
+ */
783
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
784
+ /**
785
+ * The temperature of the model.
786
+ */
787
+ temperature?: number;
788
+ }
870
789
  /**
871
790
  * Represents the initial tool rule configuration.
872
791
  */
@@ -977,9 +896,46 @@ export interface MessageCreate {
977
896
  */
978
897
  type?: 'message' | null;
979
898
  }
980
- /**
981
- * A ToolRule that only allows a child tool to be called if the parent has been
982
- * called.
899
+ export interface OpenAIModelSettings {
900
+ /**
901
+ * The maximum number of tokens the model can generate.
902
+ */
903
+ max_output_tokens?: number;
904
+ /**
905
+ * Whether to enable parallel tool calling.
906
+ */
907
+ parallel_tool_calls?: boolean;
908
+ /**
909
+ * The type of the provider.
910
+ */
911
+ provider_type?: 'openai';
912
+ /**
913
+ * The reasoning configuration for the model.
914
+ */
915
+ reasoning?: OpenAIModelSettings.Reasoning;
916
+ /**
917
+ * The response format for the model.
918
+ */
919
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
920
+ /**
921
+ * The temperature of the model.
922
+ */
923
+ temperature?: number;
924
+ }
925
+ export declare namespace OpenAIModelSettings {
926
+ /**
927
+ * The reasoning configuration for the model.
928
+ */
929
+ interface Reasoning {
930
+ /**
931
+ * The reasoning effort to use when generating text reasoning models
932
+ */
933
+ reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
934
+ }
935
+ }
936
+ /**
937
+ * A ToolRule that only allows a child tool to be called if the parent has been
938
+ * called.
983
939
  */
984
940
  export interface ParentToolRule {
985
941
  /**
@@ -1051,6 +1007,56 @@ export interface TextResponseFormat {
1051
1007
  */
1052
1008
  type?: 'text';
1053
1009
  }
1010
+ /**
1011
+ * Together AI model configuration (OpenAI-compatible).
1012
+ */
1013
+ export interface TogetherModelSettings {
1014
+ /**
1015
+ * The maximum number of tokens the model can generate.
1016
+ */
1017
+ max_output_tokens?: number;
1018
+ /**
1019
+ * Whether to enable parallel tool calling.
1020
+ */
1021
+ parallel_tool_calls?: boolean;
1022
+ /**
1023
+ * The type of the provider.
1024
+ */
1025
+ provider_type?: 'together';
1026
+ /**
1027
+ * The response format for the model.
1028
+ */
1029
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
1030
+ /**
1031
+ * The temperature of the model.
1032
+ */
1033
+ temperature?: number;
1034
+ }
1035
+ /**
1036
+ * xAI model configuration (OpenAI-compatible).
1037
+ */
1038
+ export interface XaiModelSettings {
1039
+ /**
1040
+ * The maximum number of tokens the model can generate.
1041
+ */
1042
+ max_output_tokens?: number;
1043
+ /**
1044
+ * Whether to enable parallel tool calling.
1045
+ */
1046
+ parallel_tool_calls?: boolean;
1047
+ /**
1048
+ * The type of the provider.
1049
+ */
1050
+ provider_type?: 'xai';
1051
+ /**
1052
+ * The response format for the model.
1053
+ */
1054
+ response_format?: TextResponseFormat | JsonSchemaResponseFormat | JsonObjectResponseFormat | null;
1055
+ /**
1056
+ * The temperature of the model.
1057
+ */
1058
+ temperature?: number;
1059
+ }
1054
1060
  export type AgentDeleteResponse = unknown;
1055
1061
  export type AgentExportFileResponse = string;
1056
1062
  /**
@@ -1191,7 +1197,7 @@ export interface AgentCreateParams {
1191
1197
  /**
1192
1198
  * The model settings for the agent.
1193
1199
  */
1194
- model_settings?: AgentCreateParams.OpenAIModelSettings | AgentCreateParams.AnthropicModelSettings | AgentCreateParams.GoogleAIModelSettings | AgentCreateParams.GoogleVertexModelSettings | AgentCreateParams.AzureModelSettings | AgentCreateParams.XaiModelSettings | AgentCreateParams.GroqModelSettings | AgentCreateParams.DeepseekModelSettings | AgentCreateParams.TogetherModelSettings | AgentCreateParams.BedrockModelSettings | null;
1200
+ model_settings?: OpenAIModelSettings | AnthropicModelSettings | GoogleAIModelSettings | GoogleVertexModelSettings | AzureModelSettings | XaiModelSettings | GroqModelSettings | DeepseekModelSettings | TogetherModelSettings | BedrockModelSettings | null;
1195
1201
  /**
1196
1202
  * The name of the agent.
1197
1203
  */
@@ -1277,318 +1283,6 @@ export interface AgentCreateParams {
1277
1283
  */
1278
1284
  tools?: Array<string> | null;
1279
1285
  }
1280
- export declare namespace AgentCreateParams {
1281
- interface OpenAIModelSettings {
1282
- /**
1283
- * The maximum number of tokens the model can generate.
1284
- */
1285
- max_output_tokens?: number;
1286
- /**
1287
- * Whether to enable parallel tool calling.
1288
- */
1289
- parallel_tool_calls?: boolean;
1290
- /**
1291
- * The provider of the model.
1292
- */
1293
- provider?: 'openai';
1294
- /**
1295
- * The reasoning configuration for the model.
1296
- */
1297
- reasoning?: OpenAIModelSettings.Reasoning;
1298
- /**
1299
- * The response format for the model.
1300
- */
1301
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1302
- /**
1303
- * The temperature of the model.
1304
- */
1305
- temperature?: number;
1306
- }
1307
- namespace OpenAIModelSettings {
1308
- /**
1309
- * The reasoning configuration for the model.
1310
- */
1311
- interface Reasoning {
1312
- /**
1313
- * The reasoning effort to use when generating text reasoning models
1314
- */
1315
- reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
1316
- }
1317
- }
1318
- interface AnthropicModelSettings {
1319
- /**
1320
- * The maximum number of tokens the model can generate.
1321
- */
1322
- max_output_tokens?: number;
1323
- /**
1324
- * Whether to enable parallel tool calling.
1325
- */
1326
- parallel_tool_calls?: boolean;
1327
- /**
1328
- * The provider of the model.
1329
- */
1330
- provider?: 'anthropic';
1331
- /**
1332
- * The temperature of the model.
1333
- */
1334
- temperature?: number;
1335
- /**
1336
- * The thinking configuration for the model.
1337
- */
1338
- thinking?: AnthropicModelSettings.Thinking;
1339
- /**
1340
- * Soft control for how verbose model output should be, used for GPT-5 models.
1341
- */
1342
- verbosity?: 'low' | 'medium' | 'high' | null;
1343
- }
1344
- namespace AnthropicModelSettings {
1345
- /**
1346
- * The thinking configuration for the model.
1347
- */
1348
- interface Thinking {
1349
- /**
1350
- * The maximum number of tokens the model can use for extended thinking.
1351
- */
1352
- budget_tokens?: number;
1353
- /**
1354
- * The type of thinking to use.
1355
- */
1356
- type?: 'enabled' | 'disabled';
1357
- }
1358
- }
1359
- interface GoogleAIModelSettings {
1360
- /**
1361
- * The maximum number of tokens the model can generate.
1362
- */
1363
- max_output_tokens?: number;
1364
- /**
1365
- * Whether to enable parallel tool calling.
1366
- */
1367
- parallel_tool_calls?: boolean;
1368
- /**
1369
- * The provider of the model.
1370
- */
1371
- provider?: 'google_ai';
1372
- /**
1373
- * The response schema for the model.
1374
- */
1375
- response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1376
- /**
1377
- * The temperature of the model.
1378
- */
1379
- temperature?: number;
1380
- /**
1381
- * The thinking configuration for the model.
1382
- */
1383
- thinking_config?: GoogleAIModelSettings.ThinkingConfig;
1384
- }
1385
- namespace GoogleAIModelSettings {
1386
- /**
1387
- * The thinking configuration for the model.
1388
- */
1389
- interface ThinkingConfig {
1390
- /**
1391
- * Whether to include thoughts in the model's response.
1392
- */
1393
- include_thoughts?: boolean;
1394
- /**
1395
- * The thinking budget for the model.
1396
- */
1397
- thinking_budget?: number;
1398
- }
1399
- }
1400
- interface GoogleVertexModelSettings {
1401
- /**
1402
- * The maximum number of tokens the model can generate.
1403
- */
1404
- max_output_tokens?: number;
1405
- /**
1406
- * Whether to enable parallel tool calling.
1407
- */
1408
- parallel_tool_calls?: boolean;
1409
- /**
1410
- * The provider of the model.
1411
- */
1412
- provider?: 'google_vertex';
1413
- /**
1414
- * The response schema for the model.
1415
- */
1416
- response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1417
- /**
1418
- * The temperature of the model.
1419
- */
1420
- temperature?: number;
1421
- /**
1422
- * The thinking configuration for the model.
1423
- */
1424
- thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
1425
- }
1426
- namespace GoogleVertexModelSettings {
1427
- /**
1428
- * The thinking configuration for the model.
1429
- */
1430
- interface ThinkingConfig {
1431
- /**
1432
- * Whether to include thoughts in the model's response.
1433
- */
1434
- include_thoughts?: boolean;
1435
- /**
1436
- * The thinking budget for the model.
1437
- */
1438
- thinking_budget?: number;
1439
- }
1440
- }
1441
- /**
1442
- * Azure OpenAI model configuration (OpenAI-compatible).
1443
- */
1444
- interface AzureModelSettings {
1445
- /**
1446
- * The maximum number of tokens the model can generate.
1447
- */
1448
- max_output_tokens?: number;
1449
- /**
1450
- * Whether to enable parallel tool calling.
1451
- */
1452
- parallel_tool_calls?: boolean;
1453
- /**
1454
- * The provider of the model.
1455
- */
1456
- provider?: 'azure';
1457
- /**
1458
- * The response format for the model.
1459
- */
1460
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1461
- /**
1462
- * The temperature of the model.
1463
- */
1464
- temperature?: number;
1465
- }
1466
- /**
1467
- * xAI model configuration (OpenAI-compatible).
1468
- */
1469
- interface XaiModelSettings {
1470
- /**
1471
- * The maximum number of tokens the model can generate.
1472
- */
1473
- max_output_tokens?: number;
1474
- /**
1475
- * Whether to enable parallel tool calling.
1476
- */
1477
- parallel_tool_calls?: boolean;
1478
- /**
1479
- * The provider of the model.
1480
- */
1481
- provider?: 'xai';
1482
- /**
1483
- * The response format for the model.
1484
- */
1485
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1486
- /**
1487
- * The temperature of the model.
1488
- */
1489
- temperature?: number;
1490
- }
1491
- /**
1492
- * Groq model configuration (OpenAI-compatible).
1493
- */
1494
- interface GroqModelSettings {
1495
- /**
1496
- * The maximum number of tokens the model can generate.
1497
- */
1498
- max_output_tokens?: number;
1499
- /**
1500
- * Whether to enable parallel tool calling.
1501
- */
1502
- parallel_tool_calls?: boolean;
1503
- /**
1504
- * The provider of the model.
1505
- */
1506
- provider?: 'groq';
1507
- /**
1508
- * The response format for the model.
1509
- */
1510
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1511
- /**
1512
- * The temperature of the model.
1513
- */
1514
- temperature?: number;
1515
- }
1516
- /**
1517
- * Deepseek model configuration (OpenAI-compatible).
1518
- */
1519
- interface DeepseekModelSettings {
1520
- /**
1521
- * The maximum number of tokens the model can generate.
1522
- */
1523
- max_output_tokens?: number;
1524
- /**
1525
- * Whether to enable parallel tool calling.
1526
- */
1527
- parallel_tool_calls?: boolean;
1528
- /**
1529
- * The provider of the model.
1530
- */
1531
- provider?: 'deepseek';
1532
- /**
1533
- * The response format for the model.
1534
- */
1535
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1536
- /**
1537
- * The temperature of the model.
1538
- */
1539
- temperature?: number;
1540
- }
1541
- /**
1542
- * Together AI model configuration (OpenAI-compatible).
1543
- */
1544
- interface TogetherModelSettings {
1545
- /**
1546
- * The maximum number of tokens the model can generate.
1547
- */
1548
- max_output_tokens?: number;
1549
- /**
1550
- * Whether to enable parallel tool calling.
1551
- */
1552
- parallel_tool_calls?: boolean;
1553
- /**
1554
- * The provider of the model.
1555
- */
1556
- provider?: 'together';
1557
- /**
1558
- * The response format for the model.
1559
- */
1560
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1561
- /**
1562
- * The temperature of the model.
1563
- */
1564
- temperature?: number;
1565
- }
1566
- /**
1567
- * AWS Bedrock model configuration.
1568
- */
1569
- interface BedrockModelSettings {
1570
- /**
1571
- * The maximum number of tokens the model can generate.
1572
- */
1573
- max_output_tokens?: number;
1574
- /**
1575
- * Whether to enable parallel tool calling.
1576
- */
1577
- parallel_tool_calls?: boolean;
1578
- /**
1579
- * The provider of the model.
1580
- */
1581
- provider?: 'bedrock';
1582
- /**
1583
- * The response format for the model.
1584
- */
1585
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1586
- /**
1587
- * The temperature of the model.
1588
- */
1589
- temperature?: number;
1590
- }
1591
- }
1592
1286
  export interface AgentRetrieveParams {
1593
1287
  /**
1594
1288
  * Specify which relational fields to include in the response. No relationships are
@@ -1808,7 +1502,7 @@ export interface AgentModifyParams {
1808
1502
  /**
1809
1503
  * The model settings for the agent.
1810
1504
  */
1811
- model_settings?: AgentModifyParams.OpenAIModelSettings | AgentModifyParams.AnthropicModelSettings | AgentModifyParams.GoogleAIModelSettings | AgentModifyParams.GoogleVertexModelSettings | AgentModifyParams.AzureModelSettings | AgentModifyParams.XaiModelSettings | AgentModifyParams.GroqModelSettings | AgentModifyParams.DeepseekModelSettings | AgentModifyParams.TogetherModelSettings | AgentModifyParams.BedrockModelSettings | null;
1505
+ model_settings?: OpenAIModelSettings | AnthropicModelSettings | GoogleAIModelSettings | GoogleVertexModelSettings | AzureModelSettings | XaiModelSettings | GroqModelSettings | DeepseekModelSettings | TogetherModelSettings | BedrockModelSettings | null;
1812
1506
  /**
1813
1507
  * The name of the agent.
1814
1508
  */
@@ -1878,325 +1572,15 @@ export interface AgentModifyParams {
1878
1572
  */
1879
1573
  tool_rules?: Array<ChildToolRule | InitToolRule | TerminalToolRule | ConditionalToolRule | ContinueToolRule | RequiredBeforeExitToolRule | MaxCountPerStepToolRule | ParentToolRule | RequiresApprovalToolRule> | null;
1880
1574
  }
1881
- export declare namespace AgentModifyParams {
1882
- interface OpenAIModelSettings {
1883
- /**
1884
- * The maximum number of tokens the model can generate.
1885
- */
1886
- max_output_tokens?: number;
1887
- /**
1888
- * Whether to enable parallel tool calling.
1889
- */
1890
- parallel_tool_calls?: boolean;
1891
- /**
1892
- * The provider of the model.
1893
- */
1894
- provider?: 'openai';
1895
- /**
1896
- * The reasoning configuration for the model.
1897
- */
1898
- reasoning?: OpenAIModelSettings.Reasoning;
1899
- /**
1900
- * The response format for the model.
1901
- */
1902
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1903
- /**
1904
- * The temperature of the model.
1905
- */
1906
- temperature?: number;
1907
- }
1908
- namespace OpenAIModelSettings {
1909
- /**
1910
- * The reasoning configuration for the model.
1911
- */
1912
- interface Reasoning {
1913
- /**
1914
- * The reasoning effort to use when generating text reasoning models
1915
- */
1916
- reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
1917
- }
1918
- }
1919
- interface AnthropicModelSettings {
1920
- /**
1921
- * The maximum number of tokens the model can generate.
1922
- */
1923
- max_output_tokens?: number;
1924
- /**
1925
- * Whether to enable parallel tool calling.
1926
- */
1927
- parallel_tool_calls?: boolean;
1928
- /**
1929
- * The provider of the model.
1930
- */
1931
- provider?: 'anthropic';
1932
- /**
1933
- * The temperature of the model.
1934
- */
1935
- temperature?: number;
1936
- /**
1937
- * The thinking configuration for the model.
1938
- */
1939
- thinking?: AnthropicModelSettings.Thinking;
1940
- /**
1941
- * Soft control for how verbose model output should be, used for GPT-5 models.
1942
- */
1943
- verbosity?: 'low' | 'medium' | 'high' | null;
1944
- }
1945
- namespace AnthropicModelSettings {
1946
- /**
1947
- * The thinking configuration for the model.
1948
- */
1949
- interface Thinking {
1950
- /**
1951
- * The maximum number of tokens the model can use for extended thinking.
1952
- */
1953
- budget_tokens?: number;
1954
- /**
1955
- * The type of thinking to use.
1956
- */
1957
- type?: 'enabled' | 'disabled';
1958
- }
1959
- }
1960
- interface GoogleAIModelSettings {
1961
- /**
1962
- * The maximum number of tokens the model can generate.
1963
- */
1964
- max_output_tokens?: number;
1965
- /**
1966
- * Whether to enable parallel tool calling.
1967
- */
1968
- parallel_tool_calls?: boolean;
1969
- /**
1970
- * The provider of the model.
1971
- */
1972
- provider?: 'google_ai';
1973
- /**
1974
- * The response schema for the model.
1975
- */
1976
- response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
1977
- /**
1978
- * The temperature of the model.
1979
- */
1980
- temperature?: number;
1981
- /**
1982
- * The thinking configuration for the model.
1983
- */
1984
- thinking_config?: GoogleAIModelSettings.ThinkingConfig;
1985
- }
1986
- namespace GoogleAIModelSettings {
1987
- /**
1988
- * The thinking configuration for the model.
1989
- */
1990
- interface ThinkingConfig {
1991
- /**
1992
- * Whether to include thoughts in the model's response.
1993
- */
1994
- include_thoughts?: boolean;
1995
- /**
1996
- * The thinking budget for the model.
1997
- */
1998
- thinking_budget?: number;
1999
- }
2000
- }
2001
- interface GoogleVertexModelSettings {
2002
- /**
2003
- * The maximum number of tokens the model can generate.
2004
- */
2005
- max_output_tokens?: number;
2006
- /**
2007
- * Whether to enable parallel tool calling.
2008
- */
2009
- parallel_tool_calls?: boolean;
2010
- /**
2011
- * The provider of the model.
2012
- */
2013
- provider?: 'google_vertex';
2014
- /**
2015
- * The response schema for the model.
2016
- */
2017
- response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2018
- /**
2019
- * The temperature of the model.
2020
- */
2021
- temperature?: number;
2022
- /**
2023
- * The thinking configuration for the model.
2024
- */
2025
- thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
2026
- }
2027
- namespace GoogleVertexModelSettings {
2028
- /**
2029
- * The thinking configuration for the model.
2030
- */
2031
- interface ThinkingConfig {
2032
- /**
2033
- * Whether to include thoughts in the model's response.
2034
- */
2035
- include_thoughts?: boolean;
2036
- /**
2037
- * The thinking budget for the model.
2038
- */
2039
- thinking_budget?: number;
2040
- }
2041
- }
2042
- /**
2043
- * Azure OpenAI model configuration (OpenAI-compatible).
2044
- */
2045
- interface AzureModelSettings {
2046
- /**
2047
- * The maximum number of tokens the model can generate.
2048
- */
2049
- max_output_tokens?: number;
2050
- /**
2051
- * Whether to enable parallel tool calling.
2052
- */
2053
- parallel_tool_calls?: boolean;
2054
- /**
2055
- * The provider of the model.
2056
- */
2057
- provider?: 'azure';
2058
- /**
2059
- * The response format for the model.
2060
- */
2061
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2062
- /**
2063
- * The temperature of the model.
2064
- */
2065
- temperature?: number;
2066
- }
2067
- /**
2068
- * xAI model configuration (OpenAI-compatible).
2069
- */
2070
- interface XaiModelSettings {
2071
- /**
2072
- * The maximum number of tokens the model can generate.
2073
- */
2074
- max_output_tokens?: number;
2075
- /**
2076
- * Whether to enable parallel tool calling.
2077
- */
2078
- parallel_tool_calls?: boolean;
2079
- /**
2080
- * The provider of the model.
2081
- */
2082
- provider?: 'xai';
2083
- /**
2084
- * The response format for the model.
2085
- */
2086
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2087
- /**
2088
- * The temperature of the model.
2089
- */
2090
- temperature?: number;
2091
- }
2092
- /**
2093
- * Groq model configuration (OpenAI-compatible).
2094
- */
2095
- interface GroqModelSettings {
2096
- /**
2097
- * The maximum number of tokens the model can generate.
2098
- */
2099
- max_output_tokens?: number;
2100
- /**
2101
- * Whether to enable parallel tool calling.
2102
- */
2103
- parallel_tool_calls?: boolean;
2104
- /**
2105
- * The provider of the model.
2106
- */
2107
- provider?: 'groq';
2108
- /**
2109
- * The response format for the model.
2110
- */
2111
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2112
- /**
2113
- * The temperature of the model.
2114
- */
2115
- temperature?: number;
2116
- }
2117
- /**
2118
- * Deepseek model configuration (OpenAI-compatible).
2119
- */
2120
- interface DeepseekModelSettings {
2121
- /**
2122
- * The maximum number of tokens the model can generate.
2123
- */
2124
- max_output_tokens?: number;
2125
- /**
2126
- * Whether to enable parallel tool calling.
2127
- */
2128
- parallel_tool_calls?: boolean;
2129
- /**
2130
- * The provider of the model.
2131
- */
2132
- provider?: 'deepseek';
2133
- /**
2134
- * The response format for the model.
2135
- */
2136
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2137
- /**
2138
- * The temperature of the model.
2139
- */
2140
- temperature?: number;
2141
- }
2142
- /**
2143
- * Together AI model configuration (OpenAI-compatible).
2144
- */
2145
- interface TogetherModelSettings {
2146
- /**
2147
- * The maximum number of tokens the model can generate.
2148
- */
2149
- max_output_tokens?: number;
2150
- /**
2151
- * Whether to enable parallel tool calling.
2152
- */
2153
- parallel_tool_calls?: boolean;
2154
- /**
2155
- * The provider of the model.
2156
- */
2157
- provider?: 'together';
2158
- /**
2159
- * The response format for the model.
2160
- */
2161
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2162
- /**
2163
- * The temperature of the model.
2164
- */
2165
- temperature?: number;
2166
- }
2167
- /**
2168
- * AWS Bedrock model configuration.
2169
- */
2170
- interface BedrockModelSettings {
2171
- /**
2172
- * The maximum number of tokens the model can generate.
2173
- */
2174
- max_output_tokens?: number;
2175
- /**
2176
- * Whether to enable parallel tool calling.
2177
- */
2178
- parallel_tool_calls?: boolean;
2179
- /**
2180
- * The provider of the model.
2181
- */
2182
- provider?: 'bedrock';
2183
- /**
2184
- * The response format for the model.
2185
- */
2186
- response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
2187
- /**
2188
- * The temperature of the model.
2189
- */
2190
- temperature?: number;
2191
- }
2192
- }
2193
1575
  export declare namespace Agents {
2194
- export { type AgentEnvironmentVariable as AgentEnvironmentVariable, type AgentState as AgentState, type AgentType as AgentType, type ChildToolRule as ChildToolRule, type ConditionalToolRule as ConditionalToolRule, type ContinueToolRule as ContinueToolRule, type InitToolRule as InitToolRule, type JsonObjectResponseFormat as JsonObjectResponseFormat, type JsonSchemaResponseFormat as JsonSchemaResponseFormat, type LettaMessageContentUnion as LettaMessageContentUnion, type MaxCountPerStepToolRule as MaxCountPerStepToolRule, type MessageCreate as MessageCreate, type ParentToolRule as ParentToolRule, type RequiredBeforeExitToolRule as RequiredBeforeExitToolRule, type RequiresApprovalToolRule as RequiresApprovalToolRule, type TerminalToolRule as TerminalToolRule, type TextResponseFormat as TextResponseFormat, type AgentDeleteResponse as AgentDeleteResponse, type AgentExportFileResponse as AgentExportFileResponse, type AgentImportFileResponse as AgentImportFileResponse, type AgentStatesArrayPage as AgentStatesArrayPage, type AgentCreateParams as AgentCreateParams, type AgentRetrieveParams as AgentRetrieveParams, type AgentListParams as AgentListParams, type AgentExportFileParams as AgentExportFileParams, type AgentImportFileParams as AgentImportFileParams, type AgentModifyParams as AgentModifyParams, };
1576
+ export { type AgentEnvironmentVariable as AgentEnvironmentVariable, type AgentState as AgentState, type AgentType as AgentType, type AnthropicModelSettings as AnthropicModelSettings, type AzureModelSettings as AzureModelSettings, type BedrockModelSettings as BedrockModelSettings, type ChildToolRule as ChildToolRule, type ConditionalToolRule as ConditionalToolRule, type ContinueToolRule as ContinueToolRule, type DeepseekModelSettings as DeepseekModelSettings, type GoogleAIModelSettings as GoogleAIModelSettings, type GoogleVertexModelSettings as GoogleVertexModelSettings, type GroqModelSettings as GroqModelSettings, type InitToolRule as InitToolRule, type JsonObjectResponseFormat as JsonObjectResponseFormat, type JsonSchemaResponseFormat as JsonSchemaResponseFormat, type LettaMessageContentUnion as LettaMessageContentUnion, type MaxCountPerStepToolRule as MaxCountPerStepToolRule, type MessageCreate as MessageCreate, type OpenAIModelSettings as OpenAIModelSettings, type ParentToolRule as ParentToolRule, type RequiredBeforeExitToolRule as RequiredBeforeExitToolRule, type RequiresApprovalToolRule as RequiresApprovalToolRule, type TerminalToolRule as TerminalToolRule, type TextResponseFormat as TextResponseFormat, type TogetherModelSettings as TogetherModelSettings, type XaiModelSettings as XaiModelSettings, type AgentDeleteResponse as AgentDeleteResponse, type AgentExportFileResponse as AgentExportFileResponse, type AgentImportFileResponse as AgentImportFileResponse, type AgentStatesArrayPage as AgentStatesArrayPage, type AgentCreateParams as AgentCreateParams, type AgentRetrieveParams as AgentRetrieveParams, type AgentListParams as AgentListParams, type AgentExportFileParams as AgentExportFileParams, type AgentImportFileParams as AgentImportFileParams, type AgentModifyParams as AgentModifyParams, };
1577
+ export { Messages as Messages, type ApprovalCreate as ApprovalCreate, type ApprovalRequestMessage as ApprovalRequestMessage, type ApprovalResponseMessage as ApprovalResponseMessage, type ApprovalReturn as ApprovalReturn, type AssistantMessage as AssistantMessage, type EventMessage as EventMessage, type HiddenReasoningMessage as HiddenReasoningMessage, type ImageContent as ImageContent, type InternalMessage as InternalMessage, type JobStatus as JobStatus, type JobType as JobType, type LettaAssistantMessageContentUnion as LettaAssistantMessageContentUnion, type LettaRequest as LettaRequest, type LettaResponse as LettaResponse, type LettaStreamingRequest as LettaStreamingRequest, type LettaStreamingResponse as LettaStreamingResponse, type LettaUserMessageContentUnion as LettaUserMessageContentUnion, type Message as Message, type MessageRole as MessageRole, type MessageType as MessageType, type OmittedReasoningContent as OmittedReasoningContent, type ReasoningContent as ReasoningContent, type ReasoningMessage as ReasoningMessage, type RedactedReasoningContent as RedactedReasoningContent, type Run as Run, type SummaryMessage as SummaryMessage, type SystemMessage as SystemMessage, type TextContent as TextContent, type ToolCall as ToolCall, type ToolCallContent as ToolCallContent, type ToolCallDelta as ToolCallDelta, type ToolCallMessage as ToolCallMessage, type ToolReturn as ToolReturn, type ToolReturnContent as ToolReturnContent, type UpdateAssistantMessage as UpdateAssistantMessage, type UpdateReasoningMessage as UpdateReasoningMessage, type UpdateSystemMessage as UpdateSystemMessage, type UpdateUserMessage as UpdateUserMessage, type UserMessage as UserMessage, type MessageCancelResponse as MessageCancelResponse, type MessageModifyResponse as MessageModifyResponse, type MessagesArrayPage as MessagesArrayPage, type MessageListParams as MessageListParams, type MessageCancelParams as MessageCancelParams, type MessageModifyParams as MessageModifyParams, type MessageResetParams as MessageResetParams, type MessageSendParams as MessageSendParams, type MessageSendParamsNonStreaming as MessageSendParamsNonStreaming, type MessageSendParamsStreaming as MessageSendParamsStreaming, type MessageSendAsyncParams as MessageSendAsyncParams, type MessageStreamParams as MessageStreamParams, };
1578
+ export { Blocks as Blocks, type Block as Block, type BlockModify as BlockModify, type BlockRetrieveParams as BlockRetrieveParams, type BlockListParams as BlockListParams, type BlockAttachParams as BlockAttachParams, type BlockDetachParams as BlockDetachParams, type BlockModifyParams as BlockModifyParams, };
2195
1579
  export { Tools as Tools, type ToolListParams as ToolListParams, type ToolAttachParams as ToolAttachParams, type ToolDetachParams as ToolDetachParams, type ToolUpdateApprovalParams as ToolUpdateApprovalParams, };
2196
1580
  export { Folders as Folders, type FolderListResponse as FolderListResponse, type FolderListResponsesArrayPage as FolderListResponsesArrayPage, type FolderListParams as FolderListParams, type FolderAttachParams as FolderAttachParams, type FolderDetachParams as FolderDetachParams, };
2197
1581
  export { Files as Files, type FileListResponse as FileListResponse, type FileCloseResponse as FileCloseResponse, type FileCloseAllResponse as FileCloseAllResponse, type FileOpenResponse as FileOpenResponse, type FileListResponsesNextFilesPage as FileListResponsesNextFilesPage, type FileListParams as FileListParams, type FileCloseParams as FileCloseParams, type FileOpenParams as FileOpenParams, };
2198
- export { Blocks as Blocks, type Block as Block, type BlockModify as BlockModify, type BlockRetrieveParams as BlockRetrieveParams, type BlockListParams as BlockListParams, type BlockAttachParams as BlockAttachParams, type BlockDetachParams as BlockDetachParams, type BlockModifyParams as BlockModifyParams, };
2199
1582
  export { Groups as Groups, type GroupListParams as GroupListParams };
2200
- export { Messages as Messages, type ApprovalCreate as ApprovalCreate, type ApprovalRequestMessage as ApprovalRequestMessage, type ApprovalResponseMessage as ApprovalResponseMessage, type AssistantMessage as AssistantMessage, type EventMessage as EventMessage, type HiddenReasoningMessage as HiddenReasoningMessage, type ImageContent as ImageContent, type JobStatus as JobStatus, type JobType as JobType, type LettaAssistantMessageContentUnion as LettaAssistantMessageContentUnion, type LettaMessageUnion as LettaMessageUnion, type LettaRequest as LettaRequest, type LettaResponse as LettaResponse, type LettaStreamingRequest as LettaStreamingRequest, type LettaStreamingResponse as LettaStreamingResponse, type LettaUserMessageContentUnion as LettaUserMessageContentUnion, type Message as Message, type MessageRole as MessageRole, type MessageType as MessageType, type OmittedReasoningContent as OmittedReasoningContent, type ReasoningContent as ReasoningContent, type ReasoningMessage as ReasoningMessage, type RedactedReasoningContent as RedactedReasoningContent, type Run as Run, type SummaryMessage as SummaryMessage, type SystemMessage as SystemMessage, type TextContent as TextContent, type ToolCall as ToolCall, type ToolCallContent as ToolCallContent, type ToolCallDelta as ToolCallDelta, type ToolCallMessage as ToolCallMessage, type ToolReturn as ToolReturn, type ToolReturnContent as ToolReturnContent, type UpdateAssistantMessage as UpdateAssistantMessage, type UpdateReasoningMessage as UpdateReasoningMessage, type UpdateSystemMessage as UpdateSystemMessage, type UpdateUserMessage as UpdateUserMessage, type UserMessage as UserMessage, type MessageCancelResponse as MessageCancelResponse, type MessageModifyResponse as MessageModifyResponse, type LettaMessageUnionsArrayPage as LettaMessageUnionsArrayPage, type MessageListParams as MessageListParams, type MessageCancelParams as MessageCancelParams, type MessageModifyParams as MessageModifyParams, type MessageResetParams as MessageResetParams, type MessageSendParams as MessageSendParams, type MessageSendParamsNonStreaming as MessageSendParamsNonStreaming, type MessageSendParamsStreaming as MessageSendParamsStreaming, type MessageSendAsyncParams as MessageSendAsyncParams, type MessageStreamParams as MessageStreamParams, };
1583
+ export { Archives as Archives, type ArchiveAttachResponse as ArchiveAttachResponse, type ArchiveDetachResponse as ArchiveDetachResponse, type ArchiveAttachParams as ArchiveAttachParams, type ArchiveDetachParams as ArchiveDetachParams, };
1584
+ export { Identities as Identities, type IdentityAttachResponse as IdentityAttachResponse, type IdentityDetachResponse as IdentityDetachResponse, type IdentityAttachParams as IdentityAttachParams, type IdentityDetachParams as IdentityDetachParams, };
2201
1585
  }
2202
1586
  //# sourceMappingURL=agents.d.ts.map
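
The bulk of the agents.d.ts churn above comes from hoisting the per-provider model-settings shapes out of the AgentState, AgentCreateParams, and AgentModifyParams namespaces into top-level exported interfaces, with the discriminator renamed from provider to provider_type. A type-level sketch of the new shape; the subpath import is an assumption (the types may instead be re-exported from the package root):

// The per-provider settings are now flat, top-level interfaces discriminated
// by `provider_type` (previously nested namespaces discriminated by `provider`).
import type {
  AgentCreateParams,
  OpenAIModelSettings,
  AnthropicModelSettings,
} from '@letta-ai/letta-client/resources/agents/agents'; // assumed import path

const openai: OpenAIModelSettings = {
  provider_type: 'openai',                   // was: provider: 'openai'
  temperature: 0.2,
  max_output_tokens: 1024,
  reasoning: { reasoning_effort: 'medium' },
};

const anthropic: AnthropicModelSettings = {
  provider_type: 'anthropic',
  thinking: { type: 'enabled', budget_tokens: 2048 },
};

// model_settings on create/modify params now takes the shared union directly
// (OpenAIModelSettings | AnthropicModelSettings | ... | null) instead of the
// per-params nested variants.
const body: Pick<AgentCreateParams, 'model_settings'> = {
  model_settings: openai,
};
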