@letta-ai/letta-client 1.0.0-alpha.16 → 1.0.0-alpha.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +28 -0
- package/client.d.mts +2 -2
- package/client.d.mts.map +1 -1
- package/client.d.ts +2 -2
- package/client.d.ts.map +1 -1
- package/client.js +1 -1
- package/client.js.map +1 -1
- package/client.mjs +1 -1
- package/client.mjs.map +1 -1
- package/package.json +1 -1
- package/resources/agents/agents.d.mts +962 -111
- package/resources/agents/agents.d.mts.map +1 -1
- package/resources/agents/agents.d.ts +962 -111
- package/resources/agents/agents.d.ts.map +1 -1
- package/resources/agents/agents.js.map +1 -1
- package/resources/agents/agents.mjs.map +1 -1
- package/resources/agents/blocks.d.mts +1 -15
- package/resources/agents/blocks.d.mts.map +1 -1
- package/resources/agents/blocks.d.ts +1 -15
- package/resources/agents/blocks.d.ts.map +1 -1
- package/resources/agents/folders.d.mts +2 -7
- package/resources/agents/folders.d.mts.map +1 -1
- package/resources/agents/folders.d.ts +2 -7
- package/resources/agents/folders.d.ts.map +1 -1
- package/resources/agents/messages.d.mts +0 -10
- package/resources/agents/messages.d.mts.map +1 -1
- package/resources/agents/messages.d.ts +0 -10
- package/resources/agents/messages.d.ts.map +1 -1
- package/resources/archives.d.mts +0 -6
- package/resources/archives.d.mts.map +1 -1
- package/resources/archives.d.ts +0 -6
- package/resources/archives.d.ts.map +1 -1
- package/resources/folders/folders.d.mts +0 -6
- package/resources/folders/folders.d.mts.map +1 -1
- package/resources/folders/folders.d.ts +0 -6
- package/resources/folders/folders.d.ts.map +1 -1
- package/resources/folders/folders.js.map +1 -1
- package/resources/folders/folders.mjs.map +1 -1
- package/resources/index.d.mts +1 -1
- package/resources/index.d.mts.map +1 -1
- package/resources/index.d.ts +1 -1
- package/resources/index.d.ts.map +1 -1
- package/resources/index.js.map +1 -1
- package/resources/index.mjs.map +1 -1
- package/resources/mcp-servers/mcp-servers.d.mts +0 -10
- package/resources/mcp-servers/mcp-servers.d.mts.map +1 -1
- package/resources/mcp-servers/mcp-servers.d.ts +0 -10
- package/resources/mcp-servers/mcp-servers.d.ts.map +1 -1
- package/resources/mcp-servers/mcp-servers.js.map +1 -1
- package/resources/mcp-servers/mcp-servers.mjs.map +1 -1
- package/resources/models/embeddings.d.mts +1 -66
- package/resources/models/embeddings.d.mts.map +1 -1
- package/resources/models/embeddings.d.ts +1 -66
- package/resources/models/embeddings.d.ts.map +1 -1
- package/resources/models/index.d.mts +1 -1
- package/resources/models/index.d.mts.map +1 -1
- package/resources/models/index.d.ts +1 -1
- package/resources/models/index.d.ts.map +1 -1
- package/resources/models/index.js.map +1 -1
- package/resources/models/index.mjs.map +1 -1
- package/resources/models/models.d.mts +169 -109
- package/resources/models/models.d.mts.map +1 -1
- package/resources/models/models.d.ts +169 -109
- package/resources/models/models.d.ts.map +1 -1
- package/resources/models/models.js.map +1 -1
- package/resources/models/models.mjs.map +1 -1
- package/resources/runs/runs.d.mts +0 -5
- package/resources/runs/runs.d.mts.map +1 -1
- package/resources/runs/runs.d.ts +0 -5
- package/resources/runs/runs.d.ts.map +1 -1
- package/resources/runs/runs.js.map +1 -1
- package/resources/runs/runs.mjs.map +1 -1
- package/resources/tools.d.mts +0 -4
- package/resources/tools.d.mts.map +1 -1
- package/resources/tools.d.ts +0 -4
- package/resources/tools.d.ts.map +1 -1
- package/src/client.ts +5 -1
- package/src/resources/agents/agents.ts +1235 -69
- package/src/resources/agents/blocks.ts +1 -15
- package/src/resources/agents/folders.ts +2 -7
- package/src/resources/agents/messages.ts +0 -10
- package/src/resources/archives.ts +0 -6
- package/src/resources/folders/folders.ts +0 -6
- package/src/resources/index.ts +2 -0
- package/src/resources/mcp-servers/mcp-servers.ts +0 -10
- package/src/resources/models/embeddings.ts +1 -100
- package/src/resources/models/index.ts +2 -0
- package/src/resources/models/models.ts +249 -153
- package/src/resources/runs/runs.ts +0 -5
- package/src/resources/tools.ts +0 -4
- package/src/version.ts +1 -1
- package/version.d.mts +1 -1
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/version.mjs +1 -1
@@ -108,16 +108,6 @@ export interface AgentEnvironmentVariable {
  * Representation of an agent's state. This is the state of the agent at a given
  * time, and is persisted in the DB backend. The state has all the information
  * needed to recreate a persisted agent.
- *
- * Parameters: id (str): The unique identifier of the agent. name (str): The name
- * of the agent (must be unique to the user). created_at (datetime): The datetime
- * the agent was created. message_ids (List[str]): The ids of the messages in the
- * agent's in-context memory. memory (Memory): The in-context memory of the agent.
- * tools (List[str]): The tools used by the agent. This includes any memory editing
- * functions specified in `memory`. system (str): The system prompt used by the
- * agent. llm_config (LLMConfig): The LLM configuration used by the agent.
- * embedding_config (EmbeddingConfig): The embedding configuration used by the
- * agent.
  */
 export interface AgentState {
   /**
@@ -188,9 +178,9 @@ export interface AgentState {
    */
  description?: string | null;
  /**
-  * The embedding model used by the agent.
+  * The embedding model handle used by the agent (format: provider/model-name).
   */
- embedding?:
+ embedding?: string | null;
  /**
   * If set to True, memory management will move to a background agent thread.
   */
@@ -254,9 +244,13 @@ export interface AgentState {
   [key: string]: unknown;
  } | null;
  /**
-  *
+  * The model handle used by the agent (format: provider/model-name).
   */
- model?:
+ model?: string | null;
+ /**
+  * The model settings used by the agent.
+  */
+ model_settings?: AgentState.OpenAIModelSettings | AgentState.AnthropicModelSettings | AgentState.GoogleAIModelSettings | AgentState.GoogleVertexModelSettings | AgentState.AzureModelSettings | AgentState.XaiModelSettings | AgentState.GroqModelSettings | AgentState.DeepseekModelSettings | AgentState.TogetherModelSettings | AgentState.BedrockModelSettings | null;
  /**
   * @deprecated Deprecated: Use `managed_group` field instead. The multi-agent group
   * that this agent manages.
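The headline change for `AgentState` readers: `model` and `embedding` collapse from object unions to nullable handle strings, and provider tuning moves into the new `model_settings` union. A minimal read-side sketch; the default-export client class, the `apiKey` option, and the agent ID are assumptions not confirmed by this diff:

```ts
import Letta from '@letta-ai/letta-client';

const client = new Letta({ apiKey: process.env.LETTA_API_KEY });

async function inspectAgent() {
  // 'agent-123' is a placeholder ID.
  const agent = await client.agents.retrieve('agent-123');

  // Both fields are now nullable "provider/model-name" handle strings.
  console.log(agent.model, agent.embedding);

  // model_settings is a union of ten provider-specific shapes,
  // distinguishable by the optional `provider` literal.
  if (agent.model_settings?.provider === 'openai') {
    console.log(agent.model_settings.reasoning?.reasoning_effort);
  }
}
```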
@@ -419,13 +413,8 @@ export declare namespace AgentState {
   }
  }
  /**
-  * Representation of a source, which is a collection of
-  *
-  * Parameters: id (str): The ID of the source name (str): The name of the source.
-  * embedding_config (EmbeddingConfig): The embedding configuration used by the
-  * source. user_id (str): The ID of the user that created the source. metadata
-  * (dict): Metadata associated with the source. description (str): The description
-  * of the source.
+  * (Deprecated: Use Folder) Representation of a source, which is a collection of
+  * files and passages.
   */
  interface Source {
   /**
@@ -475,31 +464,315 @@ export declare namespace AgentState {
    */
   vector_db_provider?: ArchivesAPI.VectorDBProvider;
  }
+  interface OpenAIModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'openai';
+    /**
+     * The reasoning configuration for the model.
+     */
+    reasoning?: OpenAIModelSettings.Reasoning;
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  namespace OpenAIModelSettings {
+    /**
+     * The reasoning configuration for the model.
+     */
+    interface Reasoning {
+      /**
+       * The reasoning effort to use when generating text reasoning models
+       */
+      reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
+    }
+  }
+  interface AnthropicModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'anthropic';
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking?: AnthropicModelSettings.Thinking;
+    /**
+     * Soft control for how verbose model output should be, used for GPT-5 models.
+     */
+    verbosity?: 'low' | 'medium' | 'high' | null;
+  }
+  namespace AnthropicModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface Thinking {
+      /**
+       * The maximum number of tokens the model can use for extended thinking.
+       */
+      budget_tokens?: number;
+      /**
+       * The type of thinking to use.
+       */
+      type?: 'enabled' | 'disabled';
+    }
+  }
+  interface GoogleAIModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'google_ai';
+    /**
+     * The response schema for the model.
+     */
+    response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking_config?: GoogleAIModelSettings.ThinkingConfig;
+  }
+  namespace GoogleAIModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface ThinkingConfig {
+      /**
+       * Whether to include thoughts in the model's response.
+       */
+      include_thoughts?: boolean;
+      /**
+       * The thinking budget for the model.
+       */
+      thinking_budget?: number;
+    }
+  }
+  interface GoogleVertexModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'google_vertex';
+    /**
+     * The response schema for the model.
+     */
+    response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
+  }
+  namespace GoogleVertexModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface ThinkingConfig {
+      /**
+       * Whether to include thoughts in the model's response.
+       */
+      include_thoughts?: boolean;
+      /**
+       * The thinking budget for the model.
+       */
+      thinking_budget?: number;
+    }
+  }
+  /**
+   * Azure OpenAI model configuration (OpenAI-compatible).
+   */
+  interface AzureModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'azure';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * xAI model configuration (OpenAI-compatible).
+   */
+  interface XaiModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'xai';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Groq model configuration (OpenAI-compatible).
+   */
+  interface GroqModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'groq';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
   /**
-   *
+   * Deepseek model configuration (OpenAI-compatible).
    */
-  interface
+  interface DeepseekModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
    /**
-   *
+     * Whether to enable parallel tool calling.
     */
-
+    parallel_tool_calls?: boolean;
    /**
     * The provider of the model.
     */
-  provider
+    provider?: 'deepseek';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
   }
   /**
-   *
+   * Together AI model configuration (OpenAI-compatible).
    */
-  interface
+  interface TogetherModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'together';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
    /**
-   * The
+     * The temperature of the model.
     */
-
+    temperature?: number;
+  }
+  /**
+   * AWS Bedrock model configuration.
+   */
+  interface BedrockModelSettings {
    /**
     * The maximum number of tokens the model can generate.
     */
    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'bedrock';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
   }
 }
 /**
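All ten variants share `max_output_tokens`, `parallel_tool_calls`, and `temperature`; they differ in the `provider` literal and the provider-specific extras (OpenAI `reasoning`, Anthropic `thinking`/`verbosity`, the Google variants' `thinking_config` plus `response_schema` in place of `response_format`). That makes `provider` a usable discriminant for narrowing. A type-level sketch; the deep import path is an assumption about the package's export map, so adjust it to however your build surfaces these types:

```ts
import type { AgentState } from '@letta-ai/letta-client/resources/agents/agents';

type ModelSettings = NonNullable<AgentState['model_settings']>;

// Summarize whatever reasoning/thinking knob the active provider exposes.
function reasoningSummary(settings: ModelSettings): string | undefined {
  switch (settings.provider) {
    case 'openai':
      return settings.reasoning?.reasoning_effort;
    case 'anthropic':
      return settings.thinking?.type === 'enabled'
        ? `thinking budget: ${settings.thinking.budget_tokens ?? 'default'}`
        : undefined;
    case 'google_ai':
    case 'google_vertex':
      return settings.thinking_config?.include_thoughts ? 'thoughts included' : undefined;
    default:
      // azure, xai, groq, deepseek, together, bedrock expose no reasoning fields.
      return undefined;
  }
}
```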
@@ -811,10 +1084,9 @@ export interface AgentCreateParams {
   */
  description?: string | null;
  /**
-  * The embedding
-  * provider/model-name.
+  * The embedding model handle used by the agent (format: provider/model-name).
   */
- embedding?: string |
+ embedding?: string | null;
  /**
   * @deprecated Deprecated: No longer used. The embedding chunk size used by the
   * agent.
@@ -913,10 +1185,13 @@ export interface AgentCreateParams {
   [key: string]: unknown;
  } | null;
  /**
-  * The model handle
-  * handle or an object. See the model schema for more information.
+  * The model handle for the agent to use (format: provider/model-name).
   */
- model?: string |
+ model?: string | null;
+ /**
+  * The model settings for the agent.
+  */
+ model_settings?: AgentCreateParams.OpenAIModelSettings | AgentCreateParams.AnthropicModelSettings | AgentCreateParams.GoogleAIModelSettings | AgentCreateParams.GoogleVertexModelSettings | AgentCreateParams.AzureModelSettings | AgentCreateParams.XaiModelSettings | AgentCreateParams.GroqModelSettings | AgentCreateParams.DeepseekModelSettings | AgentCreateParams.TogetherModelSettings | AgentCreateParams.BedrockModelSettings | null;
  /**
   * The name of the agent.
   */
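`AgentCreateParams` mirrors the state shape: handle strings for `model` and `embedding`, plus an optional `model_settings` variant whose `provider` literal selects which extras are legal. A hedged create-call sketch; the handle values are illustrative examples, and the client construction follows the same assumed SDK conventions as above:

```ts
import Letta from '@letta-ai/letta-client';

const client = new Letta({ apiKey: process.env.LETTA_API_KEY });

async function createAgent() {
  return client.agents.create({
    name: 'support-bot',
    // Nullable "provider/model-name" handle strings (example values).
    model: 'openai/gpt-4.1',
    embedding: 'openai/text-embedding-3-small',
    // One of the ten variants; only the OpenAI variant accepts `reasoning`.
    model_settings: {
      provider: 'openai',
      temperature: 0.2,
      max_output_tokens: 2048,
      parallel_tool_calls: true,
      reasoning: { reasoning_effort: 'low' },
    },
  });
}
```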
@@ -1003,80 +1278,367 @@ export interface AgentCreateParams {
  tools?: Array<string> | null;
 }
 export declare namespace AgentCreateParams {
-  interface
+  interface OpenAIModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
    /**
-   *
+     * Whether to enable parallel tool calling.
     */
-
+    parallel_tool_calls?: boolean;
    /**
     * The provider of the model.
     */
-  provider
+    provider?: 'openai';
+    /**
+     * The reasoning configuration for the model.
+     */
+    reasoning?: OpenAIModelSettings.Reasoning;
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
   }
-
-   * Schema for defining settings for a model
-   */
-  interface ModelSettings {
+  namespace OpenAIModelSettings {
    /**
-   * The
+     * The reasoning configuration for the model.
     */
-
+    interface Reasoning {
+      /**
+       * The reasoning effort to use when generating text reasoning models
+       */
+      reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
+    }
+  }
+  interface AnthropicModelSettings {
    /**
     * The maximum number of tokens the model can generate.
     */
    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'anthropic';
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking?: AnthropicModelSettings.Thinking;
+    /**
+     * Soft control for how verbose model output should be, used for GPT-5 models.
+     */
+    verbosity?: 'low' | 'medium' | 'high' | null;
   }
- [old lines 1029-1079: remainder of the removed shared ModelSettings declarations; their content is not rendered in the diff source]
+  namespace AnthropicModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface Thinking {
+      /**
+       * The maximum number of tokens the model can use for extended thinking.
+       */
+      budget_tokens?: number;
+      /**
+       * The type of thinking to use.
+       */
+      type?: 'enabled' | 'disabled';
+    }
+  }
+  interface GoogleAIModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'google_ai';
+    /**
+     * The response schema for the model.
+     */
+    response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking_config?: GoogleAIModelSettings.ThinkingConfig;
+  }
+  namespace GoogleAIModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface ThinkingConfig {
+      /**
+       * Whether to include thoughts in the model's response.
+       */
+      include_thoughts?: boolean;
+      /**
+       * The thinking budget for the model.
+       */
+      thinking_budget?: number;
+    }
+  }
+  interface GoogleVertexModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'google_vertex';
+    /**
+     * The response schema for the model.
+     */
+    response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
+  }
+  namespace GoogleVertexModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface ThinkingConfig {
+      /**
+       * Whether to include thoughts in the model's response.
+       */
+      include_thoughts?: boolean;
+      /**
+       * The thinking budget for the model.
+       */
+      thinking_budget?: number;
+    }
+  }
+  /**
+   * Azure OpenAI model configuration (OpenAI-compatible).
+   */
+  interface AzureModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'azure';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * xAI model configuration (OpenAI-compatible).
+   */
+  interface XaiModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'xai';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Groq model configuration (OpenAI-compatible).
+   */
+  interface GroqModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'groq';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Deepseek model configuration (OpenAI-compatible).
+   */
+  interface DeepseekModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'deepseek';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Together AI model configuration (OpenAI-compatible).
+   */
+  interface TogetherModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'together';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * AWS Bedrock model configuration.
+   */
+  interface BedrockModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'bedrock';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+}
+export interface AgentRetrieveParams {
+  /**
+   * Specify which relational fields to include in the response. No relationships are
+   * included by default.
+   */
+  include?: Array<'agent.blocks' | 'agent.identities' | 'agent.managed_group' | 'agent.secrets' | 'agent.sources' | 'agent.tags' | 'agent.tools'>;
+  /**
+   * Specify which relational fields (e.g., 'tools', 'sources', 'memory') to include
+   * in the response. If not provided, all relationships are loaded by default. Using
+   * this can optimize performance by reducing unnecessary joins.This is a legacy
+   * parameter, and no longer supported after 1.0.0 SDK versions.
+   */
+  include_relationships?: Array<string> | null;
+}
+export interface AgentListParams extends ArrayPageParams {
+  /**
+   * @deprecated Whether to sort agents oldest to newest (True) or newest to oldest
+   * (False, default)
+   */
+  ascending?: boolean;
+  /**
+   * Search agents by base template ID
+   */
+  base_template_id?: string | null;
+  /**
+   * Search agents by identifier keys
+   */
+  identifier_keys?: Array<string> | null;
+  /**
+   * Search agents by identity ID
+   */
+  identity_id?: string | null;
+  /**
+   * Specify which relational fields to include in the response. No relationships are
+   * included by default.
+   */
+  include?: Array<'agent.blocks' | 'agent.identities' | 'agent.managed_group' | 'agent.secrets' | 'agent.sources' | 'agent.tags' | 'agent.tools'>;
+  /**
+   * Specify which relational fields (e.g., 'tools', 'sources', 'memory') to include
+   * in the response. If not provided, all relationships are loaded by default. Using
+   * this can optimize performance by reducing unnecessary joins.This is a legacy
+   * parameter, and no longer supported after 1.0.0 SDK versions.
+   */
+  include_relationships?: Array<string> | null;
+  /**
+   * Filter agents by their last stop reason.
+   */
+  last_stop_reason?: RunsAPI.StopReasonType | null;
+  /**
+   * If True, only returns agents that match ALL given tags. Otherwise, return agents
   * that have ANY of the passed-in tags.
   */
  match_all_tags?: boolean;
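Both new param interfaces flip the relationship-loading default: the legacy `include_relationships` loaded everything unless told otherwise, while the new `include` loads nothing unless asked. A retrieval/listing sketch; the method names follow the resource layout in this package, but the placeholder ID, the stop-reason literal, and iterating the list result as an auto-paginating page are assumptions:

```ts
import Letta from '@letta-ai/letta-client';

const client = new Letta({ apiKey: process.env.LETTA_API_KEY });

async function demo() {
  // Opt in to exactly the relations you need; none are joined by default.
  const agent = await client.agents.retrieve('agent-123', {
    include: ['agent.tools', 'agent.blocks'],
  });
  console.log(agent.name);

  // The same `include` works on list, next to the new filters.
  const page = await client.agents.list({
    include: ['agent.tags'],
    last_stop_reason: 'end_turn', // assumed StopReasonType value
  });
  for await (const a of page) {
    console.log(a.id);
  }
}
```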
@@ -1178,10 +1740,9 @@ export interface AgentModifyParams {
   */
  description?: string | null;
  /**
-  * The embedding
-  * provider/model-name.
+  * The embedding model handle used by the agent (format: provider/model-name).
   */
- embedding?: string |
+ embedding?: string | null;
  /**
   * Configuration for embedding model connection and processing parameters.
   */
@@ -1241,10 +1802,13 @@ export interface AgentModifyParams {
   [key: string]: unknown;
  } | null;
  /**
-  * The model used by the agent
-
+  * The model handle used by the agent (format: provider/model-name).
+  */
+ model?: string | null;
+ /**
+  * The model settings for the agent.
   */
-
+ model_settings?: AgentModifyParams.OpenAIModelSettings | AgentModifyParams.AnthropicModelSettings | AgentModifyParams.GoogleAIModelSettings | AgentModifyParams.GoogleVertexModelSettings | AgentModifyParams.AzureModelSettings | AgentModifyParams.XaiModelSettings | AgentModifyParams.GroqModelSettings | AgentModifyParams.DeepseekModelSettings | AgentModifyParams.TogetherModelSettings | AgentModifyParams.BedrockModelSettings | null;
  /**
   * The name of the agent.
   */
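`AgentModifyParams` accepts the same discriminated settings object, so moving an agent between providers means swapping the handle and the matching variant together. A sketch; the `modify` method name is inferred from the param type's name, and the Anthropic handle is an example value:

```ts
import Letta from '@letta-ai/letta-client';

const client = new Letta({ apiKey: process.env.LETTA_API_KEY });

async function moveToClaude(agentId: string) {
  return client.agents.modify(agentId, {
    model: 'anthropic/claude-sonnet-4',
    model_settings: {
      provider: 'anthropic',
      max_output_tokens: 4096,
      // Extended-thinking knobs exist only on the Anthropic variant.
      thinking: { type: 'enabled', budget_tokens: 1024 },
    },
  });
}
```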
@@ -1315,28 +1879,315 @@ export interface AgentModifyParams {
  tool_rules?: Array<ChildToolRule | InitToolRule | TerminalToolRule | ConditionalToolRule | ContinueToolRule | RequiredBeforeExitToolRule | MaxCountPerStepToolRule | ParentToolRule | RequiresApprovalToolRule> | null;
 }
 export declare namespace AgentModifyParams {
-  interface
+  interface OpenAIModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'openai';
+    /**
+     * The reasoning configuration for the model.
+     */
+    reasoning?: OpenAIModelSettings.Reasoning;
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  namespace OpenAIModelSettings {
+    /**
+     * The reasoning configuration for the model.
+     */
+    interface Reasoning {
+      /**
+       * The reasoning effort to use when generating text reasoning models
+       */
+      reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high';
+    }
+  }
+  interface AnthropicModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'anthropic';
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking?: AnthropicModelSettings.Thinking;
+    /**
+     * Soft control for how verbose model output should be, used for GPT-5 models.
+     */
+    verbosity?: 'low' | 'medium' | 'high' | null;
+  }
+  namespace AnthropicModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface Thinking {
+      /**
+       * The maximum number of tokens the model can use for extended thinking.
+       */
+      budget_tokens?: number;
+      /**
+       * The type of thinking to use.
+       */
+      type?: 'enabled' | 'disabled';
+    }
+  }
+  interface GoogleAIModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'google_ai';
+    /**
+     * The response schema for the model.
+     */
+    response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking_config?: GoogleAIModelSettings.ThinkingConfig;
+  }
+  namespace GoogleAIModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface ThinkingConfig {
+      /**
+       * Whether to include thoughts in the model's response.
+       */
+      include_thoughts?: boolean;
+      /**
+       * The thinking budget for the model.
+       */
+      thinking_budget?: number;
+    }
+  }
+  interface GoogleVertexModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'google_vertex';
+    /**
+     * The response schema for the model.
+     */
+    response_schema?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+    /**
+     * The thinking configuration for the model.
+     */
+    thinking_config?: GoogleVertexModelSettings.ThinkingConfig;
+  }
+  namespace GoogleVertexModelSettings {
+    /**
+     * The thinking configuration for the model.
+     */
+    interface ThinkingConfig {
+      /**
+       * Whether to include thoughts in the model's response.
+       */
+      include_thoughts?: boolean;
+      /**
+       * The thinking budget for the model.
+       */
+      thinking_budget?: number;
+    }
+  }
+  /**
+   * Azure OpenAI model configuration (OpenAI-compatible).
+   */
+  interface AzureModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
    /**
-   *
+     * Whether to enable parallel tool calling.
     */
-
+    parallel_tool_calls?: boolean;
    /**
     * The provider of the model.
     */
-  provider
+    provider?: 'azure';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
   }
   /**
-   *
+   * xAI model configuration (OpenAI-compatible).
    */
-  interface
+  interface XaiModelSettings {
    /**
-   * The
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'xai';
+    /**
+     * The response format for the model.
     */
-
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Groq model configuration (OpenAI-compatible).
+   */
+  interface GroqModelSettings {
    /**
     * The maximum number of tokens the model can generate.
     */
    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'groq';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Deepseek model configuration (OpenAI-compatible).
+   */
+  interface DeepseekModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'deepseek';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * Together AI model configuration (OpenAI-compatible).
+   */
+  interface TogetherModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'together';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
+  }
+  /**
+   * AWS Bedrock model configuration.
+   */
+  interface BedrockModelSettings {
+    /**
+     * The maximum number of tokens the model can generate.
+     */
+    max_output_tokens?: number;
+    /**
+     * Whether to enable parallel tool calling.
+     */
+    parallel_tool_calls?: boolean;
+    /**
+     * The provider of the model.
+     */
+    provider?: 'bedrock';
+    /**
+     * The response format for the model.
+     */
+    response_format?: AgentsAPI.TextResponseFormat | AgentsAPI.JsonSchemaResponseFormat | AgentsAPI.JsonObjectResponseFormat | null;
+    /**
+     * The temperature of the model.
+     */
+    temperature?: number;
   }
 }
 export declare namespace Agents {