letta-client 0.1.324__py3-none-any.whl → 1.0.0a2__py3-none-any.whl
This diff shows the contents of publicly available package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.
Potentially problematic release.
This version of letta-client might be problematic.
- letta_client/__init__.py +229 -59
- letta_client/agents/__init__.py +15 -17
- letta_client/agents/blocks/__init__.py +3 -0
- letta_client/agents/blocks/client.py +77 -4
- letta_client/agents/blocks/raw_client.py +63 -2
- letta_client/agents/blocks/types/__init__.py +7 -0
- letta_client/agents/blocks/types/blocks_list_request_order.py +5 -0
- letta_client/agents/client.py +46 -13
- letta_client/agents/files/__init__.py +3 -0
- letta_client/agents/files/client.py +71 -10
- letta_client/agents/files/raw_client.py +51 -10
- letta_client/agents/files/types/__init__.py +7 -0
- letta_client/{types/tool_return_status.py → agents/files/types/files_list_request_order.py} +1 -1
- letta_client/agents/folders/__init__.py +3 -0
- letta_client/agents/folders/client.py +77 -4
- letta_client/agents/folders/raw_client.py +63 -2
- letta_client/agents/folders/types/__init__.py +7 -0
- letta_client/agents/folders/types/folders_list_request_order.py +5 -0
- letta_client/agents/groups/__init__.py +3 -0
- letta_client/agents/groups/client.py +71 -2
- letta_client/agents/groups/raw_client.py +51 -0
- letta_client/agents/groups/types/__init__.py +7 -0
- letta_client/agents/groups/types/groups_list_request_order.py +5 -0
- letta_client/agents/messages/__init__.py +2 -0
- letta_client/agents/messages/client.py +57 -18
- letta_client/agents/messages/raw_client.py +37 -18
- letta_client/agents/messages/types/__init__.py +2 -0
- letta_client/agents/messages/types/messages_list_request_order.py +5 -0
- letta_client/agents/passages/client.py +29 -0
- letta_client/agents/raw_client.py +4 -4
- letta_client/agents/sources/__init__.py +3 -0
- letta_client/agents/sources/client.py +77 -4
- letta_client/agents/sources/raw_client.py +63 -2
- letta_client/agents/sources/types/__init__.py +7 -0
- letta_client/agents/sources/types/sources_list_request_order.py +5 -0
- letta_client/agents/tools/__init__.py +3 -0
- letta_client/agents/tools/client.py +77 -4
- letta_client/agents/tools/raw_client.py +63 -2
- letta_client/agents/tools/types/__init__.py +7 -0
- letta_client/agents/tools/types/tools_list_request_order.py +5 -0
- letta_client/archives/client.py +16 -2
- letta_client/base_client.py +3 -0
- letta_client/batches/client.py +12 -2
- letta_client/batches/messages/client.py +10 -0
- letta_client/blocks/agents/client.py +8 -0
- letta_client/blocks/client.py +32 -2
- letta_client/chat/__init__.py +7 -0
- letta_client/chat/client.py +255 -0
- letta_client/chat/raw_client.py +269 -0
- letta_client/chat/types/__init__.py +8 -0
- letta_client/chat/types/chat_completion_request_messages_item.py +19 -0
- letta_client/chat/types/chat_completion_request_stop.py +5 -0
- letta_client/client_side_access_tokens/client.py +10 -2
- letta_client/core/client_wrapper.py +2 -2
- letta_client/errors/__init__.py +2 -0
- letta_client/errors/gone_error.py +10 -0
- letta_client/folders/agents/client.py +8 -0
- letta_client/folders/client.py +20 -4
- letta_client/folders/files/client.py +14 -0
- letta_client/folders/passages/client.py +8 -0
- letta_client/groups/client.py +16 -2
- letta_client/groups/messages/client.py +14 -0
- letta_client/identities/agents/client.py +8 -0
- letta_client/identities/blocks/client.py +8 -0
- letta_client/identities/client.py +20 -2
- letta_client/jobs/__init__.py +3 -0
- letta_client/jobs/client.py +61 -12
- letta_client/jobs/raw_client.py +29 -8
- letta_client/jobs/types/__init__.py +7 -0
- letta_client/jobs/types/jobs_list_request_order.py +5 -0
- letta_client/models/client.py +8 -2
- letta_client/projects/client.py +10 -2
- letta_client/providers/client.py +90 -2
- letta_client/providers/raw_client.py +102 -0
- letta_client/runs/__init__.py +11 -2
- letta_client/runs/client.py +152 -20
- letta_client/runs/messages/client.py +30 -2
- letta_client/runs/messages/raw_client.py +10 -0
- letta_client/runs/raw_client.py +146 -16
- letta_client/runs/steps/__init__.py +3 -0
- letta_client/runs/steps/client.py +39 -30
- letta_client/runs/steps/raw_client.py +19 -28
- letta_client/runs/steps/types/__init__.py +7 -0
- letta_client/runs/steps/types/steps_list_request_order.py +5 -0
- letta_client/runs/types/__init__.py +2 -1
- letta_client/runs/types/runs_list_request_order.py +5 -0
- letta_client/sources/client.py +8 -2
- letta_client/sources/files/client.py +12 -0
- letta_client/sources/passages/client.py +6 -0
- letta_client/steps/client.py +26 -2
- letta_client/steps/messages/client.py +8 -0
- letta_client/tags/client.py +16 -2
- letta_client/templates/__init__.py +88 -36
- letta_client/templates/client.py +165 -4
- letta_client/templates/raw_client.py +205 -2
- letta_client/templates/types/__init__.py +176 -72
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_llm_config.py +4 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_llm_config_display_name.py +14 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_llm_config_display_name_item.py +5 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_secrets_item.py +4 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_secrets_item_value_enc.py +14 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_secrets_item_value_enc_item.py +5 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_exec_environment_variables_item.py +6 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_exec_environment_variables_item_value_enc.py +16 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_exec_environment_variables_item_value_enc_item.py +7 -0
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_one.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_args.py} +11 -7
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_one_prompt_template.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_args_args.py} +4 -5
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_args_args_item.py +7 -0
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_zero_prompt_template.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_args_prompt_template.py} +4 -4
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_one_prompt_template_item.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_args_prompt_template_item.py} +1 -1
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_zero_type.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_args_type.py} +1 -1
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_zero.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes.py} +13 -7
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes.py +16 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item.py +12 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item_args.py +17 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item_args_item.py +7 -0
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_one.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item_item.py} +8 -10
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item_item_args.py +17 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item_item_args_item.py +7 -0
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_zero_prompt_template.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_prompt_template.py} +4 -4
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_one_prompt_template_item.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_prompt_template_item.py} +1 -1
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_zero_type.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_child_arg_nodes_type.py} +1 -1
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item.py +8 -8
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_zero.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_item_args.py} +11 -8
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_args_args.py +13 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_args_args_item.py +7 -0
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_one_prompt_template.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_item_args_prompt_template.py} +4 -4
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_zero_prompt_template_item.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_item_args_prompt_template_item.py} +1 -1
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_item_one_type.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_item_args_type.py} +1 -1
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes.py +37 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes.py +18 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes_item.py +12 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes_item_args.py +17 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes_item_args_item.py +7 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes_item_item.py +28 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes_item_item_args.py +17 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_child_arg_nodes_item_item_args_item.py +7 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_prompt_template.py +18 -0
- letta_client/templates/types/{templates_create_agents_from_template_response_agents_item_tool_rules_item_zero_prompt_template_item.py → templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_prompt_template_item.py} +1 -1
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_item_child_arg_nodes_type.py +7 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tools_item_tool_type.py +1 -1
- letta_client/templates/types/templates_create_template_request_agent_file.py +5 -0
- letta_client/templates/types/templates_get_template_snapshot_response_agents_item_tool_rules_item.py +8 -8
- letta_client/templates/types/{templates_get_template_snapshot_response_agents_item_tool_rules_item_one.py → templates_get_template_snapshot_response_agents_item_tool_rules_item_args.py} +2 -1
- letta_client/templates/types/templates_get_template_snapshot_response_agents_item_tool_rules_item_child_arg_nodes.py +29 -0
- letta_client/templates/types/{templates_get_template_snapshot_response_agents_item_tool_rules_item_zero.py → templates_get_template_snapshot_response_agents_item_tool_rules_item_child_arg_nodes_child_arg_nodes_item.py} +3 -5
- letta_client/templates/types/templates_update_current_template_from_agent_file_response.py +21 -0
- letta_client/tools/client.py +30 -220
- letta_client/tools/raw_client.py +0 -292
- letta_client/types/__init__.py +134 -22
- letta_client/types/agent_environment_variable.py +5 -0
- letta_client/types/{action_parameters_model.py → annotation.py} +4 -10
- letta_client/types/{action_response_model.py → annotation_url_citation.py} +4 -9
- letta_client/types/approval_create.py +8 -2
- letta_client/types/approval_create_approvals_item.py +8 -0
- letta_client/types/approval_response_message.py +8 -2
- letta_client/types/approval_response_message_approvals_item.py +8 -0
- letta_client/types/approval_return.py +34 -0
- letta_client/{agents/templates/types/templates_migrate_response.py → types/audio.py} +4 -4
- letta_client/types/chat_completion.py +30 -0
- letta_client/types/chat_completion_assistant_message_param.py +30 -0
- letta_client/types/chat_completion_assistant_message_param_content.py +9 -0
- letta_client/types/chat_completion_assistant_message_param_content_item.py +10 -0
- letta_client/types/chat_completion_assistant_message_param_tool_calls_item.py +10 -0
- letta_client/types/chat_completion_audio.py +23 -0
- letta_client/types/chat_completion_content_part_image_param.py +22 -0
- letta_client/types/chat_completion_content_part_input_audio_param.py +22 -0
- letta_client/types/chat_completion_content_part_refusal_param.py +21 -0
- letta_client/types/chat_completion_content_part_text_param.py +21 -0
- letta_client/types/chat_completion_developer_message_param.py +23 -0
- letta_client/types/chat_completion_developer_message_param_content.py +7 -0
- letta_client/types/chat_completion_function_message_param.py +22 -0
- letta_client/types/chat_completion_message.py +30 -0
- letta_client/types/chat_completion_message_custom_tool_call.py +23 -0
- letta_client/types/chat_completion_message_custom_tool_call_param.py +23 -0
- letta_client/types/chat_completion_message_function_tool_call_input.py +25 -0
- letta_client/types/{chat_completion_message_function_tool_call.py → chat_completion_message_function_tool_call_output.py} +3 -3
- letta_client/types/chat_completion_message_function_tool_call_param.py +25 -0
- letta_client/types/chat_completion_message_tool_calls_item.py +10 -0
- letta_client/types/chat_completion_service_tier.py +5 -0
- letta_client/types/chat_completion_system_message_param.py +23 -0
- letta_client/types/chat_completion_system_message_param_content.py +7 -0
- letta_client/types/chat_completion_token_logprob.py +24 -0
- letta_client/types/chat_completion_tool_message_param.py +23 -0
- letta_client/types/chat_completion_tool_message_param_content.py +7 -0
- letta_client/types/chat_completion_user_message_param.py +23 -0
- letta_client/types/chat_completion_user_message_param_content.py +7 -0
- letta_client/types/chat_completion_user_message_param_content_item.py +15 -0
- letta_client/types/child_tool_rule.py +6 -0
- letta_client/types/choice.py +26 -0
- letta_client/types/choice_finish_reason.py +7 -0
- letta_client/types/choice_logprobs.py +22 -0
- letta_client/types/completion_tokens_details.py +23 -0
- letta_client/types/{auth_scheme_field.py → completion_usage.py} +8 -13
- letta_client/types/custom_input.py +21 -0
- letta_client/types/custom_output.py +21 -0
- letta_client/types/file.py +22 -0
- letta_client/types/file_file.py +22 -0
- letta_client/types/function_call_input.py +21 -0
- letta_client/types/function_call_output.py +21 -0
- letta_client/types/{function.py → function_output.py} +1 -1
- letta_client/types/image_url.py +22 -0
- letta_client/types/image_url_detail.py +5 -0
- letta_client/types/init_tool_rule.py +5 -0
- letta_client/types/input_audio.py +22 -0
- letta_client/types/input_audio_format.py +5 -0
- letta_client/types/internal_template_agent_create.py +2 -2
- letta_client/types/letta_schemas_agent_file_agent_schema.py +2 -2
- letta_client/types/letta_schemas_agent_file_message_schema.py +27 -4
- letta_client/types/letta_schemas_agent_file_message_schema_approvals_item.py +8 -0
- letta_client/types/letta_schemas_letta_message_tool_return.py +26 -0
- letta_client/types/letta_schemas_letta_message_tool_return_status.py +5 -0
- letta_client/types/{tool_return.py → letta_schemas_message_tool_return.py} +9 -3
- letta_client/types/letta_schemas_message_tool_return_status.py +5 -0
- letta_client/types/llm_config.py +5 -0
- letta_client/types/message.py +15 -4
- letta_client/types/message_approvals_item.py +8 -0
- letta_client/types/omitted_reasoning_content.py +4 -0
- letta_client/types/openai_types_chat_chat_completion_message_function_tool_call_function.py +21 -0
- letta_client/types/openai_types_chat_chat_completion_message_function_tool_call_param_function.py +21 -0
- letta_client/types/prompt_tokens_details.py +21 -0
- letta_client/types/provider.py +10 -0
- letta_client/types/run.py +28 -41
- letta_client/types/run_metrics.py +58 -0
- letta_client/types/run_status.py +5 -0
- letta_client/types/sandbox_environment_variable.py +5 -0
- letta_client/types/step.py +2 -2
- letta_client/types/step_metrics.py +2 -2
- letta_client/types/text_content.py +5 -0
- letta_client/types/tool_call_content.py +5 -0
- letta_client/types/tool_call_message.py +2 -0
- letta_client/types/tool_call_message_tool_calls.py +8 -0
- letta_client/types/tool_call_node.py +35 -0
- letta_client/types/tool_return_message.py +8 -5
- letta_client/types/tool_type.py +1 -1
- letta_client/types/top_logprob.py +22 -0
- letta_client/voice/client.py +14 -0
- letta_client/voice/raw_client.py +37 -0
- letta_client-1.0.0a2.dist-info/METADATA +422 -0
- {letta_client-0.1.324.dist-info → letta_client-1.0.0a2.dist-info}/RECORD +241 -141
- letta_client/agents/templates/__init__.py +0 -7
- letta_client/agents/templates/client.py +0 -307
- letta_client/agents/templates/raw_client.py +0 -275
- letta_client/agents/templates/types/__init__.py +0 -7
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_rules_item_one_type.py +0 -5
- letta_client/types/action_model.py +0 -39
- letta_client/types/app_auth_scheme.py +0 -35
- letta_client/types/app_auth_scheme_auth_mode.py +0 -19
- letta_client/types/app_model.py +0 -45
- letta_client-0.1.324.dist-info/METADATA +0 -211
- {letta_client-0.1.324.dist-info → letta_client-1.0.0a2.dist-info}/WHEEL +0 -0
letta_client/chat/client.py
ADDED
@@ -0,0 +1,255 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.request_options import RequestOptions
+from ..types.chat_completion import ChatCompletion
+from .raw_client import AsyncRawChatClient, RawChatClient
+from .types.chat_completion_request_messages_item import ChatCompletionRequestMessagesItem
+from .types.chat_completion_request_stop import ChatCompletionRequestStop
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ChatClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._raw_client = RawChatClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> RawChatClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        RawChatClient
+        """
+        return self._raw_client
+
+    def create_chat_completion(
+        self,
+        *,
+        model: str,
+        messages: typing.Sequence[ChatCompletionRequestMessagesItem],
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        n: typing.Optional[int] = OMIT,
+        stream: typing.Optional[bool] = OMIT,
+        stop: typing.Optional[ChatCompletionRequestStop] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ChatCompletion:
+        """
+        Create a chat completion using a Letta agent (OpenAI-compatible).
+
+        This endpoint provides full OpenAI API compatibility. The agent is selected based on:
+        - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
+
+        When streaming is enabled (stream=true), the response will be Server-Sent Events
+        with ChatCompletionChunk objects.
+
+        Parameters
+        ----------
+        model : str
+            ID of the model to use
+
+        messages : typing.Sequence[ChatCompletionRequestMessagesItem]
+            Messages comprising the conversation so far
+
+        temperature : typing.Optional[float]
+            Sampling temperature
+
+        top_p : typing.Optional[float]
+            Nucleus sampling parameter
+
+        n : typing.Optional[int]
+            Number of chat completion choices to generate
+
+        stream : typing.Optional[bool]
+            Whether to stream back partial progress
+
+        stop : typing.Optional[ChatCompletionRequestStop]
+            Sequences where the API will stop generating
+
+        max_tokens : typing.Optional[int]
+            Maximum number of tokens to generate
+
+        presence_penalty : typing.Optional[float]
+            Presence penalty
+
+        frequency_penalty : typing.Optional[float]
+            Frequency penalty
+
+        user : typing.Optional[str]
+            A unique identifier representing your end-user
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ChatCompletion
+            Successful response
+
+        Examples
+        --------
+        from letta_client import ChatCompletionDeveloperMessageParam, Letta
+
+        client = Letta(
+            project="YOUR_PROJECT",
+            token="YOUR_TOKEN",
+        )
+        client.chat.create_chat_completion(
+            model="model",
+            messages=[
+                ChatCompletionDeveloperMessageParam(
+                    content="content",
+                )
+            ],
+        )
+        """
+        _response = self._raw_client.create_chat_completion(
+            model=model,
+            messages=messages,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            stream=stream,
+            stop=stop,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            user=user,
+            request_options=request_options,
+        )
+        return _response.data
+
+
+class AsyncChatClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._raw_client = AsyncRawChatClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> AsyncRawChatClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        AsyncRawChatClient
+        """
+        return self._raw_client
+
+    async def create_chat_completion(
+        self,
+        *,
+        model: str,
+        messages: typing.Sequence[ChatCompletionRequestMessagesItem],
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        n: typing.Optional[int] = OMIT,
+        stream: typing.Optional[bool] = OMIT,
+        stop: typing.Optional[ChatCompletionRequestStop] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ChatCompletion:
+        """
+        Create a chat completion using a Letta agent (OpenAI-compatible).
+
+        This endpoint provides full OpenAI API compatibility. The agent is selected based on:
+        - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
+
+        When streaming is enabled (stream=true), the response will be Server-Sent Events
+        with ChatCompletionChunk objects.
+
+        Parameters
+        ----------
+        model : str
+            ID of the model to use
+
+        messages : typing.Sequence[ChatCompletionRequestMessagesItem]
+            Messages comprising the conversation so far
+
+        temperature : typing.Optional[float]
+            Sampling temperature
+
+        top_p : typing.Optional[float]
+            Nucleus sampling parameter
+
+        n : typing.Optional[int]
+            Number of chat completion choices to generate
+
+        stream : typing.Optional[bool]
+            Whether to stream back partial progress
+
+        stop : typing.Optional[ChatCompletionRequestStop]
+            Sequences where the API will stop generating
+
+        max_tokens : typing.Optional[int]
+            Maximum number of tokens to generate
+
+        presence_penalty : typing.Optional[float]
+            Presence penalty
+
+        frequency_penalty : typing.Optional[float]
+            Frequency penalty
+
+        user : typing.Optional[str]
+            A unique identifier representing your end-user
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ChatCompletion
+            Successful response
+
+        Examples
+        --------
+        import asyncio
+
+        from letta_client import AsyncLetta, ChatCompletionDeveloperMessageParam
+
+        client = AsyncLetta(
+            project="YOUR_PROJECT",
+            token="YOUR_TOKEN",
+        )
+
+
+        async def main() -> None:
+            await client.chat.create_chat_completion(
+                model="model",
+                messages=[
+                    ChatCompletionDeveloperMessageParam(
+                        content="content",
+                    )
+                ],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.create_chat_completion(
+            model=model,
+            messages=messages,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            stream=stream,
+            stop=stop,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            user=user,
+            request_options=request_options,
+        )
+        return _response.data
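Since the file above is auto-generated, its docstring example uses placeholder values. A minimal usage sketch, assuming the same top-level re-exports the docstring shows and a hypothetical agent ID (per the docstring, the "model" field carries the target agent's ID, not an LLM name):

import typing

from letta_client import ChatCompletionUserMessageParam, Letta

client = Letta(project="YOUR_PROJECT", token="YOUR_TOKEN")

# "model" routes the request to a Letta agent; "agent-..." below is a
# hypothetical placeholder for a real agent ID.
completion = client.chat.create_chat_completion(
    model="agent-...",
    messages=[ChatCompletionUserMessageParam(content="Hello!")],
    max_tokens=128,
)
print(completion)  # a ChatCompletion model (OpenAI-compatible shape)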
letta_client/chat/raw_client.py
ADDED
@@ -0,0 +1,269 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.request_options import RequestOptions
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.chat_completion import ChatCompletion
+from ..types.http_validation_error import HttpValidationError
+from .types.chat_completion_request_messages_item import ChatCompletionRequestMessagesItem
+from .types.chat_completion_request_stop import ChatCompletionRequestStop
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawChatClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def create_chat_completion(
+        self,
+        *,
+        model: str,
+        messages: typing.Sequence[ChatCompletionRequestMessagesItem],
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        n: typing.Optional[int] = OMIT,
+        stream: typing.Optional[bool] = OMIT,
+        stop: typing.Optional[ChatCompletionRequestStop] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[ChatCompletion]:
+        """
+        Create a chat completion using a Letta agent (OpenAI-compatible).
+
+        This endpoint provides full OpenAI API compatibility. The agent is selected based on:
+        - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
+
+        When streaming is enabled (stream=true), the response will be Server-Sent Events
+        with ChatCompletionChunk objects.
+
+        Parameters
+        ----------
+        model : str
+            ID of the model to use
+
+        messages : typing.Sequence[ChatCompletionRequestMessagesItem]
+            Messages comprising the conversation so far
+
+        temperature : typing.Optional[float]
+            Sampling temperature
+
+        top_p : typing.Optional[float]
+            Nucleus sampling parameter
+
+        n : typing.Optional[int]
+            Number of chat completion choices to generate
+
+        stream : typing.Optional[bool]
+            Whether to stream back partial progress
+
+        stop : typing.Optional[ChatCompletionRequestStop]
+            Sequences where the API will stop generating
+
+        max_tokens : typing.Optional[int]
+            Maximum number of tokens to generate
+
+        presence_penalty : typing.Optional[float]
+            Presence penalty
+
+        frequency_penalty : typing.Optional[float]
+            Frequency penalty
+
+        user : typing.Optional[str]
+            A unique identifier representing your end-user
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[ChatCompletion]
+            Successful response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/chat/completions",
+            method="POST",
+            json={
+                "model": model,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatCompletionRequestMessagesItem], direction="write"
+                ),
+                "temperature": temperature,
+                "top_p": top_p,
+                "n": n,
+                "stream": stream,
+                "stop": convert_and_respect_annotation_metadata(
+                    object_=stop, annotation=ChatCompletionRequestStop, direction="write"
+                ),
+                "max_tokens": max_tokens,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "user": user,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ChatCompletion,
+                    construct_type(
+                        type_=ChatCompletion,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+class AsyncRawChatClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def create_chat_completion(
+        self,
+        *,
+        model: str,
+        messages: typing.Sequence[ChatCompletionRequestMessagesItem],
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        n: typing.Optional[int] = OMIT,
+        stream: typing.Optional[bool] = OMIT,
+        stop: typing.Optional[ChatCompletionRequestStop] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ChatCompletion]:
+        """
+        Create a chat completion using a Letta agent (OpenAI-compatible).
+
+        This endpoint provides full OpenAI API compatibility. The agent is selected based on:
+        - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
+
+        When streaming is enabled (stream=true), the response will be Server-Sent Events
+        with ChatCompletionChunk objects.
+
+        Parameters
+        ----------
+        model : str
+            ID of the model to use
+
+        messages : typing.Sequence[ChatCompletionRequestMessagesItem]
+            Messages comprising the conversation so far
+
+        temperature : typing.Optional[float]
+            Sampling temperature
+
+        top_p : typing.Optional[float]
+            Nucleus sampling parameter
+
+        n : typing.Optional[int]
+            Number of chat completion choices to generate
+
+        stream : typing.Optional[bool]
+            Whether to stream back partial progress
+
+        stop : typing.Optional[ChatCompletionRequestStop]
+            Sequences where the API will stop generating
+
+        max_tokens : typing.Optional[int]
+            Maximum number of tokens to generate
+
+        presence_penalty : typing.Optional[float]
+            Presence penalty
+
+        frequency_penalty : typing.Optional[float]
+            Frequency penalty
+
+        user : typing.Optional[str]
+            A unique identifier representing your end-user
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[ChatCompletion]
+            Successful response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/chat/completions",
+            method="POST",
+            json={
+                "model": model,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatCompletionRequestMessagesItem], direction="write"
+                ),
+                "temperature": temperature,
+                "top_p": top_p,
+                "n": n,
+                "stream": stream,
+                "stop": convert_and_respect_annotation_metadata(
+                    object_=stop, annotation=ChatCompletionRequestStop, direction="write"
+                ),
+                "max_tokens": max_tokens,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "user": user,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ChatCompletion,
+                    construct_type(
+                        type_=ChatCompletion,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
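A sketch of the with_raw_response path, which returns the HttpResponse[ChatCompletion] wrapper built above instead of only the parsed model (placeholder values as in the generated docstring examples):

from letta_client import ChatCompletionDeveloperMessageParam, Letta

client = Letta(project="YOUR_PROJECT", token="YOUR_TOKEN")

# RawChatClient.create_chat_completion wraps the parsed ChatCompletion in
# an HttpResponse; .data holds the model constructed via construct_type.
raw = client.chat.with_raw_response.create_chat_completion(
    model="agent-...",  # hypothetical agent ID
    messages=[ChatCompletionDeveloperMessageParam(content="content")],
)
print(raw.data)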
letta_client/chat/types/__init__.py
ADDED
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+from .chat_completion_request_messages_item import ChatCompletionRequestMessagesItem
+from .chat_completion_request_stop import ChatCompletionRequestStop
+
+__all__ = ["ChatCompletionRequestMessagesItem", "ChatCompletionRequestStop"]
letta_client/chat/types/chat_completion_request_messages_item.py
ADDED
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
+from ...types.chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam
+from ...types.chat_completion_function_message_param import ChatCompletionFunctionMessageParam
+from ...types.chat_completion_system_message_param import ChatCompletionSystemMessageParam
+from ...types.chat_completion_tool_message_param import ChatCompletionToolMessageParam
+from ...types.chat_completion_user_message_param import ChatCompletionUserMessageParam
+
+ChatCompletionRequestMessagesItem = typing.Union[
+    ChatCompletionDeveloperMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionUserMessageParam,
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionFunctionMessageParam,
+]
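ChatCompletionRequestMessagesItem is a plain typing.Union over the six OpenAI-style role params, so a request body can mix roles freely. A small sketch (assuming the param classes are re-exported from letta_client, as the generated docstrings suggest):

import typing

from letta_client import (
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)
from letta_client.chat.types import ChatCompletionRequestMessagesItem

# Any member of the union type-checks as a list element.
messages: typing.List[ChatCompletionRequestMessagesItem] = [
    ChatCompletionSystemMessageParam(content="You are terse."),
    ChatCompletionUserMessageParam(content="Summarize the release notes."),
]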
letta_client/client_side_access_tokens/client.py
CHANGED
@@ -68,7 +68,11 @@ class ClientSideAccessTokensClient:
             project="YOUR_PROJECT",
             token="YOUR_TOKEN",
         )
-        client.client_side_access_tokens.client_side_access_tokens_list_client_side_access_tokens()
+        client.client_side_access_tokens.client_side_access_tokens_list_client_side_access_tokens(
+            agent_id="agentId",
+            offset=1.1,
+            limit=1.1,
+        )
         """
         _response = self._raw_client.client_side_access_tokens_list_client_side_access_tokens(
             agent_id=agent_id, offset=offset, limit=limit, request_options=request_options
@@ -230,7 +234,11 @@ class AsyncClientSideAccessTokensClient:


        async def main() -> None:
-            await client.client_side_access_tokens.client_side_access_tokens_list_client_side_access_tokens()
+            await client.client_side_access_tokens.client_side_access_tokens_list_client_side_access_tokens(
+                agent_id="agentId",
+                offset=1.1,
+                limit=1.1,
+            )


        asyncio.run(main())
letta_client/core/client_wrapper.py
CHANGED
@@ -24,10 +24,10 @@ class BaseClientWrapper:

     def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
-            "User-Agent": "letta-client/0.1.324",
+            "User-Agent": "letta-client/1.0.0a2",
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "letta-client",
-            "X-Fern-SDK-Version": "0.1.324",
+            "X-Fern-SDK-Version": "1.0.0a2",
             **(self.get_custom_headers() or {}),
         }
         if self._project is not None:
letta_client/errors/__init__.py
CHANGED
@@ -4,6 +4,7 @@

 from .bad_request_error import BadRequestError
 from .conflict_error import ConflictError
+from .gone_error import GoneError
 from .internal_server_error import InternalServerError
 from .not_found_error import NotFoundError
 from .payment_required_error import PaymentRequiredError
@@ -12,6 +13,7 @@ from .unprocessable_entity_error import UnprocessableEntityError
 __all__ = [
     "BadRequestError",
     "ConflictError",
+    "GoneError",
     "InternalServerError",
     "NotFoundError",
     "PaymentRequiredError",
letta_client/errors/gone_error.py
ADDED
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ..core.api_error import ApiError
+
+
+class GoneError(ApiError):
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+        super().__init__(status_code=410, headers=headers, body=body)
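GoneError slots into the existing ApiError hierarchy with a fixed 410 status. A handling sketch (the retrieve call is hypothetical; any endpoint that can answer HTTP 410 applies):

from letta_client import Letta
from letta_client.errors import GoneError

client = Letta(project="YOUR_PROJECT", token="YOUR_TOKEN")

try:
    client.agents.retrieve(agent_id="agent-...")  # hypothetical call site
except GoneError as e:
    # status_code is pinned to 410 by the subclass constructor above.
    print(e.status_code, e.body)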
letta_client/folders/agents/client.py
CHANGED
@@ -74,6 +74,10 @@ class AgentsClient:
         )
         client.folders.agents.list(
             folder_id="folder_id",
+            before="before",
+            after="after",
+            limit=1,
+            order="asc",
         )
         """
         _response = self._raw_client.list(
@@ -159,6 +163,10 @@ class AsyncAgentsClient:
        async def main() -> None:
            await client.folders.agents.list(
                folder_id="folder_id",
+                before="before",
+                after="after",
+                limit=1,
+                order="asc",
            )


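The before/after/limit/order arguments added to the generated examples follow the cursor-pagination pattern this release applies across the list endpoints. A forward-paging sketch, assuming list returns a list of agent objects carrying an id field:

from letta_client import Letta

client = Letta(project="YOUR_PROJECT", token="YOUR_TOKEN")

# Walk the folder's agents oldest-first, 50 per page; "after" carries the
# last ID of the previous page as the cursor.
cursor = None
while True:
    page = client.folders.agents.list(
        folder_id="folder_id",
        after=cursor,
        limit=50,
        order="asc",
    )
    if not page:
        break
    cursor = page[-1].id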