letta-client 0.1.233__py3-none-any.whl → 0.1.235__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-client might be problematic; consult the registry's advisory page for more details.
- letta_client/__init__.py +2 -0
- letta_client/agents/__init__.py +2 -0
- letta_client/agents/blocks/__init__.py +2 -0
- letta_client/agents/blocks/client.py +72 -313
- letta_client/agents/blocks/raw_client.py +654 -0
- letta_client/agents/client.py +349 -869
- letta_client/agents/context/__init__.py +2 -0
- letta_client/agents/context/client.py +31 -66
- letta_client/agents/context/raw_client.py +122 -0
- letta_client/agents/core_memory/__init__.py +2 -0
- letta_client/agents/core_memory/client.py +31 -66
- letta_client/agents/core_memory/raw_client.py +124 -0
- letta_client/agents/files/__init__.py +2 -0
- letta_client/agents/files/client.py +39 -178
- letta_client/agents/files/raw_client.py +349 -0
- letta_client/agents/folders/__init__.py +2 -0
- letta_client/agents/folders/client.py +39 -178
- letta_client/agents/folders/raw_client.py +327 -0
- letta_client/agents/groups/__init__.py +2 -0
- letta_client/agents/groups/client.py +31 -72
- letta_client/agents/groups/raw_client.py +142 -0
- letta_client/agents/memory_variables/__init__.py +2 -0
- letta_client/agents/memory_variables/client.py +31 -65
- letta_client/agents/memory_variables/raw_client.py +125 -0
- letta_client/agents/memory_variables/types/__init__.py +2 -0
- letta_client/agents/memory_variables/types/memory_variables_list_response.py +3 -2
- letta_client/agents/messages/__init__.py +2 -0
- letta_client/agents/messages/client.py +150 -589
- letta_client/agents/messages/raw_client.py +1384 -0
- letta_client/agents/messages/types/__init__.py +2 -0
- letta_client/agents/messages/types/letta_streaming_response.py +6 -5
- letta_client/agents/messages/types/messages_modify_request.py +3 -2
- letta_client/agents/messages/types/messages_modify_response.py +5 -4
- letta_client/agents/messages/types/messages_preview_raw_payload_request.py +1 -0
- letta_client/agents/passages/__init__.py +2 -0
- letta_client/agents/passages/client.py +89 -301
- letta_client/agents/passages/raw_client.py +678 -0
- letta_client/agents/raw_client.py +2088 -0
- letta_client/agents/sources/__init__.py +2 -0
- letta_client/agents/sources/client.py +39 -178
- letta_client/agents/sources/raw_client.py +327 -0
- letta_client/agents/templates/__init__.py +2 -0
- letta_client/agents/templates/client.py +57 -276
- letta_client/agents/templates/raw_client.py +505 -0
- letta_client/agents/templates/types/__init__.py +2 -0
- letta_client/agents/templates/types/templates_create_response.py +5 -4
- letta_client/agents/templates/types/templates_migrate_response.py +3 -2
- letta_client/agents/tools/__init__.py +2 -0
- letta_client/agents/tools/client.py +40 -179
- letta_client/agents/tools/raw_client.py +327 -0
- letta_client/agents/types/__init__.py +2 -0
- letta_client/agents/types/agents_search_request_search_item.py +4 -3
- letta_client/agents/types/agents_search_request_search_item_field.py +4 -3
- letta_client/agents/types/agents_search_request_search_item_one.py +4 -3
- letta_client/agents/types/agents_search_request_search_item_three.py +3 -2
- letta_client/agents/types/agents_search_request_search_item_two.py +3 -2
- letta_client/agents/types/agents_search_request_search_item_zero.py +3 -2
- letta_client/agents/types/agents_search_response.py +5 -4
- letta_client/agents/types/create_agent_request_response_format.py +1 -0
- letta_client/agents/types/create_agent_request_tool_rules_item.py +4 -3
- letta_client/agents/types/update_agent_response_format.py +1 -0
- letta_client/agents/types/update_agent_tool_rules_item.py +4 -3
- letta_client/base_client.py +41 -49
- letta_client/batches/__init__.py +2 -0
- letta_client/batches/client.py +45 -253
- letta_client/batches/raw_client.py +457 -0
- letta_client/blocks/__init__.py +2 -0
- letta_client/blocks/agents/__init__.py +2 -0
- letta_client/blocks/agents/client.py +33 -70
- letta_client/blocks/agents/raw_client.py +144 -0
- letta_client/blocks/client.py +126 -424
- letta_client/blocks/raw_client.py +973 -0
- letta_client/client_side_access_tokens/__init__.py +2 -0
- letta_client/client_side_access_tokens/client.py +45 -216
- letta_client/client_side_access_tokens/raw_client.py +435 -0
- letta_client/client_side_access_tokens/types/__init__.py +2 -0
- letta_client/client_side_access_tokens/types/client_side_access_tokens_create_request_policy_item.py +4 -3
- letta_client/client_side_access_tokens/types/client_side_access_tokens_create_response.py +6 -5
- letta_client/client_side_access_tokens/types/client_side_access_tokens_create_response_policy.py +4 -3
- letta_client/client_side_access_tokens/types/client_side_access_tokens_create_response_policy_data_item.py +4 -3
- letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response.py +6 -5
- letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response_tokens_item.py +6 -5
- letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response_tokens_item_policy.py +4 -3
- letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response_tokens_item_policy_data_item.py +4 -3
- letta_client/core/__init__.py +5 -0
- letta_client/core/api_error.py +13 -5
- letta_client/core/client_wrapper.py +14 -5
- letta_client/core/force_multipart.py +16 -0
- letta_client/core/http_client.py +70 -26
- letta_client/core/http_response.py +55 -0
- letta_client/core/jsonable_encoder.py +0 -1
- letta_client/core/pydantic_utilities.py +70 -111
- letta_client/core/serialization.py +7 -3
- letta_client/core/unchecked_base_model.py +40 -4
- letta_client/embedding_models/__init__.py +2 -0
- letta_client/embedding_models/client.py +31 -65
- letta_client/embedding_models/raw_client.py +117 -0
- letta_client/errors/__init__.py +2 -0
- letta_client/errors/bad_request_error.py +4 -3
- letta_client/errors/conflict_error.py +4 -2
- letta_client/errors/internal_server_error.py +4 -3
- letta_client/errors/not_found_error.py +4 -3
- letta_client/errors/payment_required_error.py +4 -2
- letta_client/errors/unprocessable_entity_error.py +4 -2
- letta_client/folders/__init__.py +2 -0
- letta_client/folders/client.py +100 -564
- letta_client/folders/files/__init__.py +2 -0
- letta_client/folders/files/client.py +44 -189
- letta_client/folders/files/raw_client.py +391 -0
- letta_client/folders/passages/__init__.py +2 -0
- letta_client/folders/passages/client.py +33 -74
- letta_client/folders/passages/raw_client.py +162 -0
- letta_client/folders/raw_client.py +1084 -0
- letta_client/groups/__init__.py +2 -0
- letta_client/groups/client.py +87 -406
- letta_client/groups/messages/__init__.py +2 -0
- letta_client/groups/messages/client.py +110 -371
- letta_client/groups/messages/raw_client.py +881 -0
- letta_client/groups/messages/types/__init__.py +2 -0
- letta_client/groups/messages/types/letta_streaming_response.py +5 -4
- letta_client/groups/messages/types/messages_modify_request.py +3 -2
- letta_client/groups/messages/types/messages_modify_response.py +5 -4
- letta_client/groups/raw_client.py +807 -0
- letta_client/groups/types/__init__.py +2 -0
- letta_client/groups/types/group_create_manager_config.py +1 -0
- letta_client/groups/types/group_update_manager_config.py +1 -0
- letta_client/health/__init__.py +2 -0
- letta_client/health/client.py +31 -43
- letta_client/health/raw_client.py +85 -0
- letta_client/identities/__init__.py +2 -0
- letta_client/identities/client.py +115 -498
- letta_client/identities/properties/__init__.py +2 -0
- letta_client/identities/properties/client.py +32 -76
- letta_client/identities/properties/raw_client.py +152 -0
- letta_client/identities/raw_client.py +1010 -0
- letta_client/jobs/__init__.py +2 -0
- letta_client/jobs/client.py +67 -310
- letta_client/jobs/raw_client.py +624 -0
- letta_client/messages/__init__.py +2 -0
- letta_client/messages/client.py +41 -74
- letta_client/messages/raw_client.py +186 -0
- letta_client/models/__init__.py +2 -0
- letta_client/models/client.py +39 -73
- letta_client/models/raw_client.py +151 -0
- letta_client/projects/__init__.py +2 -0
- letta_client/projects/client.py +31 -53
- letta_client/projects/raw_client.py +125 -0
- letta_client/projects/types/__init__.py +2 -0
- letta_client/projects/types/projects_list_response.py +5 -4
- letta_client/projects/types/projects_list_response_projects_item.py +3 -2
- letta_client/providers/__init__.py +2 -0
- letta_client/providers/client.py +62 -335
- letta_client/providers/raw_client.py +664 -0
- letta_client/runs/__init__.py +2 -0
- letta_client/runs/client.py +56 -258
- letta_client/runs/messages/__init__.py +2 -0
- letta_client/runs/messages/client.py +34 -79
- letta_client/runs/messages/raw_client.py +209 -0
- letta_client/runs/raw_client.py +440 -0
- letta_client/runs/steps/__init__.py +2 -0
- letta_client/runs/steps/client.py +33 -76
- letta_client/runs/steps/raw_client.py +192 -0
- letta_client/runs/usage/__init__.py +2 -0
- letta_client/runs/usage/client.py +31 -66
- letta_client/runs/usage/raw_client.py +122 -0
- letta_client/sources/__init__.py +2 -0
- letta_client/sources/client.py +107 -625
- letta_client/sources/files/__init__.py +2 -0
- letta_client/sources/files/client.py +44 -189
- letta_client/sources/files/raw_client.py +391 -0
- letta_client/sources/passages/__init__.py +2 -0
- letta_client/sources/passages/client.py +33 -74
- letta_client/sources/passages/raw_client.py +162 -0
- letta_client/sources/raw_client.py +1209 -0
- letta_client/steps/__init__.py +2 -0
- letta_client/steps/client.py +65 -153
- letta_client/steps/feedback/__init__.py +2 -0
- letta_client/steps/feedback/client.py +32 -73
- letta_client/steps/feedback/raw_client.py +141 -0
- letta_client/steps/raw_client.py +355 -0
- letta_client/steps/types/__init__.py +2 -0
- letta_client/tags/__init__.py +2 -0
- letta_client/tags/client.py +33 -73
- letta_client/tags/raw_client.py +148 -0
- letta_client/telemetry/__init__.py +2 -0
- letta_client/telemetry/client.py +31 -66
- letta_client/telemetry/raw_client.py +118 -0
- letta_client/templates/__init__.py +2 -0
- letta_client/templates/agents/__init__.py +2 -0
- letta_client/templates/agents/client.py +48 -96
- letta_client/templates/agents/raw_client.py +227 -0
- letta_client/templates/agents/types/__init__.py +2 -0
- letta_client/templates/agents/types/agents_create_request_initial_message_sequence_item.py +4 -3
- letta_client/templates/agents/types/agents_create_response.py +4 -3
- letta_client/templates/client.py +36 -57
- letta_client/templates/raw_client.py +133 -0
- letta_client/templates/types/__init__.py +2 -0
- letta_client/templates/types/templates_list_response.py +5 -4
- letta_client/templates/types/templates_list_response_templates_item.py +3 -2
- letta_client/tools/__init__.py +2 -0
- letta_client/tools/client.py +195 -1322
- letta_client/tools/raw_client.py +2565 -0
- letta_client/tools/types/__init__.py +2 -0
- letta_client/tools/types/add_mcp_server_request.py +2 -1
- letta_client/tools/types/add_mcp_server_response_item.py +2 -1
- letta_client/tools/types/connect_mcp_server_request.py +2 -1
- letta_client/tools/types/delete_mcp_server_response_item.py +2 -1
- letta_client/tools/types/list_mcp_servers_response_value.py +1 -0
- letta_client/tools/types/streaming_response.py +5 -4
- letta_client/tools/types/test_mcp_server_request.py +2 -1
- letta_client/tools/types/update_mcp_server_request.py +2 -1
- letta_client/tools/types/update_mcp_server_response.py +2 -1
- letta_client/types/__init__.py +2 -0
- letta_client/types/action_model.py +6 -5
- letta_client/types/action_parameters_model.py +3 -2
- letta_client/types/action_response_model.py +3 -2
- letta_client/types/agent_environment_variable.py +3 -2
- letta_client/types/agent_schema.py +7 -6
- letta_client/types/agent_schema_tool_rules_item.py +2 -1
- letta_client/types/agent_state.py +14 -8
- letta_client/types/agent_state_response_format.py +1 -0
- letta_client/types/agent_state_tool_rules_item.py +4 -3
- letta_client/types/app_auth_scheme.py +4 -3
- letta_client/types/app_model.py +5 -4
- letta_client/types/assistant_message.py +3 -2
- letta_client/types/assistant_message_content.py +1 -0
- letta_client/types/audio.py +3 -2
- letta_client/types/auth_request.py +2 -1
- letta_client/types/auth_response.py +5 -4
- letta_client/types/auth_scheme_field.py +3 -2
- letta_client/types/bad_request_error_body.py +3 -2
- letta_client/types/base_64_image.py +2 -1
- letta_client/types/base_tool_rule_schema.py +3 -2
- letta_client/types/batch_job.py +4 -3
- letta_client/types/block.py +3 -2
- letta_client/types/block_update.py +2 -1
- letta_client/types/chat_completion_assistant_message_param.py +5 -4
- letta_client/types/chat_completion_assistant_message_param_content.py +1 -0
- letta_client/types/chat_completion_assistant_message_param_content_item.py +2 -1
- letta_client/types/chat_completion_audio_param.py +4 -3
- letta_client/types/chat_completion_content_part_image_param.py +4 -3
- letta_client/types/chat_completion_content_part_input_audio_param.py +4 -3
- letta_client/types/chat_completion_content_part_refusal_param.py +3 -2
- letta_client/types/chat_completion_content_part_text_param.py +3 -2
- letta_client/types/chat_completion_developer_message_param.py +4 -3
- letta_client/types/chat_completion_developer_message_param_content.py +1 -0
- letta_client/types/chat_completion_function_call_option_param.py +3 -2
- letta_client/types/chat_completion_function_message_param.py +3 -2
- letta_client/types/chat_completion_message_tool_call.py +4 -3
- letta_client/types/chat_completion_message_tool_call_param.py +4 -3
- letta_client/types/chat_completion_named_tool_choice_param.py +4 -3
- letta_client/types/chat_completion_prediction_content_param.py +4 -3
- letta_client/types/chat_completion_prediction_content_param_content.py +1 -0
- letta_client/types/chat_completion_stream_options_param.py +3 -2
- letta_client/types/chat_completion_system_message_param.py +4 -3
- letta_client/types/chat_completion_system_message_param_content.py +1 -0
- letta_client/types/chat_completion_tool_message_param.py +4 -3
- letta_client/types/chat_completion_tool_message_param_content.py +1 -0
- letta_client/types/chat_completion_tool_param.py +4 -3
- letta_client/types/chat_completion_user_message_param.py +4 -3
- letta_client/types/chat_completion_user_message_param_content.py +1 -0
- letta_client/types/chat_completion_user_message_param_content_item.py +2 -1
- letta_client/types/child_tool_rule.py +3 -2
- letta_client/types/child_tool_rule_schema.py +3 -2
- letta_client/types/code_input.py +3 -2
- letta_client/types/completion_create_params_non_streaming.py +10 -9
- letta_client/types/completion_create_params_non_streaming_function_call.py +1 -0
- letta_client/types/completion_create_params_non_streaming_messages_item.py +4 -3
- letta_client/types/completion_create_params_non_streaming_response_format.py +3 -2
- letta_client/types/completion_create_params_non_streaming_tool_choice.py +1 -0
- letta_client/types/completion_create_params_streaming.py +10 -9
- letta_client/types/completion_create_params_streaming_function_call.py +1 -0
- letta_client/types/completion_create_params_streaming_messages_item.py +4 -3
- letta_client/types/completion_create_params_streaming_response_format.py +3 -2
- letta_client/types/completion_create_params_streaming_tool_choice.py +1 -0
- letta_client/types/conditional_tool_rule.py +3 -2
- letta_client/types/conditional_tool_rule_schema.py +3 -2
- letta_client/types/conflict_error_body.py +3 -2
- letta_client/types/context_window_overview.py +4 -3
- letta_client/types/continue_tool_rule.py +3 -2
- letta_client/types/core_memory_block_schema.py +4 -3
- letta_client/types/create_block.py +3 -2
- letta_client/types/dynamic_manager.py +2 -1
- letta_client/types/dynamic_manager_update.py +2 -1
- letta_client/types/e_2_b_sandbox_config.py +2 -1
- letta_client/types/embedding_config.py +4 -3
- letta_client/types/file.py +4 -3
- letta_client/types/file_block.py +4 -3
- letta_client/types/file_file.py +3 -2
- letta_client/types/file_metadata.py +5 -4
- letta_client/types/file_stats.py +3 -2
- letta_client/types/folder.py +5 -4
- letta_client/types/function_call.py +3 -2
- letta_client/types/function_definition_input.py +3 -2
- letta_client/types/function_definition_output.py +3 -2
- letta_client/types/function_output.py +3 -2
- letta_client/types/function_tool.py +4 -3
- letta_client/types/generate_tool_input.py +3 -2
- letta_client/types/generate_tool_output.py +4 -3
- letta_client/types/group.py +4 -3
- letta_client/types/health.py +3 -2
- letta_client/types/hidden_reasoning_message.py +4 -3
- letta_client/types/http_validation_error.py +4 -3
- letta_client/types/identity.py +4 -3
- letta_client/types/identity_property.py +5 -4
- letta_client/types/image_content.py +3 -2
- letta_client/types/image_content_source.py +1 -0
- letta_client/types/image_url.py +4 -3
- letta_client/types/init_tool_rule.py +3 -2
- letta_client/types/input_audio.py +4 -3
- letta_client/types/internal_server_error_body.py +3 -2
- letta_client/types/job.py +4 -3
- letta_client/types/json_object_response_format.py +3 -2
- letta_client/types/json_schema.py +4 -3
- letta_client/types/json_schema_response_format.py +2 -1
- letta_client/types/letta_batch_messages.py +4 -3
- letta_client/types/letta_batch_request.py +4 -3
- letta_client/types/letta_image.py +2 -1
- letta_client/types/letta_message_content_union.py +4 -3
- letta_client/types/letta_message_union.py +5 -4
- letta_client/types/letta_ping.py +2 -1
- letta_client/types/letta_request.py +4 -3
- letta_client/types/letta_request_config.py +3 -2
- letta_client/types/letta_response.py +4 -3
- letta_client/types/letta_stop_reason.py +3 -2
- letta_client/types/letta_streaming_request.py +9 -3
- letta_client/types/letta_usage_statistics.py +3 -2
- letta_client/types/letta_user_message_content_union.py +2 -1
- letta_client/types/llm_config.py +6 -5
- letta_client/types/local_sandbox_config.py +3 -2
- letta_client/types/max_count_per_step_tool_rule.py +3 -2
- letta_client/types/max_count_per_step_tool_rule_schema.py +3 -2
- letta_client/types/mcp_tool.py +4 -3
- letta_client/types/memory.py +4 -3
- letta_client/types/message.py +6 -5
- letta_client/types/message_content_item.py +1 -0
- letta_client/types/message_create.py +5 -4
- letta_client/types/message_create_content.py +1 -0
- letta_client/types/message_schema.py +4 -3
- letta_client/types/modal_sandbox_config.py +2 -1
- letta_client/types/not_found_error_body.py +3 -2
- letta_client/types/omitted_reasoning_content.py +3 -2
- letta_client/types/openai_types_chat_chat_completion_message_tool_call_param_function.py +3 -2
- letta_client/types/openai_types_chat_chat_completion_named_tool_choice_param_function.py +3 -2
- letta_client/types/openai_types_chat_completion_create_params_function.py +3 -2
- letta_client/types/organization.py +3 -2
- letta_client/types/organization_create.py +2 -1
- letta_client/types/organization_sources_stats.py +3 -2
- letta_client/types/organization_update.py +2 -1
- letta_client/types/parameter_properties.py +3 -2
- letta_client/types/parameters_schema.py +4 -3
- letta_client/types/parent_tool_rule.py +3 -2
- letta_client/types/passage.py +4 -3
- letta_client/types/payment_required_error_body.py +3 -2
- letta_client/types/pip_requirement.py +3 -2
- letta_client/types/provider.py +5 -4
- letta_client/types/provider_check.py +4 -3
- letta_client/types/provider_trace.py +3 -2
- letta_client/types/reasoning_content.py +2 -1
- letta_client/types/reasoning_message.py +4 -3
- letta_client/types/redacted_reasoning_content.py +2 -1
- letta_client/types/required_before_exit_tool_rule.py +3 -2
- letta_client/types/response_format_json_object.py +3 -2
- letta_client/types/response_format_json_schema.py +4 -3
- letta_client/types/response_format_text.py +3 -2
- letta_client/types/round_robin_manager.py +2 -1
- letta_client/types/round_robin_manager_update.py +2 -1
- letta_client/types/run.py +4 -3
- letta_client/types/sandbox_config.py +4 -3
- letta_client/types/sandbox_config_create.py +4 -3
- letta_client/types/sandbox_config_create_config.py +2 -1
- letta_client/types/sandbox_config_update.py +3 -2
- letta_client/types/sandbox_config_update_config.py +2 -1
- letta_client/types/sandbox_environment_variable.py +3 -2
- letta_client/types/sandbox_environment_variable_create.py +3 -2
- letta_client/types/sandbox_environment_variable_update.py +2 -1
- letta_client/types/sleeptime_manager.py +2 -1
- letta_client/types/sleeptime_manager_update.py +2 -1
- letta_client/types/source.py +5 -4
- letta_client/types/source_create.py +4 -3
- letta_client/types/source_stats.py +4 -3
- letta_client/types/source_update.py +3 -2
- letta_client/types/sse_server_config.py +4 -3
- letta_client/types/stdio_server_config.py +4 -3
- letta_client/types/step.py +5 -4
- letta_client/types/streamable_http_server_config.py +4 -3
- letta_client/types/supervisor_manager.py +2 -1
- letta_client/types/supervisor_manager_update.py +2 -1
- letta_client/types/system_message.py +2 -1
- letta_client/types/tag_schema.py +3 -2
- letta_client/types/terminal_tool_rule.py +3 -2
- letta_client/types/text_content.py +2 -1
- letta_client/types/text_response_format.py +3 -2
- letta_client/types/tool.py +5 -4
- letta_client/types/tool_annotations.py +4 -3
- letta_client/types/tool_call.py +3 -2
- letta_client/types/tool_call_content.py +2 -1
- letta_client/types/tool_call_delta.py +3 -2
- letta_client/types/tool_call_message.py +4 -3
- letta_client/types/tool_call_message_tool_call.py +1 -0
- letta_client/types/tool_create.py +3 -2
- letta_client/types/tool_env_var_schema.py +3 -2
- letta_client/types/tool_json_schema.py +4 -3
- letta_client/types/tool_return.py +4 -3
- letta_client/types/tool_return_content.py +2 -1
- letta_client/types/tool_return_message.py +4 -3
- letta_client/types/tool_schema.py +5 -4
- letta_client/types/update_assistant_message.py +3 -2
- letta_client/types/update_assistant_message_content.py +1 -0
- letta_client/types/update_reasoning_message.py +3 -2
- letta_client/types/update_ssemcp_server.py +2 -1
- letta_client/types/update_stdio_mcp_server.py +3 -2
- letta_client/types/update_streamable_httpmcp_server.py +2 -1
- letta_client/types/update_system_message.py +2 -1
- letta_client/types/update_user_message.py +3 -2
- letta_client/types/update_user_message_content.py +1 -0
- letta_client/types/url_image.py +2 -1
- letta_client/types/usage_statistics.py +5 -4
- letta_client/types/usage_statistics_completion_token_details.py +3 -2
- letta_client/types/usage_statistics_prompt_token_details.py +3 -2
- letta_client/types/user.py +3 -2
- letta_client/types/user_create.py +3 -2
- letta_client/types/user_message.py +3 -2
- letta_client/types/user_message_content.py +1 -0
- letta_client/types/user_update.py +3 -2
- letta_client/types/validation_error.py +4 -3
- letta_client/types/voice_sleeptime_manager.py +2 -1
- letta_client/types/voice_sleeptime_manager_update.py +2 -1
- letta_client/types/web_search_options.py +4 -3
- letta_client/types/web_search_options_user_location.py +4 -3
- letta_client/types/web_search_options_user_location_approximate.py +3 -2
- letta_client/voice/__init__.py +2 -0
- letta_client/voice/client.py +33 -74
- letta_client/voice/raw_client.py +154 -0
- {letta_client-0.1.233.dist-info → letta_client-0.1.235.dist-info}/METADATA +24 -3
- letta_client-0.1.235.dist-info/RECORD +503 -0
- letta_client-0.1.233.dist-info/RECORD +0 -455
- {letta_client-0.1.233.dist-info → letta_client-0.1.235.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1384 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
import contextlib
|
|
4
|
+
import json
|
|
5
|
+
import typing
|
|
6
|
+
from json.decoder import JSONDecodeError
|
|
7
|
+
|
|
8
|
+
import httpx_sse
|
|
9
|
+
from ...core.api_error import ApiError
|
|
10
|
+
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
|
|
11
|
+
from ...core.http_response import AsyncHttpResponse, HttpResponse
|
|
12
|
+
from ...core.jsonable_encoder import jsonable_encoder
|
|
13
|
+
from ...core.request_options import RequestOptions
|
|
14
|
+
from ...core.serialization import convert_and_respect_annotation_metadata
|
|
15
|
+
from ...core.unchecked_base_model import construct_type
|
|
16
|
+
from ...errors.unprocessable_entity_error import UnprocessableEntityError
|
|
17
|
+
from ...types.agent_state import AgentState
|
|
18
|
+
from ...types.http_validation_error import HttpValidationError
|
|
19
|
+
from ...types.letta_message_union import LettaMessageUnion
|
|
20
|
+
from ...types.letta_response import LettaResponse
|
|
21
|
+
from ...types.message_create import MessageCreate
|
|
22
|
+
from ...types.message_type import MessageType
|
|
23
|
+
from ...types.run import Run
|
|
24
|
+
from .types.letta_streaming_response import LettaStreamingResponse
|
|
25
|
+
from .types.messages_modify_request import MessagesModifyRequest
|
|
26
|
+
from .types.messages_modify_response import MessagesModifyResponse
|
|
27
|
+
from .types.messages_preview_raw_payload_request import MessagesPreviewRawPayloadRequest
|
|
28
|
+
|
|
29
|
+
# this is used as the default value for optional parameters
|
|
30
|
+
OMIT = typing.cast(typing.Any, ...)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# Low-level messages client: methods return HttpResponse wrappers rather than
# bare data objects. Auto-generated by Fern from the API definition.
class RawMessagesClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Holds the shared HTTP transport/auth wrapper used by every request method.
        self._client_wrapper = client_wrapper
|
|
36
|
+
|
|
37
|
+
def list(
|
|
38
|
+
self,
|
|
39
|
+
agent_id: str,
|
|
40
|
+
*,
|
|
41
|
+
after: typing.Optional[str] = None,
|
|
42
|
+
before: typing.Optional[str] = None,
|
|
43
|
+
limit: typing.Optional[int] = None,
|
|
44
|
+
group_id: typing.Optional[str] = None,
|
|
45
|
+
use_assistant_message: typing.Optional[bool] = None,
|
|
46
|
+
assistant_message_tool_name: typing.Optional[str] = None,
|
|
47
|
+
assistant_message_tool_kwarg: typing.Optional[str] = None,
|
|
48
|
+
include_err: typing.Optional[bool] = None,
|
|
49
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
50
|
+
) -> HttpResponse[typing.List[LettaMessageUnion]]:
|
|
51
|
+
"""
|
|
52
|
+
Retrieve message history for an agent.
|
|
53
|
+
|
|
54
|
+
Parameters
|
|
55
|
+
----------
|
|
56
|
+
agent_id : str
|
|
57
|
+
|
|
58
|
+
after : typing.Optional[str]
|
|
59
|
+
Message after which to retrieve the returned messages.
|
|
60
|
+
|
|
61
|
+
before : typing.Optional[str]
|
|
62
|
+
Message before which to retrieve the returned messages.
|
|
63
|
+
|
|
64
|
+
limit : typing.Optional[int]
|
|
65
|
+
Maximum number of messages to retrieve.
|
|
66
|
+
|
|
67
|
+
group_id : typing.Optional[str]
|
|
68
|
+
Group ID to filter messages by.
|
|
69
|
+
|
|
70
|
+
use_assistant_message : typing.Optional[bool]
|
|
71
|
+
Whether to use assistant messages
|
|
72
|
+
|
|
73
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
74
|
+
The name of the designated message tool.
|
|
75
|
+
|
|
76
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
77
|
+
The name of the message argument.
|
|
78
|
+
|
|
79
|
+
include_err : typing.Optional[bool]
|
|
80
|
+
Whether to include error messages and error statuses. For debugging purposes only.
|
|
81
|
+
|
|
82
|
+
request_options : typing.Optional[RequestOptions]
|
|
83
|
+
Request-specific configuration.
|
|
84
|
+
|
|
85
|
+
Returns
|
|
86
|
+
-------
|
|
87
|
+
HttpResponse[typing.List[LettaMessageUnion]]
|
|
88
|
+
Successful Response
|
|
89
|
+
"""
|
|
90
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
91
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/messages",
|
|
92
|
+
method="GET",
|
|
93
|
+
params={
|
|
94
|
+
"after": after,
|
|
95
|
+
"before": before,
|
|
96
|
+
"limit": limit,
|
|
97
|
+
"group_id": group_id,
|
|
98
|
+
"use_assistant_message": use_assistant_message,
|
|
99
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
100
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
101
|
+
"include_err": include_err,
|
|
102
|
+
},
|
|
103
|
+
request_options=request_options,
|
|
104
|
+
)
|
|
105
|
+
try:
|
|
106
|
+
if 200 <= _response.status_code < 300:
|
|
107
|
+
_data = typing.cast(
|
|
108
|
+
typing.List[LettaMessageUnion],
|
|
109
|
+
construct_type(
|
|
110
|
+
type_=typing.List[LettaMessageUnion], # type: ignore
|
|
111
|
+
object_=_response.json(),
|
|
112
|
+
),
|
|
113
|
+
)
|
|
114
|
+
return HttpResponse(response=_response, data=_data)
|
|
115
|
+
if _response.status_code == 422:
|
|
116
|
+
raise UnprocessableEntityError(
|
|
117
|
+
headers=dict(_response.headers),
|
|
118
|
+
body=typing.cast(
|
|
119
|
+
HttpValidationError,
|
|
120
|
+
construct_type(
|
|
121
|
+
type_=HttpValidationError, # type: ignore
|
|
122
|
+
object_=_response.json(),
|
|
123
|
+
),
|
|
124
|
+
),
|
|
125
|
+
)
|
|
126
|
+
_response_json = _response.json()
|
|
127
|
+
except JSONDecodeError:
|
|
128
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
|
|
129
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
130
|
+
|
|
131
|
+
def create(
|
|
132
|
+
self,
|
|
133
|
+
agent_id: str,
|
|
134
|
+
*,
|
|
135
|
+
messages: typing.Sequence[MessageCreate],
|
|
136
|
+
max_steps: typing.Optional[int] = OMIT,
|
|
137
|
+
use_assistant_message: typing.Optional[bool] = OMIT,
|
|
138
|
+
assistant_message_tool_name: typing.Optional[str] = OMIT,
|
|
139
|
+
assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
|
|
140
|
+
include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
|
|
141
|
+
enable_thinking: typing.Optional[str] = OMIT,
|
|
142
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
143
|
+
) -> HttpResponse[LettaResponse]:
|
|
144
|
+
"""
|
|
145
|
+
Process a user message and return the agent's response.
|
|
146
|
+
This endpoint accepts a message from a user and processes it through the agent.
|
|
147
|
+
|
|
148
|
+
Parameters
|
|
149
|
+
----------
|
|
150
|
+
agent_id : str
|
|
151
|
+
|
|
152
|
+
messages : typing.Sequence[MessageCreate]
|
|
153
|
+
The messages to be sent to the agent.
|
|
154
|
+
|
|
155
|
+
max_steps : typing.Optional[int]
|
|
156
|
+
Maximum number of steps the agent should take to process the request.
|
|
157
|
+
|
|
158
|
+
use_assistant_message : typing.Optional[bool]
|
|
159
|
+
Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
|
|
160
|
+
|
|
161
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
162
|
+
The name of the designated message tool.
|
|
163
|
+
|
|
164
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
165
|
+
The name of the message argument in the designated message tool.
|
|
166
|
+
|
|
167
|
+
include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
|
|
168
|
+
Only return specified message types in the response. If `None` (default) returns all messages.
|
|
169
|
+
|
|
170
|
+
enable_thinking : typing.Optional[str]
|
|
171
|
+
If set to True, enables reasoning before responses or tool calls from the agent.
|
|
172
|
+
|
|
173
|
+
request_options : typing.Optional[RequestOptions]
|
|
174
|
+
Request-specific configuration.
|
|
175
|
+
|
|
176
|
+
Returns
|
|
177
|
+
-------
|
|
178
|
+
HttpResponse[LettaResponse]
|
|
179
|
+
Successful Response
|
|
180
|
+
"""
|
|
181
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
182
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/messages",
|
|
183
|
+
method="POST",
|
|
184
|
+
json={
|
|
185
|
+
"messages": convert_and_respect_annotation_metadata(
|
|
186
|
+
object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
|
|
187
|
+
),
|
|
188
|
+
"max_steps": max_steps,
|
|
189
|
+
"use_assistant_message": use_assistant_message,
|
|
190
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
191
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
192
|
+
"include_return_message_types": include_return_message_types,
|
|
193
|
+
"enable_thinking": enable_thinking,
|
|
194
|
+
},
|
|
195
|
+
headers={
|
|
196
|
+
"content-type": "application/json",
|
|
197
|
+
},
|
|
198
|
+
request_options=request_options,
|
|
199
|
+
omit=OMIT,
|
|
200
|
+
)
|
|
201
|
+
try:
|
|
202
|
+
if 200 <= _response.status_code < 300:
|
|
203
|
+
_data = typing.cast(
|
|
204
|
+
LettaResponse,
|
|
205
|
+
construct_type(
|
|
206
|
+
type_=LettaResponse, # type: ignore
|
|
207
|
+
object_=_response.json(),
|
|
208
|
+
),
|
|
209
|
+
)
|
|
210
|
+
return HttpResponse(response=_response, data=_data)
|
|
211
|
+
if _response.status_code == 422:
|
|
212
|
+
raise UnprocessableEntityError(
|
|
213
|
+
headers=dict(_response.headers),
|
|
214
|
+
body=typing.cast(
|
|
215
|
+
HttpValidationError,
|
|
216
|
+
construct_type(
|
|
217
|
+
type_=HttpValidationError, # type: ignore
|
|
218
|
+
object_=_response.json(),
|
|
219
|
+
),
|
|
220
|
+
),
|
|
221
|
+
)
|
|
222
|
+
_response_json = _response.json()
|
|
223
|
+
except JSONDecodeError:
|
|
224
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
|
|
225
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
226
|
+
|
|
227
|
+
def modify(
    self,
    agent_id: str,
    message_id: str,
    *,
    request: MessagesModifyRequest,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[MessagesModifyResponse]:
    """
    Update the details of a message associated with an agent.

    Parameters
    ----------
    agent_id : str

    message_id : str

    request : MessagesModifyRequest

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[MessagesModifyResponse]
        Successful Response
    """
    # Serialize the typed request body, honoring field-level serialization metadata.
    _body = convert_and_respect_annotation_metadata(
        object_=request, annotation=MessagesModifyRequest, direction="write"
    )
    _url = f"v1/agents/{jsonable_encoder(agent_id)}/messages/{jsonable_encoder(message_id)}"
    _raw = self._client_wrapper.httpx_client.request(
        _url,
        method="PATCH",
        json=_body,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _raw.status_code < 300:
            # 2xx: deserialize the body into the typed response model.
            _parsed = typing.cast(
                MessagesModifyResponse,
                construct_type(
                    type_=MessagesModifyResponse,  # type: ignore
                    object_=_raw.json(),
                ),
            )
            return HttpResponse(response=_raw, data=_parsed)
        if _raw.status_code == 422:
            # Validation failure: surface the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_raw.json(),
                    ),
                ),
            )
        _error_body = _raw.json()
    except JSONDecodeError:
        # Non-JSON error payload: fall back to reporting the raw response text.
        raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_raw.text)
    raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_error_body)
|
|
291
|
+
|
|
292
|
+
@contextlib.contextmanager
def create_stream(
    self,
    agent_id: str,
    *,
    messages: typing.Sequence[MessageCreate],
    max_steps: typing.Optional[int] = OMIT,
    use_assistant_message: typing.Optional[bool] = OMIT,
    assistant_message_tool_name: typing.Optional[str] = OMIT,
    assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
    include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
    enable_thinking: typing.Optional[str] = OMIT,
    stream_tokens: typing.Optional[bool] = OMIT,
    include_pings: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[LettaStreamingResponse]]]:
    """
    Process a user message and return the agent's response.
    This endpoint accepts a message from a user and processes it through the agent.
    It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.

    Parameters
    ----------
    agent_id : str

    messages : typing.Sequence[MessageCreate]
        The messages to be sent to the agent.

    max_steps : typing.Optional[int]
        Maximum number of steps the agent should take to process the request.

    use_assistant_message : typing.Optional[bool]
        Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.

    assistant_message_tool_name : typing.Optional[str]
        The name of the designated message tool.

    assistant_message_tool_kwarg : typing.Optional[str]
        The name of the message argument in the designated message tool.

    include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
        Only return specified message types in the response. If `None` (default) returns all messages.

    enable_thinking : typing.Optional[str]
        If set to True, enables reasoning before responses or tool calls from the agent.

    stream_tokens : typing.Optional[bool]
        Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).

    include_pings : typing.Optional[bool]
        Whether to include periodic keepalive ping messages in the stream to prevent connection timeouts.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Yields
    ------
    typing.Iterator[HttpResponse[typing.Iterator[LettaStreamingResponse]]]
        Successful response
    """
    with self._client_wrapper.httpx_client.stream(
        f"v1/agents/{jsonable_encoder(agent_id)}/messages/stream",
        method="POST",
        json={
            "messages": convert_and_respect_annotation_metadata(
                object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
            ),
            "max_steps": max_steps,
            "use_assistant_message": use_assistant_message,
            "assistant_message_tool_name": assistant_message_tool_name,
            "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
            "include_return_message_types": include_return_message_types,
            "enable_thinking": enable_thinking,
            "stream_tokens": stream_tokens,
            "include_pings": include_pings,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    ) as _response:

        def _stream() -> HttpResponse[typing.Iterator[LettaStreamingResponse]]:
            try:
                if 200 <= _response.status_code < 300:

                    def _iter():
                        # Lazily decode the server-sent-event stream, one event at a time.
                        _event_source = httpx_sse.EventSource(_response)
                        for _sse in _event_source.iter_sse():
                            # Fixed: identity comparison with None (PEP 8 E711); was `== None`.
                            if _sse.data is None:
                                return
                            try:
                                yield typing.cast(
                                    LettaStreamingResponse,
                                    construct_type(
                                        type_=LettaStreamingResponse,  # type: ignore
                                        object_=json.loads(_sse.data),
                                    ),
                                )
                            except Exception:
                                # NOTE(review): malformed events are silently dropped by the
                                # generated client; preserved as-is to avoid a behavior change.
                                pass
                        return

                    return HttpResponse(response=_response, data=_iter())
                # Error path: a streamed body must be read before it can be inspected.
                _response.read()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        headers=dict(_response.headers),
                        body=typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        ),
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                # Non-JSON error payload: report the raw text instead.
                raise ApiError(
                    status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                )
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

        yield _stream()
|
|
417
|
+
|
|
418
|
+
def cancel(
    self,
    agent_id: str,
    *,
    request: typing.Optional[typing.Sequence[str]] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]:
    """
    Cancel runs associated with an agent. If run_ids are passed in, cancel those in particular.

    Note to cancel active runs associated with an agent, redis is required.

    Parameters
    ----------
    agent_id : str

    request : typing.Optional[typing.Sequence[str]]

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]
        Successful Response
    """
    # The optional run-id list is posted as-is; no extra serialization metadata applies.
    _raw = self._client_wrapper.httpx_client.request(
        f"v1/agents/{jsonable_encoder(agent_id)}/messages/cancel",
        method="POST",
        json=request,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _raw.status_code < 300:
            # 2xx: the success payload is an untyped JSON object.
            _parsed = typing.cast(
                typing.Dict[str, typing.Optional[typing.Any]],
                construct_type(
                    type_=typing.Dict[str, typing.Optional[typing.Any]],  # type: ignore
                    object_=_raw.json(),
                ),
            )
            return HttpResponse(response=_raw, data=_parsed)
        if _raw.status_code == 422:
            # Validation failure: surface the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_raw.json(),
                    ),
                ),
            )
        _error_body = _raw.json()
    except JSONDecodeError:
        # Non-JSON error payload: fall back to the raw response text.
        raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_raw.text)
    raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_error_body)
|
|
479
|
+
|
|
480
|
+
def create_async(
    self,
    agent_id: str,
    *,
    messages: typing.Sequence[MessageCreate],
    max_steps: typing.Optional[int] = OMIT,
    use_assistant_message: typing.Optional[bool] = OMIT,
    assistant_message_tool_name: typing.Optional[str] = OMIT,
    assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
    include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
    enable_thinking: typing.Optional[str] = OMIT,
    callback_url: typing.Optional[str] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[Run]:
    """
    Asynchronously process a user message and return a run object.
    The actual processing happens in the background, and the status can be checked using the run ID.

    This is "asynchronous" in the sense that it's a background job and explicitly must be fetched by the run ID.
    This is more like `send_message_job`

    Parameters
    ----------
    agent_id : str

    messages : typing.Sequence[MessageCreate]
        The messages to be sent to the agent.

    max_steps : typing.Optional[int]
        Maximum number of steps the agent should take to process the request.

    use_assistant_message : typing.Optional[bool]
        Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.

    assistant_message_tool_name : typing.Optional[str]
        The name of the designated message tool.

    assistant_message_tool_kwarg : typing.Optional[str]
        The name of the message argument in the designated message tool.

    include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
        Only return specified message types in the response. If `None` (default) returns all messages.

    enable_thinking : typing.Optional[str]
        If set to True, enables reasoning before responses or tool calls from the agent.

    callback_url : typing.Optional[str]
        Optional callback URL to POST to when the job completes

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[Run]
        Successful Response
    """
    # `omit=OMIT` passes the sentinel marking "not provided" fields; presumably the
    # client wrapper drops those keys from the JSON body — confirm against wrapper docs.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/agents/{jsonable_encoder(agent_id)}/messages/async",
        method="POST",
        json={
            "messages": convert_and_respect_annotation_metadata(
                object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
            ),
            "max_steps": max_steps,
            "use_assistant_message": use_assistant_message,
            "assistant_message_tool_name": assistant_message_tool_name,
            "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
            "include_return_message_types": include_return_message_types,
            "enable_thinking": enable_thinking,
            "callback_url": callback_url,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _response.status_code < 300:
            # 2xx: deserialize the background-job descriptor.
            _data = typing.cast(
                Run,
                construct_type(
                    type_=Run,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: re-raise with the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw text instead of a parsed payload.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
583
|
+
|
|
584
|
+
def reset(
    self,
    agent_id: str,
    *,
    add_default_initial_messages: typing.Optional[bool] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AgentState]:
    """
    Resets the messages for an agent

    Parameters
    ----------
    agent_id : str

    add_default_initial_messages : typing.Optional[bool]
        If true, adds the default initial messages after resetting.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[AgentState]
        Successful Response
    """
    # The only option travels as a query parameter; there is no request body.
    _query = {"add_default_initial_messages": add_default_initial_messages}
    _raw = self._client_wrapper.httpx_client.request(
        f"v1/agents/{jsonable_encoder(agent_id)}/reset-messages",
        method="PATCH",
        params=_query,
        request_options=request_options,
    )
    try:
        if 200 <= _raw.status_code < 300:
            # 2xx: the server returns the updated agent state.
            _parsed = typing.cast(
                AgentState,
                construct_type(
                    type_=AgentState,  # type: ignore
                    object_=_raw.json(),
                ),
            )
            return HttpResponse(response=_raw, data=_parsed)
        if _raw.status_code == 422:
            # Validation failure: surface the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_raw.json(),
                    ),
                ),
            )
        _error_body = _raw.json()
    except JSONDecodeError:
        # Non-JSON error payload: fall back to the raw response text.
        raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_raw.text)
    raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_error_body)
|
|
642
|
+
|
|
643
|
+
def preview_raw_payload(
    self,
    agent_id: str,
    *,
    request: MessagesPreviewRawPayloadRequest,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]:
    """
    Inspect the raw LLM request payload without sending it.

    This endpoint processes the message through the agent loop up until
    the LLM request, then returns the raw request payload that would
    be sent to the LLM provider. Useful for debugging and inspection.

    Parameters
    ----------
    agent_id : str

    request : MessagesPreviewRawPayloadRequest

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]
        Successful Response
    """
    # Serialize the typed request body, honoring field-level serialization metadata.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/agents/{jsonable_encoder(agent_id)}/messages/preview-raw-payload",
        method="POST",
        json=convert_and_respect_annotation_metadata(
            object_=request, annotation=MessagesPreviewRawPayloadRequest, direction="write"
        ),
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _response.status_code < 300:
            # 2xx: the preview payload is returned as an untyped JSON object.
            _data = typing.cast(
                typing.Dict[str, typing.Optional[typing.Any]],
                construct_type(
                    type_=typing.Dict[str, typing.Optional[typing.Any]],  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: re-raise with the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw text instead of a parsed payload.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
708
|
+
|
|
709
|
+
|
|
710
|
+
class AsyncRawMessagesClient:
|
|
711
|
+
def __init__(self, *, client_wrapper: AsyncClientWrapper):
    # Keep a reference to the shared wrapper that supplies the configured async httpx client.
    self._client_wrapper = client_wrapper
|
|
713
|
+
|
|
714
|
+
async def list(
    self,
    agent_id: str,
    *,
    after: typing.Optional[str] = None,
    before: typing.Optional[str] = None,
    limit: typing.Optional[int] = None,
    group_id: typing.Optional[str] = None,
    use_assistant_message: typing.Optional[bool] = None,
    assistant_message_tool_name: typing.Optional[str] = None,
    assistant_message_tool_kwarg: typing.Optional[str] = None,
    include_err: typing.Optional[bool] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[typing.List[LettaMessageUnion]]:
    """
    Retrieve message history for an agent.

    Parameters
    ----------
    agent_id : str

    after : typing.Optional[str]
        Message after which to retrieve the returned messages.

    before : typing.Optional[str]
        Message before which to retrieve the returned messages.

    limit : typing.Optional[int]
        Maximum number of messages to retrieve.

    group_id : typing.Optional[str]
        Group ID to filter messages by.

    use_assistant_message : typing.Optional[bool]
        Whether to use assistant messages

    assistant_message_tool_name : typing.Optional[str]
        The name of the designated message tool.

    assistant_message_tool_kwarg : typing.Optional[str]
        The name of the message argument.

    include_err : typing.Optional[bool]
        Whether to include error messages and error statuses. For debugging purposes only.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[typing.List[LettaMessageUnion]]
        Successful Response
    """
    # Cursor pagination: `after`/`before` are message ids bounding the page, `limit` caps it.
    # NOTE(review): presumably None-valued params are dropped from the query string by the
    # client wrapper — confirm against its implementation.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/agents/{jsonable_encoder(agent_id)}/messages",
        method="GET",
        params={
            "after": after,
            "before": before,
            "limit": limit,
            "group_id": group_id,
            "use_assistant_message": use_assistant_message,
            "assistant_message_tool_name": assistant_message_tool_name,
            "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
            "include_err": include_err,
        },
        request_options=request_options,
    )
    try:
        if 200 <= _response.status_code < 300:
            # 2xx: deserialize each element into the message union type.
            _data = typing.cast(
                typing.List[LettaMessageUnion],
                construct_type(
                    type_=typing.List[LettaMessageUnion],  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: re-raise with the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw text instead of a parsed payload.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
807
|
+
|
|
808
|
+
async def create(
    self,
    agent_id: str,
    *,
    messages: typing.Sequence[MessageCreate],
    max_steps: typing.Optional[int] = OMIT,
    use_assistant_message: typing.Optional[bool] = OMIT,
    assistant_message_tool_name: typing.Optional[str] = OMIT,
    assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
    include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
    enable_thinking: typing.Optional[str] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LettaResponse]:
    """
    Process a user message and return the agent's response.
    This endpoint accepts a message from a user and processes it through the agent.

    Parameters
    ----------
    agent_id : str

    messages : typing.Sequence[MessageCreate]
        The messages to be sent to the agent.

    max_steps : typing.Optional[int]
        Maximum number of steps the agent should take to process the request.

    use_assistant_message : typing.Optional[bool]
        Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.

    assistant_message_tool_name : typing.Optional[str]
        The name of the designated message tool.

    assistant_message_tool_kwarg : typing.Optional[str]
        The name of the message argument in the designated message tool.

    include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
        Only return specified message types in the response. If `None` (default) returns all messages.

    enable_thinking : typing.Optional[str]
        If set to True, enables reasoning before responses or tool calls from the agent.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[LettaResponse]
        Successful Response
    """
    # Blocking (non-streaming) send: the call returns only once the agent has finished.
    # `omit=OMIT` passes the sentinel marking "not provided" fields to the client wrapper.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/agents/{jsonable_encoder(agent_id)}/messages",
        method="POST",
        json={
            "messages": convert_and_respect_annotation_metadata(
                object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
            ),
            "max_steps": max_steps,
            "use_assistant_message": use_assistant_message,
            "assistant_message_tool_name": assistant_message_tool_name,
            "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
            "include_return_message_types": include_return_message_types,
            "enable_thinking": enable_thinking,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _response.status_code < 300:
            # 2xx: deserialize into the typed response model.
            _data = typing.cast(
                LettaResponse,
                construct_type(
                    type_=LettaResponse,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: re-raise with the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw text instead of a parsed payload.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
903
|
+
|
|
904
|
+
async def modify(
    self,
    agent_id: str,
    message_id: str,
    *,
    request: MessagesModifyRequest,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[MessagesModifyResponse]:
    """
    Update the details of a message associated with an agent.

    Parameters
    ----------
    agent_id : str

    message_id : str

    request : MessagesModifyRequest

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[MessagesModifyResponse]
        Successful Response
    """
    # Serialize the typed request body, honoring field-level serialization metadata.
    _body = convert_and_respect_annotation_metadata(
        object_=request, annotation=MessagesModifyRequest, direction="write"
    )
    _url = f"v1/agents/{jsonable_encoder(agent_id)}/messages/{jsonable_encoder(message_id)}"
    _raw = await self._client_wrapper.httpx_client.request(
        _url,
        method="PATCH",
        json=_body,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _raw.status_code < 300:
            # 2xx: deserialize the body into the typed response model.
            _parsed = typing.cast(
                MessagesModifyResponse,
                construct_type(
                    type_=MessagesModifyResponse,  # type: ignore
                    object_=_raw.json(),
                ),
            )
            return AsyncHttpResponse(response=_raw, data=_parsed)
        if _raw.status_code == 422:
            # Validation failure: surface the server's structured error detail.
            raise UnprocessableEntityError(
                headers=dict(_raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_raw.json(),
                    ),
                ),
            )
        _error_body = _raw.json()
    except JSONDecodeError:
        # Non-JSON error payload: fall back to reporting the raw response text.
        raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_raw.text)
    raise ApiError(status_code=_raw.status_code, headers=dict(_raw.headers), body=_error_body)
|
|
968
|
+
|
|
969
|
+
@contextlib.asynccontextmanager
|
|
970
|
+
async def create_stream(
|
|
971
|
+
self,
|
|
972
|
+
agent_id: str,
|
|
973
|
+
*,
|
|
974
|
+
messages: typing.Sequence[MessageCreate],
|
|
975
|
+
max_steps: typing.Optional[int] = OMIT,
|
|
976
|
+
use_assistant_message: typing.Optional[bool] = OMIT,
|
|
977
|
+
assistant_message_tool_name: typing.Optional[str] = OMIT,
|
|
978
|
+
assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
|
|
979
|
+
include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
|
|
980
|
+
enable_thinking: typing.Optional[str] = OMIT,
|
|
981
|
+
stream_tokens: typing.Optional[bool] = OMIT,
|
|
982
|
+
include_pings: typing.Optional[bool] = OMIT,
|
|
983
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
984
|
+
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[LettaStreamingResponse]]]:
|
|
985
|
+
"""
|
|
986
|
+
Process a user message and return the agent's response.
|
|
987
|
+
This endpoint accepts a message from a user and processes it through the agent.
|
|
988
|
+
It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.
|
|
989
|
+
|
|
990
|
+
Parameters
|
|
991
|
+
----------
|
|
992
|
+
agent_id : str
|
|
993
|
+
|
|
994
|
+
messages : typing.Sequence[MessageCreate]
|
|
995
|
+
The messages to be sent to the agent.
|
|
996
|
+
|
|
997
|
+
max_steps : typing.Optional[int]
|
|
998
|
+
Maximum number of steps the agent should take to process the request.
|
|
999
|
+
|
|
1000
|
+
use_assistant_message : typing.Optional[bool]
|
|
1001
|
+
Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
|
|
1002
|
+
|
|
1003
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
1004
|
+
The name of the designated message tool.
|
|
1005
|
+
|
|
1006
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
1007
|
+
The name of the message argument in the designated message tool.
|
|
1008
|
+
|
|
1009
|
+
include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
|
|
1010
|
+
Only return specified message types in the response. If `None` (default) returns all messages.
|
|
1011
|
+
|
|
1012
|
+
enable_thinking : typing.Optional[str]
|
|
1013
|
+
If set to True, enables reasoning before responses or tool calls from the agent.
|
|
1014
|
+
|
|
1015
|
+
stream_tokens : typing.Optional[bool]
|
|
1016
|
+
Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
|
|
1017
|
+
|
|
1018
|
+
include_pings : typing.Optional[bool]
|
|
1019
|
+
Whether to include periodic keepalive ping messages in the stream to prevent connection timeouts.
|
|
1020
|
+
|
|
1021
|
+
request_options : typing.Optional[RequestOptions]
|
|
1022
|
+
Request-specific configuration.
|
|
1023
|
+
|
|
1024
|
+
Yields
|
|
1025
|
+
------
|
|
1026
|
+
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[LettaStreamingResponse]]]
|
|
1027
|
+
Successful response
|
|
1028
|
+
"""
|
|
1029
|
+
async with self._client_wrapper.httpx_client.stream(
|
|
1030
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/messages/stream",
|
|
1031
|
+
method="POST",
|
|
1032
|
+
json={
|
|
1033
|
+
"messages": convert_and_respect_annotation_metadata(
|
|
1034
|
+
object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
|
|
1035
|
+
),
|
|
1036
|
+
"max_steps": max_steps,
|
|
1037
|
+
"use_assistant_message": use_assistant_message,
|
|
1038
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
1039
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
1040
|
+
"include_return_message_types": include_return_message_types,
|
|
1041
|
+
"enable_thinking": enable_thinking,
|
|
1042
|
+
"stream_tokens": stream_tokens,
|
|
1043
|
+
"include_pings": include_pings,
|
|
1044
|
+
},
|
|
1045
|
+
headers={
|
|
1046
|
+
"content-type": "application/json",
|
|
1047
|
+
},
|
|
1048
|
+
request_options=request_options,
|
|
1049
|
+
omit=OMIT,
|
|
1050
|
+
) as _response:
|
|
1051
|
+
|
|
1052
|
+
async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[LettaStreamingResponse]]:
|
|
1053
|
+
try:
|
|
1054
|
+
if 200 <= _response.status_code < 300:
|
|
1055
|
+
|
|
1056
|
+
async def _iter():
|
|
1057
|
+
_event_source = httpx_sse.EventSource(_response)
|
|
1058
|
+
async for _sse in _event_source.aiter_sse():
|
|
1059
|
+
if _sse.data == None:
|
|
1060
|
+
return
|
|
1061
|
+
try:
|
|
1062
|
+
yield typing.cast(
|
|
1063
|
+
LettaStreamingResponse,
|
|
1064
|
+
construct_type(
|
|
1065
|
+
type_=LettaStreamingResponse, # type: ignore
|
|
1066
|
+
object_=json.loads(_sse.data),
|
|
1067
|
+
),
|
|
1068
|
+
)
|
|
1069
|
+
except Exception:
|
|
1070
|
+
pass
|
|
1071
|
+
return
|
|
1072
|
+
|
|
1073
|
+
return AsyncHttpResponse(response=_response, data=_iter())
|
|
1074
|
+
await _response.aread()
|
|
1075
|
+
if _response.status_code == 422:
|
|
1076
|
+
raise UnprocessableEntityError(
|
|
1077
|
+
headers=dict(_response.headers),
|
|
1078
|
+
body=typing.cast(
|
|
1079
|
+
HttpValidationError,
|
|
1080
|
+
construct_type(
|
|
1081
|
+
type_=HttpValidationError, # type: ignore
|
|
1082
|
+
object_=_response.json(),
|
|
1083
|
+
),
|
|
1084
|
+
),
|
|
1085
|
+
)
|
|
1086
|
+
_response_json = _response.json()
|
|
1087
|
+
except JSONDecodeError:
|
|
1088
|
+
raise ApiError(
|
|
1089
|
+
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
|
|
1090
|
+
)
|
|
1091
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
1092
|
+
|
|
1093
|
+
yield await _stream()
|
|
1094
|
+
|
|
1095
|
+
async def cancel(
|
|
1096
|
+
self,
|
|
1097
|
+
agent_id: str,
|
|
1098
|
+
*,
|
|
1099
|
+
request: typing.Optional[typing.Sequence[str]] = None,
|
|
1100
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
1101
|
+
) -> AsyncHttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]:
|
|
1102
|
+
"""
|
|
1103
|
+
Cancel runs associated with an agent. If run_ids are passed in, cancel those in particular.
|
|
1104
|
+
|
|
1105
|
+
Note to cancel active runs associated with an agent, redis is required.
|
|
1106
|
+
|
|
1107
|
+
Parameters
|
|
1108
|
+
----------
|
|
1109
|
+
agent_id : str
|
|
1110
|
+
|
|
1111
|
+
request : typing.Optional[typing.Sequence[str]]
|
|
1112
|
+
|
|
1113
|
+
request_options : typing.Optional[RequestOptions]
|
|
1114
|
+
Request-specific configuration.
|
|
1115
|
+
|
|
1116
|
+
Returns
|
|
1117
|
+
-------
|
|
1118
|
+
AsyncHttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]
|
|
1119
|
+
Successful Response
|
|
1120
|
+
"""
|
|
1121
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1122
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/messages/cancel",
|
|
1123
|
+
method="POST",
|
|
1124
|
+
json=request,
|
|
1125
|
+
headers={
|
|
1126
|
+
"content-type": "application/json",
|
|
1127
|
+
},
|
|
1128
|
+
request_options=request_options,
|
|
1129
|
+
omit=OMIT,
|
|
1130
|
+
)
|
|
1131
|
+
try:
|
|
1132
|
+
if 200 <= _response.status_code < 300:
|
|
1133
|
+
_data = typing.cast(
|
|
1134
|
+
typing.Dict[str, typing.Optional[typing.Any]],
|
|
1135
|
+
construct_type(
|
|
1136
|
+
type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
|
|
1137
|
+
object_=_response.json(),
|
|
1138
|
+
),
|
|
1139
|
+
)
|
|
1140
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
|
1141
|
+
if _response.status_code == 422:
|
|
1142
|
+
raise UnprocessableEntityError(
|
|
1143
|
+
headers=dict(_response.headers),
|
|
1144
|
+
body=typing.cast(
|
|
1145
|
+
HttpValidationError,
|
|
1146
|
+
construct_type(
|
|
1147
|
+
type_=HttpValidationError, # type: ignore
|
|
1148
|
+
object_=_response.json(),
|
|
1149
|
+
),
|
|
1150
|
+
),
|
|
1151
|
+
)
|
|
1152
|
+
_response_json = _response.json()
|
|
1153
|
+
except JSONDecodeError:
|
|
1154
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
|
|
1155
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
1156
|
+
|
|
1157
|
+
async def create_async(
|
|
1158
|
+
self,
|
|
1159
|
+
agent_id: str,
|
|
1160
|
+
*,
|
|
1161
|
+
messages: typing.Sequence[MessageCreate],
|
|
1162
|
+
max_steps: typing.Optional[int] = OMIT,
|
|
1163
|
+
use_assistant_message: typing.Optional[bool] = OMIT,
|
|
1164
|
+
assistant_message_tool_name: typing.Optional[str] = OMIT,
|
|
1165
|
+
assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
|
|
1166
|
+
include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
|
|
1167
|
+
enable_thinking: typing.Optional[str] = OMIT,
|
|
1168
|
+
callback_url: typing.Optional[str] = OMIT,
|
|
1169
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
1170
|
+
) -> AsyncHttpResponse[Run]:
|
|
1171
|
+
"""
|
|
1172
|
+
Asynchronously process a user message and return a run object.
|
|
1173
|
+
The actual processing happens in the background, and the status can be checked using the run ID.
|
|
1174
|
+
|
|
1175
|
+
This is "asynchronous" in the sense that it's a background job and explicitly must be fetched by the run ID.
|
|
1176
|
+
This is more like `send_message_job`
|
|
1177
|
+
|
|
1178
|
+
Parameters
|
|
1179
|
+
----------
|
|
1180
|
+
agent_id : str
|
|
1181
|
+
|
|
1182
|
+
messages : typing.Sequence[MessageCreate]
|
|
1183
|
+
The messages to be sent to the agent.
|
|
1184
|
+
|
|
1185
|
+
max_steps : typing.Optional[int]
|
|
1186
|
+
Maximum number of steps the agent should take to process the request.
|
|
1187
|
+
|
|
1188
|
+
use_assistant_message : typing.Optional[bool]
|
|
1189
|
+
Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
|
|
1190
|
+
|
|
1191
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
1192
|
+
The name of the designated message tool.
|
|
1193
|
+
|
|
1194
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
1195
|
+
The name of the message argument in the designated message tool.
|
|
1196
|
+
|
|
1197
|
+
include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
|
|
1198
|
+
Only return specified message types in the response. If `None` (default) returns all messages.
|
|
1199
|
+
|
|
1200
|
+
enable_thinking : typing.Optional[str]
|
|
1201
|
+
If set to True, enables reasoning before responses or tool calls from the agent.
|
|
1202
|
+
|
|
1203
|
+
callback_url : typing.Optional[str]
|
|
1204
|
+
Optional callback URL to POST to when the job completes
|
|
1205
|
+
|
|
1206
|
+
request_options : typing.Optional[RequestOptions]
|
|
1207
|
+
Request-specific configuration.
|
|
1208
|
+
|
|
1209
|
+
Returns
|
|
1210
|
+
-------
|
|
1211
|
+
AsyncHttpResponse[Run]
|
|
1212
|
+
Successful Response
|
|
1213
|
+
"""
|
|
1214
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1215
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/messages/async",
|
|
1216
|
+
method="POST",
|
|
1217
|
+
json={
|
|
1218
|
+
"messages": convert_and_respect_annotation_metadata(
|
|
1219
|
+
object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
|
|
1220
|
+
),
|
|
1221
|
+
"max_steps": max_steps,
|
|
1222
|
+
"use_assistant_message": use_assistant_message,
|
|
1223
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
1224
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
1225
|
+
"include_return_message_types": include_return_message_types,
|
|
1226
|
+
"enable_thinking": enable_thinking,
|
|
1227
|
+
"callback_url": callback_url,
|
|
1228
|
+
},
|
|
1229
|
+
headers={
|
|
1230
|
+
"content-type": "application/json",
|
|
1231
|
+
},
|
|
1232
|
+
request_options=request_options,
|
|
1233
|
+
omit=OMIT,
|
|
1234
|
+
)
|
|
1235
|
+
try:
|
|
1236
|
+
if 200 <= _response.status_code < 300:
|
|
1237
|
+
_data = typing.cast(
|
|
1238
|
+
Run,
|
|
1239
|
+
construct_type(
|
|
1240
|
+
type_=Run, # type: ignore
|
|
1241
|
+
object_=_response.json(),
|
|
1242
|
+
),
|
|
1243
|
+
)
|
|
1244
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
|
1245
|
+
if _response.status_code == 422:
|
|
1246
|
+
raise UnprocessableEntityError(
|
|
1247
|
+
headers=dict(_response.headers),
|
|
1248
|
+
body=typing.cast(
|
|
1249
|
+
HttpValidationError,
|
|
1250
|
+
construct_type(
|
|
1251
|
+
type_=HttpValidationError, # type: ignore
|
|
1252
|
+
object_=_response.json(),
|
|
1253
|
+
),
|
|
1254
|
+
),
|
|
1255
|
+
)
|
|
1256
|
+
_response_json = _response.json()
|
|
1257
|
+
except JSONDecodeError:
|
|
1258
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
|
|
1259
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
1260
|
+
|
|
1261
|
+
async def reset(
|
|
1262
|
+
self,
|
|
1263
|
+
agent_id: str,
|
|
1264
|
+
*,
|
|
1265
|
+
add_default_initial_messages: typing.Optional[bool] = None,
|
|
1266
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
1267
|
+
) -> AsyncHttpResponse[AgentState]:
|
|
1268
|
+
"""
|
|
1269
|
+
Resets the messages for an agent
|
|
1270
|
+
|
|
1271
|
+
Parameters
|
|
1272
|
+
----------
|
|
1273
|
+
agent_id : str
|
|
1274
|
+
|
|
1275
|
+
add_default_initial_messages : typing.Optional[bool]
|
|
1276
|
+
If true, adds the default initial messages after resetting.
|
|
1277
|
+
|
|
1278
|
+
request_options : typing.Optional[RequestOptions]
|
|
1279
|
+
Request-specific configuration.
|
|
1280
|
+
|
|
1281
|
+
Returns
|
|
1282
|
+
-------
|
|
1283
|
+
AsyncHttpResponse[AgentState]
|
|
1284
|
+
Successful Response
|
|
1285
|
+
"""
|
|
1286
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1287
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/reset-messages",
|
|
1288
|
+
method="PATCH",
|
|
1289
|
+
params={
|
|
1290
|
+
"add_default_initial_messages": add_default_initial_messages,
|
|
1291
|
+
},
|
|
1292
|
+
request_options=request_options,
|
|
1293
|
+
)
|
|
1294
|
+
try:
|
|
1295
|
+
if 200 <= _response.status_code < 300:
|
|
1296
|
+
_data = typing.cast(
|
|
1297
|
+
AgentState,
|
|
1298
|
+
construct_type(
|
|
1299
|
+
type_=AgentState, # type: ignore
|
|
1300
|
+
object_=_response.json(),
|
|
1301
|
+
),
|
|
1302
|
+
)
|
|
1303
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
|
1304
|
+
if _response.status_code == 422:
|
|
1305
|
+
raise UnprocessableEntityError(
|
|
1306
|
+
headers=dict(_response.headers),
|
|
1307
|
+
body=typing.cast(
|
|
1308
|
+
HttpValidationError,
|
|
1309
|
+
construct_type(
|
|
1310
|
+
type_=HttpValidationError, # type: ignore
|
|
1311
|
+
object_=_response.json(),
|
|
1312
|
+
),
|
|
1313
|
+
),
|
|
1314
|
+
)
|
|
1315
|
+
_response_json = _response.json()
|
|
1316
|
+
except JSONDecodeError:
|
|
1317
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
|
|
1318
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
|
1319
|
+
|
|
1320
|
+
async def preview_raw_payload(
|
|
1321
|
+
self,
|
|
1322
|
+
agent_id: str,
|
|
1323
|
+
*,
|
|
1324
|
+
request: MessagesPreviewRawPayloadRequest,
|
|
1325
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
1326
|
+
) -> AsyncHttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]:
|
|
1327
|
+
"""
|
|
1328
|
+
Inspect the raw LLM request payload without sending it.
|
|
1329
|
+
|
|
1330
|
+
This endpoint processes the message through the agent loop up until
|
|
1331
|
+
the LLM request, then returns the raw request payload that would
|
|
1332
|
+
be sent to the LLM provider. Useful for debugging and inspection.
|
|
1333
|
+
|
|
1334
|
+
Parameters
|
|
1335
|
+
----------
|
|
1336
|
+
agent_id : str
|
|
1337
|
+
|
|
1338
|
+
request : MessagesPreviewRawPayloadRequest
|
|
1339
|
+
|
|
1340
|
+
request_options : typing.Optional[RequestOptions]
|
|
1341
|
+
Request-specific configuration.
|
|
1342
|
+
|
|
1343
|
+
Returns
|
|
1344
|
+
-------
|
|
1345
|
+
AsyncHttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]
|
|
1346
|
+
Successful Response
|
|
1347
|
+
"""
|
|
1348
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1349
|
+
f"v1/agents/{jsonable_encoder(agent_id)}/messages/preview-raw-payload",
|
|
1350
|
+
method="POST",
|
|
1351
|
+
json=convert_and_respect_annotation_metadata(
|
|
1352
|
+
object_=request, annotation=MessagesPreviewRawPayloadRequest, direction="write"
|
|
1353
|
+
),
|
|
1354
|
+
headers={
|
|
1355
|
+
"content-type": "application/json",
|
|
1356
|
+
},
|
|
1357
|
+
request_options=request_options,
|
|
1358
|
+
omit=OMIT,
|
|
1359
|
+
)
|
|
1360
|
+
try:
|
|
1361
|
+
if 200 <= _response.status_code < 300:
|
|
1362
|
+
_data = typing.cast(
|
|
1363
|
+
typing.Dict[str, typing.Optional[typing.Any]],
|
|
1364
|
+
construct_type(
|
|
1365
|
+
type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
|
|
1366
|
+
object_=_response.json(),
|
|
1367
|
+
),
|
|
1368
|
+
)
|
|
1369
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
|
1370
|
+
if _response.status_code == 422:
|
|
1371
|
+
raise UnprocessableEntityError(
|
|
1372
|
+
headers=dict(_response.headers),
|
|
1373
|
+
body=typing.cast(
|
|
1374
|
+
HttpValidationError,
|
|
1375
|
+
construct_type(
|
|
1376
|
+
type_=HttpValidationError, # type: ignore
|
|
1377
|
+
object_=_response.json(),
|
|
1378
|
+
),
|
|
1379
|
+
),
|
|
1380
|
+
)
|
|
1381
|
+
_response_json = _response.json()
|
|
1382
|
+
except JSONDecodeError:
|
|
1383
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
|
|
1384
|
+
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|