mirascope 1.0.5__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +6 -6
- mirascope/_stubs.py +384 -0
- mirascope/_utils.py +34 -0
- mirascope/api/__init__.py +14 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +444 -0
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +506 -0
- mirascope/api/_generated/annotations/raw_client.py +1414 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +17 -0
- mirascope/api/_generated/api_keys/client.py +530 -0
- mirascope/api/_generated/api_keys/raw_client.py +1236 -0
- mirascope/api/_generated/api_keys/types/__init__.py +15 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
- mirascope/api/_generated/client.py +211 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +46 -0
- mirascope/api/_generated/core/datetime_utils.py +28 -0
- mirascope/api/_generated/core/file.py +67 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +543 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +100 -0
- mirascope/api/_generated/core/pydantic_utilities.py +255 -0
- mirascope/api/_generated/core/query_encoder.py +58 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +276 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +91 -0
- mirascope/api/_generated/docs/raw_client.py +178 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/environments/__init__.py +23 -0
- mirascope/api/_generated/environments/client.py +649 -0
- mirascope/api/_generated/environments/raw_client.py +1567 -0
- mirascope/api/_generated/environments/types/__init__.py +25 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
- mirascope/api/_generated/errors/__init__.py +25 -0
- mirascope/api/_generated/errors/bad_request_error.py +14 -0
- mirascope/api/_generated/errors/conflict_error.py +14 -0
- mirascope/api/_generated/errors/forbidden_error.py +11 -0
- mirascope/api/_generated/errors/internal_server_error.py +10 -0
- mirascope/api/_generated/errors/not_found_error.py +11 -0
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +39 -0
- mirascope/api/_generated/functions/client.py +647 -0
- mirascope/api/_generated/functions/raw_client.py +1890 -0
- mirascope/api/_generated/functions/types/__init__.py +53 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +92 -0
- mirascope/api/_generated/health/raw_client.py +175 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +22 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +51 -0
- mirascope/api/_generated/organizations/client.py +869 -0
- mirascope/api/_generated/organizations/raw_client.py +2593 -0
- mirascope/api/_generated/organizations/types/__init__.py +71 -0
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/client.py +528 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1278 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +7 -0
- mirascope/api/_generated/projects/client.py +428 -0
- mirascope/api/_generated/projects/raw_client.py +1302 -0
- mirascope/api/_generated/projects/types/__init__.py +10 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +25 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +25 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +25 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +25 -0
- mirascope/api/_generated/reference.md +4987 -0
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +97 -0
- mirascope/api/_generated/traces/client.py +1103 -0
- mirascope/api/_generated/traces/raw_client.py +2322 -0
- mirascope/api/_generated/traces/types/__init__.py +155 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +20 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +48 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +20 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
- mirascope/api/_generated/types/__init__.py +85 -0
- mirascope/api/_generated/types/already_exists_error.py +22 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +22 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/http_api_decode_error.py +27 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +38 -0
- mirascope/api/_generated/types/issue_tag.py +10 -0
- mirascope/api/_generated/types/not_found_error_body.py +22 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +22 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +5 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_key.py +25 -0
- mirascope/api/_generated/types/property_key_key_tag.py +5 -0
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +99 -0
- mirascope/llm/__init__.py +316 -0
- mirascope/llm/calls/__init__.py +17 -0
- mirascope/llm/calls/calls.py +348 -0
- mirascope/llm/calls/decorator.py +268 -0
- mirascope/llm/content/__init__.py +71 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +69 -0
- mirascope/llm/content/tool_output.py +43 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +41 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +360 -0
- mirascope/llm/formatting/__init__.py +39 -0
- mirascope/llm/formatting/format.py +291 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +131 -0
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +83 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/messages/__init__.py +35 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/messages/message.py +190 -0
- mirascope/llm/models/__init__.py +21 -0
- mirascope/llm/models/models.py +1339 -0
- mirascope/llm/models/params.py +72 -0
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/__init__.py +34 -0
- mirascope/llm/prompts/_utils.py +31 -0
- mirascope/llm/prompts/decorator.py +215 -0
- mirascope/llm/prompts/prompts.py +484 -0
- mirascope/llm/prompts/protocols.py +65 -0
- mirascope/llm/providers/__init__.py +65 -0
- mirascope/llm/providers/anthropic/__init__.py +11 -0
- mirascope/llm/providers/anthropic/_utils/__init__.py +27 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +297 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +272 -0
- mirascope/llm/providers/anthropic/_utils/decode.py +326 -0
- mirascope/llm/providers/anthropic/_utils/encode.py +431 -0
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +338 -0
- mirascope/llm/providers/anthropic/model_id.py +23 -0
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +440 -0
- mirascope/llm/providers/base/__init__.py +14 -0
- mirascope/llm/providers/base/_utils.py +248 -0
- mirascope/llm/providers/base/base_provider.py +1463 -0
- mirascope/llm/providers/base/kwargs.py +12 -0
- mirascope/llm/providers/google/__init__.py +6 -0
- mirascope/llm/providers/google/_utils/__init__.py +17 -0
- mirascope/llm/providers/google/_utils/decode.py +357 -0
- mirascope/llm/providers/google/_utils/encode.py +418 -0
- mirascope/llm/providers/google/_utils/errors.py +50 -0
- mirascope/llm/providers/google/message.py +7 -0
- mirascope/llm/providers/google/model_id.py +22 -0
- mirascope/llm/providers/google/model_info.py +63 -0
- mirascope/llm/providers/google/provider.py +456 -0
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +73 -0
- mirascope/llm/providers/mirascope/provider.py +313 -0
- mirascope/llm/providers/mlx/__init__.py +9 -0
- mirascope/llm/providers/mlx/_utils.py +141 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +146 -0
- mirascope/llm/providers/mlx/mlx.py +242 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +416 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/ollama/__init__.py +7 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +15 -0
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +7 -0
- mirascope/llm/providers/openai/completions/_utils/__init__.py +18 -0
- mirascope/llm/providers/openai/completions/_utils/decode.py +252 -0
- mirascope/llm/providers/openai/completions/_utils/encode.py +390 -0
- mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
- mirascope/llm/providers/openai/completions/base_provider.py +522 -0
- mirascope/llm/providers/openai/completions/provider.py +28 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +303 -0
- mirascope/llm/providers/openai/provider.py +405 -0
- mirascope/llm/providers/openai/responses/__init__.py +5 -0
- mirascope/llm/providers/openai/responses/_utils/__init__.py +15 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +289 -0
- mirascope/llm/providers/openai/responses/_utils/encode.py +399 -0
- mirascope/llm/providers/openai/responses/provider.py +472 -0
- mirascope/llm/providers/openrouter/__init__.py +5 -0
- mirascope/llm/providers/openrouter/provider.py +67 -0
- mirascope/llm/providers/provider_id.py +26 -0
- mirascope/llm/providers/provider_registry.py +305 -0
- mirascope/llm/providers/together/__init__.py +7 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +66 -0
- mirascope/llm/responses/_utils.py +146 -0
- mirascope/llm/responses/base_response.py +103 -0
- mirascope/llm/responses/base_stream_response.py +824 -0
- mirascope/llm/responses/finish_reason.py +28 -0
- mirascope/llm/responses/response.py +362 -0
- mirascope/llm/responses/root_response.py +248 -0
- mirascope/llm/responses/stream_response.py +577 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/responses/usage.py +139 -0
- mirascope/llm/tools/__init__.py +71 -0
- mirascope/llm/tools/_utils.py +34 -0
- mirascope/llm/tools/decorator.py +184 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/provider_tools.py +18 -0
- mirascope/llm/tools/tool_schema.py +321 -0
- mirascope/llm/tools/toolkit.py +178 -0
- mirascope/llm/tools/tools.py +263 -0
- mirascope/llm/tools/types.py +112 -0
- mirascope/llm/tools/web_search_tool.py +32 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope/ops/__init__.py +129 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1172 -0
- mirascope/ops/_internal/configuration.py +177 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +362 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +66 -0
- mirascope/ops/_internal/instrumentation/__init__.py +28 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/common.py +500 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +161 -0
- mirascope/ops/_internal/instrumentation/llm/model.py +1777 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +324 -0
- mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
- mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
- mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
- mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
- mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +133 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +389 -0
- mirascope/ops/_internal/traced_functions.py +528 -0
- mirascope/ops/_internal/tracing.py +353 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +131 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +357 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- mirascope-2.1.1.dist-info/METADATA +231 -0
- mirascope-2.1.1.dist-info/RECORD +437 -0
- {mirascope-1.0.5.dist-info → mirascope-2.1.1.dist-info}/WHEEL +1 -1
- {mirascope-1.0.5.dist-info → mirascope-2.1.1.dist-info}/licenses/LICENSE +1 -1
- mirascope/beta/__init__.py +0 -0
- mirascope/beta/openai/__init__.py +0 -5
- mirascope/beta/openai/parse.py +0 -129
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -57
- mirascope/beta/rag/chroma/vectorstores.py +0 -97
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -55
- mirascope/core/anthropic/__init__.py +0 -21
- mirascope/core/anthropic/_call.py +0 -71
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_calculate_cost.py +0 -63
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -54
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -34
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -89
- mirascope/core/anthropic/_utils/_setup_call.py +0 -76
- mirascope/core/anthropic/call_params.py +0 -36
- mirascope/core/anthropic/call_response.py +0 -158
- mirascope/core/anthropic/call_response_chunk.py +0 -104
- mirascope/core/anthropic/dynamic_config.py +0 -26
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -140
- mirascope/core/anthropic/tool.py +0 -77
- mirascope/core/base/__init__.py +0 -40
- mirascope/core/base/_call_factory.py +0 -323
- mirascope/core/base/_create.py +0 -167
- mirascope/core/base/_extract.py +0 -139
- mirascope/core/base/_partial.py +0 -63
- mirascope/core/base/_utils/__init__.py +0 -64
- mirascope/core/base/_utils/_base_type.py +0 -17
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -45
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -126
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -36
- mirascope/core/base/_utils/_format_template.py +0 -29
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_fn_args.py +0 -14
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -25
- mirascope/core/base/_utils/_get_template_values.py +0 -52
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_json_mode_content.py +0 -15
- mirascope/core/base/_utils/_parse_content_template.py +0 -157
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -51
- mirascope/core/base/_utils/_protocols.py +0 -215
- mirascope/core/base/_utils/_setup_call.py +0 -64
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -24
- mirascope/core/base/call_params.py +0 -6
- mirascope/core/base/call_response.py +0 -189
- mirascope/core/base/call_response_chunk.py +0 -91
- mirascope/core/base/dynamic_config.py +0 -55
- mirascope/core/base/message_param.py +0 -61
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -415
- mirascope/core/base/stream.py +0 -365
- mirascope/core/base/structured_stream.py +0 -251
- mirascope/core/base/tool.py +0 -126
- mirascope/core/base/toolkit.py +0 -146
- mirascope/core/cohere/__init__.py +0 -21
- mirascope/core/cohere/_call.py +0 -71
- mirascope/core/cohere/_utils/__init__.py +0 -16
- mirascope/core/cohere/_utils/_calculate_cost.py +0 -39
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -31
- mirascope/core/cohere/_utils/_get_json_output.py +0 -31
- mirascope/core/cohere/_utils/_handle_stream.py +0 -33
- mirascope/core/cohere/_utils/_setup_call.py +0 -89
- mirascope/core/cohere/call_params.py +0 -57
- mirascope/core/cohere/call_response.py +0 -167
- mirascope/core/cohere/call_response_chunk.py +0 -101
- mirascope/core/cohere/dynamic_config.py +0 -24
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -92
- mirascope/core/gemini/__init__.py +0 -21
- mirascope/core/gemini/_call.py +0 -71
- mirascope/core/gemini/_utils/__init__.py +0 -16
- mirascope/core/gemini/_utils/_calculate_cost.py +0 -8
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -74
- mirascope/core/gemini/_utils/_get_json_output.py +0 -33
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_setup_call.py +0 -68
- mirascope/core/gemini/call_params.py +0 -28
- mirascope/core/gemini/call_response.py +0 -173
- mirascope/core/gemini/call_response_chunk.py +0 -85
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -121
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/groq/__init__.py +0 -21
- mirascope/core/groq/_call.py +0 -71
- mirascope/core/groq/_utils/__init__.py +0 -16
- mirascope/core/groq/_utils/_calculate_cost.py +0 -68
- mirascope/core/groq/_utils/_convert_message_params.py +0 -23
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -121
- mirascope/core/groq/_utils/_setup_call.py +0 -67
- mirascope/core/groq/call_params.py +0 -51
- mirascope/core/groq/call_response.py +0 -160
- mirascope/core/groq/call_response_chunk.py +0 -89
- mirascope/core/groq/dynamic_config.py +0 -26
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -136
- mirascope/core/groq/tool.py +0 -79
- mirascope/core/litellm/__init__.py +0 -6
- mirascope/core/litellm/_call.py +0 -73
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -46
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/mistral/__init__.py +0 -21
- mirascope/core/mistral/_call.py +0 -69
- mirascope/core/mistral/_utils/__init__.py +0 -16
- mirascope/core/mistral/_utils/_calculate_cost.py +0 -47
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -23
- mirascope/core/mistral/_utils/_get_json_output.py +0 -28
- mirascope/core/mistral/_utils/_handle_stream.py +0 -121
- mirascope/core/mistral/_utils/_setup_call.py +0 -86
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -156
- mirascope/core/mistral/call_response_chunk.py +0 -84
- mirascope/core/mistral/dynamic_config.py +0 -24
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -117
- mirascope/core/mistral/tool.py +0 -77
- mirascope/core/openai/__init__.py +0 -21
- mirascope/core/openai/_call.py +0 -71
- mirascope/core/openai/_utils/__init__.py +0 -16
- mirascope/core/openai/_utils/_calculate_cost.py +0 -110
- mirascope/core/openai/_utils/_convert_message_params.py +0 -53
- mirascope/core/openai/_utils/_get_json_output.py +0 -27
- mirascope/core/openai/_utils/_handle_stream.py +0 -125
- mirascope/core/openai/_utils/_setup_call.py +0 -62
- mirascope/core/openai/call_params.py +0 -54
- mirascope/core/openai/call_response.py +0 -162
- mirascope/core/openai/call_response_chunk.py +0 -90
- mirascope/core/openai/dynamic_config.py +0 -26
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -148
- mirascope/core/openai/tool.py +0 -79
- mirascope/core/py.typed +0 -0
- mirascope/integrations/__init__.py +0 -20
- mirascope/integrations/_middleware_factory.py +0 -277
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -71
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -188
- mirascope/integrations/logfire/_with_logfire.py +0 -60
- mirascope/integrations/otel/__init__.py +0 -5
- mirascope/integrations/otel/_utils.py +0 -268
- mirascope/integrations/otel/_with_hyperdx.py +0 -61
- mirascope/integrations/otel/_with_otel.py +0 -60
- mirascope/integrations/tenacity.py +0 -50
- mirascope/py.typed +0 -0
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.0.5.dist-info/METADATA +0 -519
- mirascope-1.0.5.dist-info/RECORD +0 -198
|
@@ -0,0 +1,1463 @@
|
|
|
1
|
+
"""Base abstract interface for provider clients."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from collections.abc import Callable, Generator, Mapping, Sequence
|
|
7
|
+
from contextlib import contextmanager
|
|
8
|
+
from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeAlias, cast, overload
|
|
9
|
+
from typing_extensions import TypeVar, Unpack
|
|
10
|
+
|
|
11
|
+
from ...context import Context, DepsT
|
|
12
|
+
from ...exceptions import APIError, ProviderError
|
|
13
|
+
from ...formatting import FormatSpec, FormattableT
|
|
14
|
+
from ...messages import Message, UserContent, user
|
|
15
|
+
from ...responses import (
|
|
16
|
+
AsyncChunkIterator,
|
|
17
|
+
AsyncContextResponse,
|
|
18
|
+
AsyncContextStreamResponse,
|
|
19
|
+
AsyncResponse,
|
|
20
|
+
AsyncStreamResponse,
|
|
21
|
+
ChunkIterator,
|
|
22
|
+
ContextResponse,
|
|
23
|
+
ContextStreamResponse,
|
|
24
|
+
Response,
|
|
25
|
+
StreamResponse,
|
|
26
|
+
)
|
|
27
|
+
from ...tools import (
|
|
28
|
+
AsyncContextToolkit,
|
|
29
|
+
AsyncToolkit,
|
|
30
|
+
ContextToolkit,
|
|
31
|
+
Toolkit,
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
if TYPE_CHECKING:
|
|
35
|
+
from ...models import Params
|
|
36
|
+
from ..provider_id import ProviderId
|
|
37
|
+
|
|
38
|
+
ProviderClientT = TypeVar("ProviderClientT")
|
|
39
|
+
|
|
40
|
+
Provider: TypeAlias = "BaseProvider[Any]"
|
|
41
|
+
"""Type alias for `BaseProvider` with any client type."""
|
|
42
|
+
|
|
43
|
+
ProviderErrorMap: TypeAlias = Mapping[
|
|
44
|
+
type[Exception],
|
|
45
|
+
"type[ProviderError] | Callable[[Exception], type[ProviderError]]",
|
|
46
|
+
]
|
|
47
|
+
"""Mapping from provider SDK exceptions to Mirascope error types.
|
|
48
|
+
|
|
49
|
+
Keys are provider SDK exception types (e.g., OpenAIError, AnthropicError).
|
|
50
|
+
Values can be:
|
|
51
|
+
- Error type: Simple 1:1 mapping (e.g., RateLimitError)
|
|
52
|
+
- Callable: Transform function returning error type based on exception details
|
|
53
|
+
"""
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class BaseProvider(Generic[ProviderClientT], ABC):
|
|
57
|
+
"""Base abstract provider for LLM interactions.
|
|
58
|
+
|
|
59
|
+
This class defines explicit methods for each type of call, eliminating
|
|
60
|
+
the need for complex overloads in provider implementations.
|
|
61
|
+
"""
|
|
62
|
+
|
|
63
|
+
id: ClassVar[ProviderId]
|
|
64
|
+
"""Provider identifier (e.g., "anthropic", "openai")."""
|
|
65
|
+
|
|
66
|
+
default_scope: ClassVar[str | list[str]]
|
|
67
|
+
"""Default scope(s) for this provider when explicitly registered.
|
|
68
|
+
|
|
69
|
+
Can be a single scope string or a list of scopes. For example:
|
|
70
|
+
- "anthropic/" - Single scope
|
|
71
|
+
- ["anthropic/", "openai/"] - Multiple scopes (e.g., for AWS Bedrock)
|
|
72
|
+
"""
|
|
73
|
+
|
|
74
|
+
error_map: ClassVar[ProviderErrorMap]
|
|
75
|
+
"""Mapping from provider SDK exceptions to Mirascope error types.
|
|
76
|
+
|
|
77
|
+
Values can be:
|
|
78
|
+
- Error type: Simple 1:1 mapping (e.g., AnthropicRateLimitError -> RateLimitError)
|
|
79
|
+
- Callable: Transform function returning error type based on exception details
|
|
80
|
+
(e.g., lambda e: NotFoundError if e.code == "model_not_found" else BadRequestError)
|
|
81
|
+
|
|
82
|
+
The mapping is walked via the exception's MRO, allowing both specific error handling
|
|
83
|
+
and fallback to base SDK error types (e.g., AnthropicError -> ProviderError).
|
|
84
|
+
"""
|
|
85
|
+
|
|
86
|
+
client: ProviderClientT
|
|
87
|
+
|
|
88
|
+
@contextmanager
|
|
89
|
+
def _wrap_errors(self) -> Generator[None, None, None]:
|
|
90
|
+
"""Wrap provider API calls and convert errors to Mirascope exceptions.
|
|
91
|
+
|
|
92
|
+
Walks the exception's MRO to find the first matching error type in the
|
|
93
|
+
provider's error_map, allowing both specific error handling and fallback
|
|
94
|
+
to base SDK error types (e.g., AnthropicError -> ProviderError).
|
|
95
|
+
"""
|
|
96
|
+
try:
|
|
97
|
+
yield
|
|
98
|
+
except Exception as e:
|
|
99
|
+
# Walk MRO to find first matching error type in provider's error_map
|
|
100
|
+
for error_class in type(e).__mro__:
|
|
101
|
+
if error_class in self.error_map:
|
|
102
|
+
error_type_or_fn = self.error_map[error_class]
|
|
103
|
+
|
|
104
|
+
if isinstance(error_type_or_fn, type):
|
|
105
|
+
error_type = cast(type[ProviderError], error_type_or_fn)
|
|
106
|
+
else:
|
|
107
|
+
error_type = error_type_or_fn(e)
|
|
108
|
+
|
|
109
|
+
# Construct Mirascope error with metadata
|
|
110
|
+
if issubclass(error_type, APIError):
|
|
111
|
+
error: ProviderError = error_type(
|
|
112
|
+
str(e),
|
|
113
|
+
provider=self.id,
|
|
114
|
+
status_code=self.get_error_status(e),
|
|
115
|
+
original_exception=e,
|
|
116
|
+
)
|
|
117
|
+
else:
|
|
118
|
+
error = error_type(
|
|
119
|
+
str(e),
|
|
120
|
+
provider=self.id,
|
|
121
|
+
original_exception=e,
|
|
122
|
+
)
|
|
123
|
+
raise error from e
|
|
124
|
+
|
|
125
|
+
# Not in error_map - not a provider error, re-raise as-is
|
|
126
|
+
raise
|
|
127
|
+
|
|
128
|
+
def _wrap_iterator_errors(self, iterator: ChunkIterator) -> ChunkIterator:
|
|
129
|
+
"""Wrap sync chunk iterator to handle errors during iteration."""
|
|
130
|
+
# TODO: Consider moving this logic into BaseSyncStreamResponse if appropriate.
|
|
131
|
+
with self._wrap_errors():
|
|
132
|
+
yield from iterator
|
|
133
|
+
|
|
134
|
+
async def _wrap_async_iterator_errors(
|
|
135
|
+
self, iterator: AsyncChunkIterator
|
|
136
|
+
) -> AsyncChunkIterator:
|
|
137
|
+
"""Wrap async chunk iterator to handle errors during iteration."""
|
|
138
|
+
# TODO: Consider moving this logic into BaseAsyncStreamResponse if appropriate.
|
|
139
|
+
with self._wrap_errors():
|
|
140
|
+
async for chunk in iterator:
|
|
141
|
+
yield chunk
|
|
142
|
+
|
|
143
|
+
@overload
|
|
144
|
+
def call(
|
|
145
|
+
self,
|
|
146
|
+
*,
|
|
147
|
+
model_id: str,
|
|
148
|
+
messages: Sequence[Message],
|
|
149
|
+
toolkit: Toolkit,
|
|
150
|
+
format: None = None,
|
|
151
|
+
**params: Unpack[Params],
|
|
152
|
+
) -> Response:
|
|
153
|
+
"""Generate an `llm.Response` without a response format."""
|
|
154
|
+
...
|
|
155
|
+
|
|
156
|
+
@overload
|
|
157
|
+
def call(
|
|
158
|
+
self,
|
|
159
|
+
*,
|
|
160
|
+
model_id: str,
|
|
161
|
+
messages: Sequence[Message],
|
|
162
|
+
toolkit: Toolkit,
|
|
163
|
+
format: FormatSpec[FormattableT],
|
|
164
|
+
**params: Unpack[Params],
|
|
165
|
+
) -> Response[FormattableT]:
|
|
166
|
+
"""Generate an `llm.Response` with a response format."""
|
|
167
|
+
...
|
|
168
|
+
|
|
169
|
+
@overload
|
|
170
|
+
def call(
|
|
171
|
+
self,
|
|
172
|
+
*,
|
|
173
|
+
model_id: str,
|
|
174
|
+
messages: Sequence[Message],
|
|
175
|
+
toolkit: Toolkit,
|
|
176
|
+
format: FormatSpec[FormattableT] | None,
|
|
177
|
+
**params: Unpack[Params],
|
|
178
|
+
) -> Response | Response[FormattableT]:
|
|
179
|
+
"""Generate an `llm.Response` with an optional response format."""
|
|
180
|
+
...
|
|
181
|
+
|
|
182
|
+
def call(
|
|
183
|
+
self,
|
|
184
|
+
*,
|
|
185
|
+
model_id: str,
|
|
186
|
+
messages: Sequence[Message],
|
|
187
|
+
toolkit: Toolkit,
|
|
188
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
189
|
+
**params: Unpack[Params],
|
|
190
|
+
) -> Response | Response[FormattableT]:
|
|
191
|
+
"""Generate an `llm.Response` by synchronously calling this client's LLM provider.
|
|
192
|
+
|
|
193
|
+
Args:
|
|
194
|
+
model_id: Model identifier to use.
|
|
195
|
+
messages: Messages to send to the LLM.
|
|
196
|
+
tools: Optional tools that the model may invoke.
|
|
197
|
+
format: Optional response format specifier.
|
|
198
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
199
|
+
|
|
200
|
+
Returns:
|
|
201
|
+
An `llm.Response` object containing the LLM-generated content.
|
|
202
|
+
"""
|
|
203
|
+
with self._wrap_errors():
|
|
204
|
+
return self._call(
|
|
205
|
+
model_id=model_id,
|
|
206
|
+
messages=messages,
|
|
207
|
+
toolkit=toolkit,
|
|
208
|
+
format=format,
|
|
209
|
+
**params,
|
|
210
|
+
)
|
|
211
|
+
|
|
212
|
+
@abstractmethod
|
|
213
|
+
def _call(
|
|
214
|
+
self,
|
|
215
|
+
*,
|
|
216
|
+
model_id: str,
|
|
217
|
+
messages: Sequence[Message],
|
|
218
|
+
toolkit: Toolkit,
|
|
219
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
220
|
+
**params: Unpack[Params],
|
|
221
|
+
) -> Response | Response[FormattableT]:
|
|
222
|
+
"""Implementation for call(). Subclasses override this method."""
|
|
223
|
+
...
|
|
224
|
+
|
|
225
|
+
@overload
|
|
226
|
+
def context_call(
|
|
227
|
+
self,
|
|
228
|
+
*,
|
|
229
|
+
ctx: Context[DepsT],
|
|
230
|
+
model_id: str,
|
|
231
|
+
messages: Sequence[Message],
|
|
232
|
+
toolkit: ContextToolkit[DepsT],
|
|
233
|
+
format: None = None,
|
|
234
|
+
**params: Unpack[Params],
|
|
235
|
+
) -> ContextResponse[DepsT, None]:
|
|
236
|
+
"""Generate an `llm.ContextResponse` without a response format."""
|
|
237
|
+
...
|
|
238
|
+
|
|
239
|
+
@overload
|
|
240
|
+
def context_call(
|
|
241
|
+
self,
|
|
242
|
+
*,
|
|
243
|
+
ctx: Context[DepsT],
|
|
244
|
+
model_id: str,
|
|
245
|
+
messages: Sequence[Message],
|
|
246
|
+
toolkit: ContextToolkit[DepsT],
|
|
247
|
+
format: FormatSpec[FormattableT],
|
|
248
|
+
**params: Unpack[Params],
|
|
249
|
+
) -> ContextResponse[DepsT, FormattableT]:
|
|
250
|
+
"""Generate an `llm.ContextResponse` with a response format."""
|
|
251
|
+
...
|
|
252
|
+
|
|
253
|
+
@overload
|
|
254
|
+
def context_call(
|
|
255
|
+
self,
|
|
256
|
+
*,
|
|
257
|
+
ctx: Context[DepsT],
|
|
258
|
+
model_id: str,
|
|
259
|
+
messages: Sequence[Message],
|
|
260
|
+
toolkit: ContextToolkit[DepsT],
|
|
261
|
+
format: FormatSpec[FormattableT] | None,
|
|
262
|
+
**params: Unpack[Params],
|
|
263
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
264
|
+
"""Generate an `llm.ContextResponse` with an optional response format."""
|
|
265
|
+
...
|
|
266
|
+
|
|
267
|
+
def context_call(
|
|
268
|
+
self,
|
|
269
|
+
*,
|
|
270
|
+
ctx: Context[DepsT],
|
|
271
|
+
model_id: str,
|
|
272
|
+
messages: Sequence[Message],
|
|
273
|
+
toolkit: ContextToolkit[DepsT],
|
|
274
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
275
|
+
**params: Unpack[Params],
|
|
276
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
277
|
+
"""Generate an `llm.ContextResponse` by synchronously calling this client's LLM provider.
|
|
278
|
+
|
|
279
|
+
Args:
|
|
280
|
+
ctx: Context object with dependencies for tools.
|
|
281
|
+
model_id: Model identifier to use.
|
|
282
|
+
messages: Messages to send to the LLM.
|
|
283
|
+
tools: Optional tools that the model may invoke.
|
|
284
|
+
format: Optional response format specifier.
|
|
285
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
286
|
+
|
|
287
|
+
Returns:
|
|
288
|
+
An `llm.ContextResponse` object containing the LLM-generated content.
|
|
289
|
+
"""
|
|
290
|
+
with self._wrap_errors():
|
|
291
|
+
return self._context_call(
|
|
292
|
+
ctx=ctx,
|
|
293
|
+
model_id=model_id,
|
|
294
|
+
messages=messages,
|
|
295
|
+
toolkit=toolkit,
|
|
296
|
+
format=format,
|
|
297
|
+
**params,
|
|
298
|
+
)
|
|
299
|
+
|
|
300
|
+
@abstractmethod
|
|
301
|
+
def _context_call(
|
|
302
|
+
self,
|
|
303
|
+
*,
|
|
304
|
+
ctx: Context[DepsT],
|
|
305
|
+
model_id: str,
|
|
306
|
+
messages: Sequence[Message],
|
|
307
|
+
toolkit: ContextToolkit[DepsT],
|
|
308
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
309
|
+
**params: Unpack[Params],
|
|
310
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
311
|
+
"""Implementation for context_call(). Subclasses override this method."""
|
|
312
|
+
...
|
|
313
|
+
|
|
314
|
+
@overload
|
|
315
|
+
async def call_async(
|
|
316
|
+
self,
|
|
317
|
+
*,
|
|
318
|
+
model_id: str,
|
|
319
|
+
messages: Sequence[Message],
|
|
320
|
+
toolkit: AsyncToolkit,
|
|
321
|
+
format: None = None,
|
|
322
|
+
**params: Unpack[Params],
|
|
323
|
+
) -> AsyncResponse:
|
|
324
|
+
"""Generate an `llm.AsyncResponse` without a response format."""
|
|
325
|
+
...
|
|
326
|
+
|
|
327
|
+
@overload
|
|
328
|
+
async def call_async(
|
|
329
|
+
self,
|
|
330
|
+
*,
|
|
331
|
+
model_id: str,
|
|
332
|
+
messages: Sequence[Message],
|
|
333
|
+
toolkit: AsyncToolkit,
|
|
334
|
+
format: FormatSpec[FormattableT],
|
|
335
|
+
**params: Unpack[Params],
|
|
336
|
+
) -> AsyncResponse[FormattableT]:
|
|
337
|
+
"""Generate an `llm.AsyncResponse` with a response format."""
|
|
338
|
+
...
|
|
339
|
+
|
|
340
|
+
@overload
|
|
341
|
+
async def call_async(
|
|
342
|
+
self,
|
|
343
|
+
*,
|
|
344
|
+
model_id: str,
|
|
345
|
+
messages: Sequence[Message],
|
|
346
|
+
toolkit: AsyncToolkit,
|
|
347
|
+
format: FormatSpec[FormattableT] | None,
|
|
348
|
+
**params: Unpack[Params],
|
|
349
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
350
|
+
"""Generate an `llm.AsyncResponse` with an optional response format."""
|
|
351
|
+
...
|
|
352
|
+
|
|
353
|
+
async def call_async(
|
|
354
|
+
self,
|
|
355
|
+
*,
|
|
356
|
+
model_id: str,
|
|
357
|
+
messages: Sequence[Message],
|
|
358
|
+
toolkit: AsyncToolkit,
|
|
359
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
360
|
+
**params: Unpack[Params],
|
|
361
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
362
|
+
"""Generate an `llm.AsyncResponse` by asynchronously calling this client's LLM provider.
|
|
363
|
+
|
|
364
|
+
Args:
|
|
365
|
+
model_id: Model identifier to use.
|
|
366
|
+
messages: Messages to send to the LLM.
|
|
367
|
+
tools: Optional tools that the model may invoke.
|
|
368
|
+
format: Optional response format specifier.
|
|
369
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
370
|
+
|
|
371
|
+
Returns:
|
|
372
|
+
An `llm.AsyncResponse` object containing the LLM-generated content.
|
|
373
|
+
"""
|
|
374
|
+
with self._wrap_errors():
|
|
375
|
+
return await self._call_async(
|
|
376
|
+
model_id=model_id,
|
|
377
|
+
messages=messages,
|
|
378
|
+
toolkit=toolkit,
|
|
379
|
+
format=format,
|
|
380
|
+
**params,
|
|
381
|
+
)
|
|
382
|
+
|
|
383
|
+
@abstractmethod
|
|
384
|
+
async def _call_async(
|
|
385
|
+
self,
|
|
386
|
+
*,
|
|
387
|
+
model_id: str,
|
|
388
|
+
messages: Sequence[Message],
|
|
389
|
+
toolkit: AsyncToolkit,
|
|
390
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
391
|
+
**params: Unpack[Params],
|
|
392
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
393
|
+
"""Implementation for call_async(). Subclasses override this method."""
|
|
394
|
+
...
|
|
395
|
+
|
|
396
|
+
+    @overload
+    async def context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None]:
+        """Generate an `llm.AsyncContextResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT],
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` with an optional response format."""
+        ...
+
+    async def context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` by asynchronously calling this client's LLM provider.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            toolkit: Toolkit of tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextResponse` object containing the LLM-generated content.
+        """
+        with self._wrap_errors():
+            return await self._context_call_async(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                toolkit=toolkit,
+                format=format,
+                **params,
+            )
+
+    @abstractmethod
+    async def _context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Implementation for context_call_async(). Subclasses override this method."""
+        ...
+
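The context variants thread a typed dependency object through to tools. A hedged sketch of `context_call_async`; the `Deps` dataclass and the `Context(deps=...)` construction are assumptions (only the `Context[DepsT]` annotation appears above), as are `client` and `context_toolkit`.

import asyncio
from dataclasses import dataclass

@dataclass
class Deps:
    db_url: str  # whatever the tools in this toolkit need

async def main() -> None:
    ctx = Context(deps=Deps(db_url="postgres://localhost/example"))  # constructor assumed
    response = await client.context_call_async(
        ctx=ctx,
        model_id="provider:model-name",
        messages=[user("Look up the most recent order.")],
        toolkit=context_toolkit,  # an AsyncContextToolkit[Deps], assumed built elsewhere
    )
    print(response)

asyncio.run(main())
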
+    @overload
+    def stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: Toolkit,
+        format: None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse:
+        """Stream an `llm.StreamResponse` without a response format."""
+        ...
+
+    @overload
+    def stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT],
+        **params: Unpack[Params],
+    ) -> StreamResponse[FormattableT]:
+        """Stream an `llm.StreamResponse` with a response format."""
+        ...
+
+    @overload
+    def stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Stream an `llm.StreamResponse` with an optional response format."""
+        ...
+
+    def stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate an `llm.StreamResponse` by synchronously streaming from this client's LLM provider.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            toolkit: Toolkit of tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.StreamResponse` object for iterating over the LLM-generated content.
+        """
+        with self._wrap_errors():
+            stream_response = self._stream(
+                model_id=model_id,
+                messages=messages,
+                toolkit=toolkit,
+                format=format,
+                **params,
+            )
+            stream_response._chunk_iterator = self._wrap_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
+            )
+            return stream_response
+
+    @abstractmethod
+    def _stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Implementation for stream(). Subclasses override this method."""
+        ...
+
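Because `stream()` hands back a `StreamResponse` whose private `_chunk_iterator` has been wrapped for error translation, consumption is an ordinary loop. A sketch, assuming the response object is iterable over its chunks (the chunk type is not shown in this diff):

stream_response = client.stream(
    model_id="provider:model-name",
    messages=[user("Tell me a short story.")],
    toolkit=toolkit,  # a Toolkit instance, assumed built elsewhere
)
for chunk in stream_response:  # iteration protocol assumed
    ...  # render each chunk as it arrives; provider errors surface already wrapped
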
+    @overload
+    def context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: ContextToolkit[DepsT],
+        format: None = None,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT, None]:
+        """Stream an `llm.ContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    def context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT],
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT, FormattableT]:
+        """Stream an `llm.ContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    def context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Stream an `llm.ContextStreamResponse` with an optional response format."""
+        ...
+
+    def context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate an `llm.ContextStreamResponse` by synchronously streaming from this client's LLM provider.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            toolkit: Toolkit of tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+        """
+        with self._wrap_errors():
+            stream_response = self._context_stream(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                toolkit=toolkit,
+                format=format,
+                **params,
+            )
+            stream_response._chunk_iterator = self._wrap_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
+            )
+            return stream_response
+
+    @abstractmethod
+    def _context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Implementation for context_stream(). Subclasses override this method."""
+        ...
+
+    @overload
+    async def stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncToolkit,
+        format: None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse:
+        """Stream an `llm.AsyncStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT],
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse[FormattableT]:
+        """Stream an `llm.AsyncStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Stream an `llm.AsyncStreamResponse` with an optional response format."""
+        ...
+
+    async def stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this client's LLM provider.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            toolkit: Toolkit of tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+        """
+        with self._wrap_errors():
+            stream_response = await self._stream_async(
+                model_id=model_id,
+                messages=messages,
+                toolkit=toolkit,
+                format=format,
+                **params,
+            )
+            stream_response._chunk_iterator = self._wrap_async_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
+            )
+            return stream_response
+
+    @abstractmethod
+    async def _stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Implementation for stream_async(). Subclasses override this method."""
+        ...
+
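Note the shape of `stream_async()`: it is awaited once to open the stream, and the returned object is then consumed with `async for`. A sketch under the same assumptions as the earlier examples:

import asyncio

async def main() -> None:
    stream_response = await client.stream_async(
        model_id="provider:model-name",
        messages=[user("Summarize our conversation so far.")],
        toolkit=async_toolkit,  # an AsyncToolkit, assumed built elsewhere
    )
    async for chunk in stream_response:  # async iteration protocol assumed
        ...  # handle chunks incrementally

asyncio.run(main())
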
+    @overload
+    async def context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextStreamResponse[DepsT, None]:
+        """Stream an `llm.AsyncContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT],
+        **params: Unpack[Params],
+    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+        """Stream an `llm.AsyncContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
+        ...
+
+    async def context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this client's LLM provider.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            toolkit: Toolkit of tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+        """
+        with self._wrap_errors():
+            stream_response = await self._context_stream_async(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                toolkit=toolkit,
+                format=format,
+                **params,
+            )
+            stream_response._chunk_iterator = self._wrap_async_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
+            )
+            return stream_response
+
+    @abstractmethod
+    async def _context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Implementation for context_stream_async(). Subclasses override this method."""
+        ...
+
+    @overload
+    def resume(
+        self,
+        *,
+        model_id: str,
+        response: Response,
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> Response:
+        """Resume an `llm.Response` without a response format."""
+        ...
+
+    @overload
+    def resume(
+        self,
+        *,
+        model_id: str,
+        response: Response[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> Response[FormattableT]:
+        """Resume an `llm.Response` with a response format."""
+        ...
+
+    @overload
+    def resume(
+        self,
+        *,
+        model_id: str,
+        response: Response | Response[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Resume an `llm.Response` with an optional response format."""
+        ...
+
+    def resume(
+        self,
+        *,
+        model_id: str,
+        response: Response | Response[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Generate a new `llm.Response` by extending another response's messages with additional user content.
+
+        Args:
+            model_id: Model identifier to use.
+            response: Previous response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.Response` object containing the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.call()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return self.call(
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
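Since `resume()` replays `response.messages` plus one new user turn through `call()`, chaining turns is a two-liner. A sketch with `client` and `toolkit` assumed as before:

first = client.call(
    model_id="provider:model-name",
    messages=[user("Name one classic novel.")],
    toolkit=toolkit,
)
followup = client.resume(
    model_id="provider:model-name",
    response=first,
    content="Why that one?",  # appended as a user message via user(content)
)
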
+    @overload
+    async def resume_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncResponse,
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncResponse:
+        """Resume an `llm.AsyncResponse` without a response format."""
+        ...
+
+    @overload
+    async def resume_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncResponse[FormattableT]:
+        """Resume an `llm.AsyncResponse` with a response format."""
+        ...
+
+    @overload
+    async def resume_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncResponse | AsyncResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Resume an `llm.AsyncResponse` with an optional response format."""
+        ...
+
+    async def resume_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncResponse | AsyncResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.
+
+        Args:
+            model_id: Model identifier to use.
+            response: Previous async response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncResponse` object containing the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.call_async()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return await self.call_async(
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    def context_resume(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextResponse[DepsT, None],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None]:
+        """Resume an `llm.ContextResponse` without a response format."""
+        ...
+
+    @overload
+    def context_resume(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, FormattableT]:
+        """Resume an `llm.ContextResponse` with a response format."""
+        ...
+
+    @overload
+    def context_resume(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Resume an `llm.ContextResponse` with an optional response format."""
+        ...
+
+    def context_resume(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous context response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.ContextResponse` object containing the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.context_call()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return self.context_call(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextResponse[DepsT, None],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None]:
+        """Resume an `llm.AsyncContextResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextResponse[DepsT, None]
+        | AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextResponse` with an optional response format."""
+        ...
+
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextResponse[DepsT, None]
+        | AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous async context response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncContextResponse` object containing the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.context_call_async()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return await self.context_call_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        model_id: str,
+        response: StreamResponse,
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse:
+        """Resume an `llm.StreamResponse` without a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        model_id: str,
+        response: StreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        model_id: str,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with an optional response format."""
+        ...
+
+    def resume_stream(
+        self,
+        *,
+        model_id: str,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            model_id: Model identifier to use.
+            response: Previous stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.StreamResponse` object for streaming the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.stream()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return self.stream(
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
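`resume_stream()` follows the same pattern but re-enters the streaming path. The sketch below assumes the first stream must be fully consumed before `response.messages` is complete; the diff does not state this, so treat it as an assumption:

stream_one = client.stream(
    model_id="provider:model-name",
    messages=[user("Start a story about a lighthouse.")],
    toolkit=toolkit,
)
for _ in stream_one:  # drain so the response's messages are finalized (assumed)
    pass

stream_two = client.resume_stream(
    model_id="provider:model-name",
    response=stream_one,
    content="Continue the story.",
)
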
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncStreamResponse,
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse:
+        """Resume an `llm.AsyncStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncStreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with an optional response format."""
+        ...
+
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: str,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            model_id: Model identifier to use.
+            response: Previous async stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.stream_async()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return await self.stream_async(
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextStreamResponse[DepsT, None],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT, None]:
+        """Resume an `llm.ContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.ContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.ContextStreamResponse` with an optional response format."""
+        ...
+
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous context stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.ContextStreamResponse` object for streaming the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.context_stream()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return self.context_stream(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextStreamResponse[DepsT, None],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextStreamResponse[DepsT, None]:
+        """Resume an `llm.AsyncContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
+        ...
+
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous async context stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
+
+        Note:
+            Uses the previous response's tools and output format. This base method wraps
+            around calling `client.context_stream_async()` with a messages array derived from the response
+            messages. However, clients may override this with first-class resume logic.
+        """
+        messages = response.messages + [user(content)]
+        return await self.context_stream_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            toolkit=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @abstractmethod
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract the HTTP status code from a provider-specific exception.
+
+        Different SDKs store status codes differently (e.g., `.status_code` vs `.code`).
+        Each provider implements this to handle its SDK's convention.
+
+        Args:
+            e: The exception to extract the status code from.
+
+        Returns:
+            The HTTP status code if available, None otherwise.
+        """
+        ...
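
To make the hook concrete, here is one hedged way a provider subclass might implement `get_error_status`; the `httpx` exception type and the `BaseClient` name are assumptions, not taken from this diff:

import httpx  # assumed transport; many provider SDKs are built on it

class ExampleProviderClient(BaseClient):  # "BaseClient" stands in for the abstract class above
    def get_error_status(self, e: Exception) -> int | None:
        # Transport-level errors carry the response object directly.
        if isinstance(e, httpx.HTTPStatusError):
            return e.response.status_code
        # Fall back to the common `.status_code` attribute convention.
        status = getattr(e, "status_code", None)
        return status if isinstance(status, int) else None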