mirascope 2.0.0a5__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +10 -1
- mirascope/_stubs.py +363 -0
- mirascope/api/__init__.py +8 -0
- mirascope/api/_generated/__init__.py +285 -2
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +506 -0
- mirascope/api/_generated/annotations/raw_client.py +1414 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +12 -2
- mirascope/api/_generated/api_keys/client.py +77 -0
- mirascope/api/_generated/api_keys/raw_client.py +422 -39
- mirascope/api/_generated/api_keys/types/__init__.py +7 -1
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +4 -12
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +4 -12
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +4 -12
- mirascope/api/_generated/client.py +42 -0
- mirascope/api/_generated/core/client_wrapper.py +2 -14
- mirascope/api/_generated/core/datetime_utils.py +1 -3
- mirascope/api/_generated/core/file.py +2 -5
- mirascope/api/_generated/core/http_client.py +36 -112
- mirascope/api/_generated/core/jsonable_encoder.py +1 -3
- mirascope/api/_generated/core/pydantic_utilities.py +19 -74
- mirascope/api/_generated/core/query_encoder.py +1 -3
- mirascope/api/_generated/core/serialization.py +4 -10
- mirascope/api/_generated/docs/client.py +2 -6
- mirascope/api/_generated/docs/raw_client.py +51 -5
- mirascope/api/_generated/environment.py +3 -3
- mirascope/api/_generated/environments/__init__.py +6 -0
- mirascope/api/_generated/environments/client.py +117 -0
- mirascope/api/_generated/environments/raw_client.py +530 -51
- mirascope/api/_generated/environments/types/__init__.py +10 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +1 -3
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +1 -3
- mirascope/api/_generated/environments/types/environments_list_response_item.py +1 -3
- mirascope/api/_generated/environments/types/environments_update_response.py +1 -3
- mirascope/api/_generated/errors/__init__.py +8 -0
- mirascope/api/_generated/errors/bad_request_error.py +1 -2
- mirascope/api/_generated/errors/conflict_error.py +1 -2
- mirascope/api/_generated/errors/forbidden_error.py +1 -5
- mirascope/api/_generated/errors/internal_server_error.py +1 -6
- mirascope/api/_generated/errors/not_found_error.py +1 -5
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +39 -0
- mirascope/api/_generated/functions/client.py +647 -0
- mirascope/api/_generated/functions/raw_client.py +1890 -0
- mirascope/api/_generated/functions/types/__init__.py +53 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/client.py +2 -6
- mirascope/api/_generated/health/raw_client.py +51 -5
- mirascope/api/_generated/health/types/health_check_response.py +1 -3
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +26 -0
- mirascope/api/_generated/organizations/client.py +465 -0
- mirascope/api/_generated/organizations/raw_client.py +1799 -108
- mirascope/api/_generated/organizations/types/__init__.py +48 -0
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_get_response.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +25 -0
- mirascope/api/_generated/project_memberships/client.py +437 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +2 -12
- mirascope/api/_generated/projects/client.py +17 -71
- mirascope/api/_generated/projects/raw_client.py +295 -51
- mirascope/api/_generated/projects/types/__init__.py +1 -6
- mirascope/api/_generated/projects/types/projects_create_response.py +3 -9
- mirascope/api/_generated/projects/types/projects_get_response.py +3 -9
- mirascope/api/_generated/projects/types/projects_list_response_item.py +3 -9
- mirascope/api/_generated/projects/types/projects_update_response.py +3 -9
- mirascope/api/_generated/reference.md +3619 -182
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +42 -0
- mirascope/api/_generated/traces/client.py +941 -0
- mirascope/api/_generated/traces/raw_client.py +2177 -23
- mirascope/api/_generated/traces/types/__init__.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +4 -11
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +4 -8
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +6 -18
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_response.py +2 -5
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +3 -9
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
- mirascope/api/_generated/types/__init__.py +48 -0
- mirascope/api/_generated/types/already_exists_error.py +1 -3
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +1 -3
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +1 -3
- mirascope/api/_generated/types/issue_tag.py +1 -8
- mirascope/api/_generated/types/not_found_error_body.py +1 -3
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +1 -3
- mirascope/api/_generated/types/permission_denied_error_tag.py +1 -3
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/property_key_key.py +1 -3
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/api/settings.py +19 -1
- mirascope/llm/__init__.py +55 -8
- mirascope/llm/calls/__init__.py +2 -1
- mirascope/llm/calls/calls.py +3 -1
- mirascope/llm/calls/decorator.py +21 -7
- mirascope/llm/content/tool_call.py +6 -0
- mirascope/llm/content/tool_output.py +22 -5
- mirascope/llm/exceptions.py +284 -71
- mirascope/llm/formatting/__init__.py +19 -2
- mirascope/llm/formatting/format.py +219 -30
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +80 -7
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +21 -64
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/messages/__init__.py +3 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/models/__init__.py +5 -0
- mirascope/llm/models/models.py +137 -69
- mirascope/llm/{providers/base → models}/params.py +16 -37
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/_utils.py +0 -32
- mirascope/llm/prompts/decorator.py +16 -5
- mirascope/llm/prompts/prompts.py +131 -68
- mirascope/llm/providers/__init__.py +18 -2
- mirascope/llm/providers/anthropic/__init__.py +3 -21
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -11
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +75 -25
- mirascope/llm/providers/anthropic/_utils/decode.py +22 -11
- mirascope/llm/providers/anthropic/_utils/encode.py +82 -20
- mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
- mirascope/llm/providers/anthropic/beta_provider.py +64 -18
- mirascope/llm/providers/anthropic/provider.py +91 -33
- mirascope/llm/providers/base/__init__.py +0 -2
- mirascope/llm/providers/base/_utils.py +55 -11
- mirascope/llm/providers/base/base_provider.py +116 -37
- mirascope/llm/providers/google/__init__.py +2 -17
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +37 -15
- mirascope/llm/providers/google/_utils/encode.py +127 -19
- mirascope/llm/providers/google/_utils/errors.py +3 -2
- mirascope/llm/providers/google/model_info.py +1 -0
- mirascope/llm/providers/google/provider.py +68 -19
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +73 -0
- mirascope/llm/providers/mirascope/provider.py +349 -0
- mirascope/llm/providers/mlx/__init__.py +2 -17
- mirascope/llm/providers/mlx/_utils.py +8 -3
- mirascope/llm/providers/mlx/encoding/base.py +5 -2
- mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
- mirascope/llm/providers/mlx/mlx.py +23 -6
- mirascope/llm/providers/mlx/provider.py +42 -13
- mirascope/llm/providers/ollama/__init__.py +1 -13
- mirascope/llm/providers/openai/_utils/errors.py +2 -2
- mirascope/llm/providers/openai/completions/__init__.py +2 -20
- mirascope/llm/providers/openai/completions/_utils/decode.py +14 -3
- mirascope/llm/providers/openai/completions/_utils/encode.py +35 -28
- mirascope/llm/providers/openai/completions/base_provider.py +40 -11
- mirascope/llm/providers/openai/provider.py +40 -10
- mirascope/llm/providers/openai/responses/__init__.py +1 -17
- mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +21 -8
- mirascope/llm/providers/openai/responses/_utils/encode.py +59 -19
- mirascope/llm/providers/openai/responses/provider.py +56 -18
- mirascope/llm/providers/provider_id.py +1 -0
- mirascope/llm/providers/provider_registry.py +96 -19
- mirascope/llm/providers/together/__init__.py +1 -13
- mirascope/llm/responses/__init__.py +6 -1
- mirascope/llm/responses/_utils.py +102 -12
- mirascope/llm/responses/base_response.py +5 -2
- mirascope/llm/responses/base_stream_response.py +139 -45
- mirascope/llm/responses/response.py +2 -1
- mirascope/llm/responses/root_response.py +89 -17
- mirascope/llm/responses/stream_response.py +6 -9
- mirascope/llm/tools/decorator.py +17 -8
- mirascope/llm/tools/tool_schema.py +43 -10
- mirascope/llm/tools/toolkit.py +35 -27
- mirascope/llm/tools/tools.py +123 -30
- mirascope/ops/__init__.py +64 -109
- mirascope/ops/_internal/configuration.py +82 -31
- mirascope/ops/_internal/exporters/exporters.py +64 -11
- mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
- mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1243
- mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
- mirascope/ops/_internal/protocols.py +83 -1
- mirascope/ops/_internal/traced_calls.py +4 -0
- mirascope/ops/_internal/traced_functions.py +141 -12
- mirascope/ops/_internal/tracing.py +78 -1
- mirascope/ops/_internal/utils.py +52 -4
- mirascope/ops/_internal/versioned_functions.py +54 -43
- {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/METADATA +14 -13
- mirascope-2.0.1.dist-info/RECORD +423 -0
- {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
- mirascope/llm/formatting/_utils.py +0 -78
- mirascope/llm/mcp/client.py +0 -118
- mirascope/llm/providers/_missing_import_stubs.py +0 -49
- mirascope-2.0.0a5.dist-info/RECORD +0 -265
- {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0
--- a/mirascope/llm/providers/anthropic/beta_provider.py
+++ b/mirascope/llm/providers/anthropic/beta_provider.py
@@ -1,12 +1,15 @@
 """Beta Anthropic provider implementation."""
 
+from __future__ import annotations
+
 from collections.abc import Sequence
+from typing import TYPE_CHECKING
 from typing_extensions import Unpack
 
 from anthropic import Anthropic, AsyncAnthropic
 
 from ...context import Context, DepsT
-from ...formatting import Format, FormattableT
+from ...formatting import Format, FormattableT, OutputParser
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -28,11 +31,14 @@ from ...tools import (
     Tool,
     Toolkit,
 )
-from ..base import BaseProvider
+from ..base import BaseProvider
 from . import _utils
 from ._utils import beta_decode, beta_encode
 from .model_id import model_name
 
+if TYPE_CHECKING:
+    from ...models import Params
+
 
 class AnthropicBetaProvider(BaseProvider[Anthropic]):
     """Provider using beta Anthropic API."""
@@ -58,7 +64,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` using the beta Anthropic API."""
@@ -70,8 +79,9 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_response = self.client.beta.messages.parse(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
-            beta_response, model_id
+            beta_response, model_id, include_thoughts=include_thoughts
         )
         return Response(
             raw=beta_response,
@@ -96,7 +106,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` using the beta Anthropic API."""
@@ -108,8 +121,9 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_response = self.client.beta.messages.parse(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
-            beta_response, model_id
+            beta_response, model_id, include_thoughts=include_thoughts
         )
         return ContextResponse(
             raw=beta_response,
@@ -131,7 +145,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` using the beta Anthropic API."""
@@ -143,8 +160,9 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_response = await self.async_client.beta.messages.parse(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
-            beta_response, model_id
+            beta_response, model_id, include_thoughts=include_thoughts
         )
         return AsyncResponse(
             raw=beta_response,
@@ -169,7 +187,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` using the beta Anthropic API."""
@@ -181,8 +202,9 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_response = await self.async_client.beta.messages.parse(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
-            beta_response, model_id
+            beta_response, model_id, include_thoughts=include_thoughts
         )
         return AsyncContextResponse(
             raw=beta_response,
@@ -204,7 +226,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` using the beta Anthropic API."""
@@ -216,7 +241,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_stream = self.client.beta.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = beta_decode.beta_decode_stream(
+            beta_stream, include_thoughts=include_thoughts
+        )
         return StreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -237,7 +265,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` using the beta Anthropic API."""
@@ -249,7 +280,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_stream = self.client.beta.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = beta_decode.beta_decode_stream(
+            beta_stream, include_thoughts=include_thoughts
+        )
         return ContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -267,7 +301,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` using the beta Anthropic API."""
@@ -279,7 +316,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
            params=params,
         )
         beta_stream = self.async_client.beta.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = beta_decode.beta_decode_async_stream(
+            beta_stream, include_thoughts=include_thoughts
+        )
         return AsyncStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -300,7 +340,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -315,7 +358,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             params=params,
         )
         beta_stream = self.async_client.beta.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = beta_decode.beta_decode_async_stream(
+            beta_stream, include_thoughts=include_thoughts
+        )
         return AsyncContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
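Every hunk above threads a new `include_thoughts` flag from the call params into the decode helpers. For orientation, a minimal sketch of the lookup they rely on (the helper itself is defined later in this diff, in `providers/base/_utils.py`; the example params dicts here are illustrative, not values shipped in the package):

```python
# Minimal sketch mirroring get_include_thoughts from providers/base/_utils.py.
# The example params dicts below are made up for illustration.
def get_include_thoughts(params: dict) -> bool:
    """Read thinking.include_thoughts from params, defaulting to False."""
    thinking_config = params.get("thinking")
    return (thinking_config or {}).get("include_thoughts", False)


print(get_include_thoughts({}))                                        # False: no thinking config
print(get_include_thoughts({"thinking": None}))                        # False: explicit None tolerated
print(get_include_thoughts({"thinking": {"include_thoughts": True}}))  # True
```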
--- a/mirascope/llm/providers/anthropic/provider.py
+++ b/mirascope/llm/providers/anthropic/provider.py
@@ -1,12 +1,15 @@
 """Anthropic client implementation."""
 
+from __future__ import annotations
+
 from collections.abc import Sequence
+from typing import TYPE_CHECKING
 from typing_extensions import Unpack
 
 from anthropic import Anthropic, AsyncAnthropic
 
 from ...context import Context, DepsT
-from ...formatting import Format, FormattableT, resolve_format
+from ...formatting import Format, FormattableT, OutputParser, resolve_format
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -19,35 +22,50 @@ from ...responses import (
     StreamResponse,
 )
 from ...tools import (
+    AnyToolSchema,
     AsyncContextTool,
     AsyncContextToolkit,
     AsyncTool,
     AsyncToolkit,
+    BaseToolkit,
     ContextTool,
     ContextToolkit,
     Tool,
     Toolkit,
 )
-from ..base import BaseProvider,
+from ..base import BaseProvider, _utils as _base_utils
 from . import _utils
 from .beta_provider import AnthropicBetaProvider
 from .model_id import AnthropicModelId, model_name
 from .model_info import MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
 
+if TYPE_CHECKING:
+    from ...models import Params
+
 
 def _should_use_beta(
     model_id: AnthropicModelId,
-    format: type[FormattableT]
+    format: type[FormattableT]
+    | Format[FormattableT]
+    | OutputParser[FormattableT]
+    | None,
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
 ) -> bool:
-    """Determine whether to use the beta API based on format mode.
+    """Determine whether to use the beta API based on format mode or strict tools.
 
-    If the format resolves to strict mode,
-    strict structured output support, then we
+    If the format resolves to strict mode, or any tools have strict=True,
+    and the model plausibly has strict structured output support, then we
+    will use the beta provider.
     """
-
-    if resolved is None or resolved.mode != "strict":
+    if model_name(model_id) in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS:
        return False
-
+
+    # Check if format requires strict mode
+    resolved = resolve_format(format, default_mode=_utils.DEFAULT_FORMAT_MODE)
+    if resolved is not None and resolved.mode == "strict":
+        return True
+
+    return _base_utils.has_strict_tools(tools)
 
 
 class AnthropicProvider(BaseProvider[Anthropic]):
@@ -76,11 +94,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.call(
                 model_id=model_id,
                 messages=messages,
@@ -97,8 +118,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = self.client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return Response(
             raw=anthropic_response,
@@ -123,11 +145,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.context_call(
                 ctx=ctx,
                 model_id=model_id,
@@ -145,8 +170,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = self.client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return ContextResponse(
             raw=anthropic_response,
@@ -168,11 +194,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.call_async(
                 model_id=model_id,
                 messages=messages,
@@ -189,8 +218,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = await self.async_client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return AsyncResponse(
             raw=anthropic_response,
@@ -215,11 +245,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.context_call_async(
                 ctx=ctx,
                 model_id=model_id,
@@ -237,8 +270,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = await self.async_client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return AsyncContextResponse(
             raw=anthropic_response,
@@ -260,11 +294,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.stream(
                 model_id=model_id,
                 messages=messages,
@@ -281,7 +318,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.client.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return StreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -302,11 +342,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.context_stream(
                 ctx=ctx,
                 model_id=model_id,
@@ -324,7 +367,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.client.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return ContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -342,11 +388,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.stream_async(
                 model_id=model_id,
                 messages=messages,
@@ -362,7 +411,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.async_client.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_async_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return AsyncStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -383,14 +435,17 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.context_stream_async(
                 ctx=ctx,
                 model_id=model_id,
@@ -408,7 +463,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
        )
         anthropic_stream = self.async_client.messages.stream(**kwargs)
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_async_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return AsyncContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
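The reworked `_should_use_beta` now routes to the beta API for strict tools as well as strict formats. A standalone sketch of that decision follows; the stub classes, the model-name set, and the example calls are illustrative placeholders, not mirascope's real types or data:

```python
# Standalone sketch of the routing decision shown in the hunk above.
# ResolvedFormat, ToolSchema, and MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS here
# are illustrative stand-ins for the real mirascope internals.
from __future__ import annotations

from dataclasses import dataclass

MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS = {"legacy-model"}  # placeholder contents


@dataclass
class ResolvedFormat:
    mode: str  # e.g. "strict" or "json"


@dataclass
class ToolSchema:
    strict: bool | None = None


def has_strict_tools(tools: list[ToolSchema] | None) -> bool:
    # Mirrors providers/base/_utils.has_strict_tools for a plain list of tools.
    return bool(tools) and any(tool.strict is True for tool in tools)


def should_use_beta(
    model_name: str,
    resolved: ResolvedFormat | None,
    tools: list[ToolSchema] | None,
) -> bool:
    # Models without strict structured output support never route to the beta API.
    if model_name in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS:
        return False
    # A format that resolves to strict mode forces the beta API...
    if resolved is not None and resolved.mode == "strict":
        return True
    # ...and so does any tool explicitly marked strict=True.
    return has_strict_tools(tools)


print(should_use_beta("some-model", None, [ToolSchema(strict=True)]))   # True
print(should_use_beta("legacy-model", ResolvedFormat("strict"), None))  # False
print(should_use_beta("some-model", None, [ToolSchema(strict=None)]))   # False
```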
--- a/mirascope/llm/providers/base/__init__.py
+++ b/mirascope/llm/providers/base/__init__.py
@@ -3,13 +3,11 @@
 from . import _utils
 from .base_provider import BaseProvider, Provider, ProviderErrorMap
 from .kwargs import BaseKwargs, KwargsT
-from .params import Params
 
 __all__ = [
     "BaseKwargs",
     "BaseProvider",
     "KwargsT",
-    "Params",
     "Provider",
     "ProviderErrorMap",
     "_utils",
--- a/mirascope/llm/providers/base/_utils.py
+++ b/mirascope/llm/providers/base/_utils.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import logging
 from collections.abc import Generator, Sequence
 from contextlib import contextmanager
@@ -5,10 +7,14 @@ from typing import TYPE_CHECKING, TypeAlias, cast, get_type_hints
 
 from ...content import Text
 from ...messages import AssistantMessage, Message, SystemMessage, UserMessage
+from ...models.params import (
+    Params,  # Import directly from params.py to avoid circular dependency
+)
+from ...tools import AnyToolSchema, BaseToolkit
 from ..provider_id import ProviderId
-from .params import Params
 
 if TYPE_CHECKING:
+    from ...models import ThinkingConfig
     from ..model_id import ModelId
 
 logger = logging.getLogger(__name__)
@@ -16,6 +22,29 @@ logger = logging.getLogger(__name__)
 SystemMessageContent: TypeAlias = str | None
 
 
+def get_include_thoughts(params: Params) -> bool:
+    """Extract include_thoughts from params thinking config."""
+    thinking_config = params.get("thinking")
+    return (thinking_config or {}).get("include_thoughts", False)
+
+
+def has_strict_tools(
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
+) -> bool:
+    """Check if any tools have strict=True explicitly set.
+
+    Args:
+        tools: The tools to check, either a sequence or a toolkit
+
+    Returns:
+        True if any tool has strict=True, False otherwise
+    """
+    if tools is None:
+        return False
+    tools_list = tools.tools if isinstance(tools, BaseToolkit) else tools
+    return any(tool.strict is True for tool in tools_list)
+
+
 def ensure_additional_properties_false(obj: object) -> None:
     """Recursively adds additionalProperties = False to a schema, required for strict mode."""
     if isinstance(obj, dict):
@@ -30,6 +59,27 @@
             ensure_additional_properties_false(item)
 
 
+def ensure_all_properties_required(obj: object) -> None:
+    """Recursively ensures all properties are in required array, needed for OpenAI strict mode.
+
+    OpenAI's strict mode requires that all properties in an object schema are listed
+    in the 'required' array, even if they have default values.
+    """
+    if isinstance(obj, dict):
+        obj = cast(dict[str, object], obj)
+        if obj.get("type") == "object" and "properties" in obj:
+            properties = obj.get("properties")
+            if isinstance(properties, dict):
+                property_keys = cast(dict[str, object], properties)
+                obj["required"] = list(property_keys.keys())
+        for value in obj.values():
+            ensure_all_properties_required(value)
+    elif isinstance(obj, list):
+        obj = cast(list[object], obj)
+        for item in obj:
+            ensure_all_properties_required(item)
+
+
 def add_system_instructions(
     messages: Sequence[Message], additional_system_instructions: str
 ) -> Sequence[Message]:
@@ -138,23 +188,17 @@ class SafeParamsAccessor:
         return self._params.get("stop_sequences")
 
     @property
-    def thinking(self) ->
+    def thinking(self) -> ThinkingConfig | None:
         """Access the thinking parameter."""
         self._unaccessed.discard("thinking")
         return self._params.get("thinking")
 
-    @property
-    def encode_thoughts_as_text(self) -> bool | None:
-        """Access the encode_thoughts_as_text parameter."""
-        self._unaccessed.discard("encode_thoughts_as_text")
-        return self._params.get("encode_thoughts_as_text")
-
     def emit_warning_for_unused_param(
         self,
         param_name: str,
         param_value: object,
-        provider_id:
-        model_id:
+        provider_id: ProviderId,
+        model_id: ModelId | None = None,
     ) -> None:
         unsupported_by = f"provider: {provider_id}"
         if model_id:
@@ -174,7 +218,7 @@
 def ensure_all_params_accessed(
     *,
     params: Params,
-    provider_id:
+    provider_id: ProviderId,
     unsupported_params: list[str] | None = None,
 ) -> Generator[SafeParamsAccessor, None, None]:
     """Context manager that ensures all parameters are accessed.