mirascope 2.0.0a5__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +10 -1
- mirascope/_stubs.py +363 -0
- mirascope/api/__init__.py +8 -0
- mirascope/api/_generated/__init__.py +285 -2
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +506 -0
- mirascope/api/_generated/annotations/raw_client.py +1414 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +12 -2
- mirascope/api/_generated/api_keys/client.py +77 -0
- mirascope/api/_generated/api_keys/raw_client.py +422 -39
- mirascope/api/_generated/api_keys/types/__init__.py +7 -1
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +4 -12
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +4 -12
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +4 -12
- mirascope/api/_generated/client.py +42 -0
- mirascope/api/_generated/core/client_wrapper.py +2 -14
- mirascope/api/_generated/core/datetime_utils.py +1 -3
- mirascope/api/_generated/core/file.py +2 -5
- mirascope/api/_generated/core/http_client.py +36 -112
- mirascope/api/_generated/core/jsonable_encoder.py +1 -3
- mirascope/api/_generated/core/pydantic_utilities.py +19 -74
- mirascope/api/_generated/core/query_encoder.py +1 -3
- mirascope/api/_generated/core/serialization.py +4 -10
- mirascope/api/_generated/docs/client.py +2 -6
- mirascope/api/_generated/docs/raw_client.py +51 -5
- mirascope/api/_generated/environment.py +3 -3
- mirascope/api/_generated/environments/__init__.py +6 -0
- mirascope/api/_generated/environments/client.py +117 -0
- mirascope/api/_generated/environments/raw_client.py +530 -51
- mirascope/api/_generated/environments/types/__init__.py +10 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +1 -3
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +1 -3
- mirascope/api/_generated/environments/types/environments_list_response_item.py +1 -3
- mirascope/api/_generated/environments/types/environments_update_response.py +1 -3
- mirascope/api/_generated/errors/__init__.py +8 -0
- mirascope/api/_generated/errors/bad_request_error.py +1 -2
- mirascope/api/_generated/errors/conflict_error.py +1 -2
- mirascope/api/_generated/errors/forbidden_error.py +1 -5
- mirascope/api/_generated/errors/internal_server_error.py +1 -6
- mirascope/api/_generated/errors/not_found_error.py +1 -5
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +39 -0
- mirascope/api/_generated/functions/client.py +647 -0
- mirascope/api/_generated/functions/raw_client.py +1890 -0
- mirascope/api/_generated/functions/types/__init__.py +53 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/client.py +2 -6
- mirascope/api/_generated/health/raw_client.py +51 -5
- mirascope/api/_generated/health/types/health_check_response.py +1 -3
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +26 -0
- mirascope/api/_generated/organizations/client.py +465 -0
- mirascope/api/_generated/organizations/raw_client.py +1799 -108
- mirascope/api/_generated/organizations/types/__init__.py +48 -0
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_get_response.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +4 -3
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +25 -0
- mirascope/api/_generated/project_memberships/client.py +437 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +2 -12
- mirascope/api/_generated/projects/client.py +17 -71
- mirascope/api/_generated/projects/raw_client.py +295 -51
- mirascope/api/_generated/projects/types/__init__.py +1 -6
- mirascope/api/_generated/projects/types/projects_create_response.py +3 -9
- mirascope/api/_generated/projects/types/projects_get_response.py +3 -9
- mirascope/api/_generated/projects/types/projects_list_response_item.py +3 -9
- mirascope/api/_generated/projects/types/projects_update_response.py +3 -9
- mirascope/api/_generated/reference.md +3619 -182
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +42 -0
- mirascope/api/_generated/traces/client.py +941 -0
- mirascope/api/_generated/traces/raw_client.py +2177 -23
- mirascope/api/_generated/traces/types/__init__.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +4 -11
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +4 -8
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +6 -18
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_response.py +2 -5
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +3 -9
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
- mirascope/api/_generated/types/__init__.py +48 -0
- mirascope/api/_generated/types/already_exists_error.py +1 -3
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +1 -3
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +1 -3
- mirascope/api/_generated/types/issue_tag.py +1 -8
- mirascope/api/_generated/types/not_found_error_body.py +1 -3
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +1 -3
- mirascope/api/_generated/types/permission_denied_error_tag.py +1 -3
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/property_key_key.py +1 -3
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/api/settings.py +19 -1
- mirascope/llm/__init__.py +55 -8
- mirascope/llm/calls/__init__.py +2 -1
- mirascope/llm/calls/calls.py +3 -1
- mirascope/llm/calls/decorator.py +21 -7
- mirascope/llm/content/tool_call.py +6 -0
- mirascope/llm/content/tool_output.py +22 -5
- mirascope/llm/exceptions.py +284 -71
- mirascope/llm/formatting/__init__.py +19 -2
- mirascope/llm/formatting/format.py +219 -30
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +80 -7
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +21 -64
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/messages/__init__.py +3 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/models/__init__.py +5 -0
- mirascope/llm/models/models.py +137 -69
- mirascope/llm/{providers/base → models}/params.py +16 -37
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/_utils.py +0 -32
- mirascope/llm/prompts/decorator.py +16 -5
- mirascope/llm/prompts/prompts.py +131 -68
- mirascope/llm/providers/__init__.py +18 -2
- mirascope/llm/providers/anthropic/__init__.py +3 -21
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -11
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +75 -25
- mirascope/llm/providers/anthropic/_utils/decode.py +22 -11
- mirascope/llm/providers/anthropic/_utils/encode.py +82 -20
- mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
- mirascope/llm/providers/anthropic/beta_provider.py +64 -18
- mirascope/llm/providers/anthropic/provider.py +91 -33
- mirascope/llm/providers/base/__init__.py +0 -2
- mirascope/llm/providers/base/_utils.py +55 -11
- mirascope/llm/providers/base/base_provider.py +116 -37
- mirascope/llm/providers/google/__init__.py +2 -17
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +37 -15
- mirascope/llm/providers/google/_utils/encode.py +127 -19
- mirascope/llm/providers/google/_utils/errors.py +3 -2
- mirascope/llm/providers/google/model_info.py +1 -0
- mirascope/llm/providers/google/provider.py +68 -19
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +73 -0
- mirascope/llm/providers/mirascope/provider.py +349 -0
- mirascope/llm/providers/mlx/__init__.py +2 -17
- mirascope/llm/providers/mlx/_utils.py +8 -3
- mirascope/llm/providers/mlx/encoding/base.py +5 -2
- mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
- mirascope/llm/providers/mlx/mlx.py +23 -6
- mirascope/llm/providers/mlx/provider.py +42 -13
- mirascope/llm/providers/ollama/__init__.py +1 -13
- mirascope/llm/providers/openai/_utils/errors.py +2 -2
- mirascope/llm/providers/openai/completions/__init__.py +2 -20
- mirascope/llm/providers/openai/completions/_utils/decode.py +14 -3
- mirascope/llm/providers/openai/completions/_utils/encode.py +35 -28
- mirascope/llm/providers/openai/completions/base_provider.py +40 -11
- mirascope/llm/providers/openai/provider.py +40 -10
- mirascope/llm/providers/openai/responses/__init__.py +1 -17
- mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +21 -8
- mirascope/llm/providers/openai/responses/_utils/encode.py +59 -19
- mirascope/llm/providers/openai/responses/provider.py +56 -18
- mirascope/llm/providers/provider_id.py +1 -0
- mirascope/llm/providers/provider_registry.py +96 -19
- mirascope/llm/providers/together/__init__.py +1 -13
- mirascope/llm/responses/__init__.py +6 -1
- mirascope/llm/responses/_utils.py +102 -12
- mirascope/llm/responses/base_response.py +5 -2
- mirascope/llm/responses/base_stream_response.py +139 -45
- mirascope/llm/responses/response.py +2 -1
- mirascope/llm/responses/root_response.py +89 -17
- mirascope/llm/responses/stream_response.py +6 -9
- mirascope/llm/tools/decorator.py +17 -8
- mirascope/llm/tools/tool_schema.py +43 -10
- mirascope/llm/tools/toolkit.py +35 -27
- mirascope/llm/tools/tools.py +123 -30
- mirascope/ops/__init__.py +64 -109
- mirascope/ops/_internal/configuration.py +82 -31
- mirascope/ops/_internal/exporters/exporters.py +64 -11
- mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
- mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1243
- mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
- mirascope/ops/_internal/protocols.py +83 -1
- mirascope/ops/_internal/traced_calls.py +4 -0
- mirascope/ops/_internal/traced_functions.py +141 -12
- mirascope/ops/_internal/tracing.py +78 -1
- mirascope/ops/_internal/utils.py +52 -4
- mirascope/ops/_internal/versioned_functions.py +54 -43
- {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/METADATA +14 -13
- mirascope-2.0.1.dist-info/RECORD +423 -0
- {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
- mirascope/llm/formatting/_utils.py +0 -78
- mirascope/llm/mcp/client.py +0 -118
- mirascope/llm/providers/_missing_import_stubs.py +0 -49
- mirascope-2.0.0a5.dist-info/RECORD +0 -265
- {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0

mirascope/llm/providers/google/_utils/encode.py

@@ -1,10 +1,12 @@
 """Google message encoding and request preparation."""
 
+from __future__ import annotations
+
 import base64
 import json
 from collections.abc import Sequence
 from functools import lru_cache
-from typing import Any, TypedDict, cast
+from typing import TYPE_CHECKING, Any, TypedDict, cast
 from typing_extensions import Required
 
 from google.genai import types as genai_types
@@ -14,17 +16,109 @@ from ....exceptions import FeatureNotSupportedError
 from ....formatting import (
     Format,
     FormattableT,
-
+    OutputParser,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
 from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
-from ...base import
+from ...base import _utils as _base_utils
 from ..model_id import GoogleModelId, model_name
 from ..model_info import MODELS_WITHOUT_STRUCTURED_OUTPUT_AND_TOOLS_SUPPORT
 
+if TYPE_CHECKING:
+    from ....models import Params, ThinkingConfig, ThinkingLevel
+
 UNKNOWN_TOOL_ID = "google_unknown_tool_id"
 
+# Thinking level to a float multiplier % of max tokens (for 2.5 models using budget)
+THINKING_LEVEL_TO_BUDGET_MULTIPLIER: dict[ThinkingLevel, float] = {
+    "none": 0,
+    "minimal": 0.1,
+    "low": 0.2,
+    "medium": 0.4,
+    "high": 0.6,
+    "max": 0.8,
+}
+
+# Gemini 3 Pro supports only LOW or HIGH
+# https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
+THINKING_LEVEL_FOR_GEMINI_3_PRO: dict[ThinkingLevel, genai_types.ThinkingLevel] = {
+    "default": genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
+    "none": genai_types.ThinkingLevel.LOW,
+    "minimal": genai_types.ThinkingLevel.LOW,
+    "low": genai_types.ThinkingLevel.LOW,
+    "medium": genai_types.ThinkingLevel.HIGH,
+    "high": genai_types.ThinkingLevel.HIGH,
+    "max": genai_types.ThinkingLevel.HIGH,
+}
+
+# Gemini 3 Flash supports MINIMAL, LOW, MEDIUM, HIGH
+# https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
+THINKING_LEVEL_FOR_GEMINI_3_FLASH: dict[ThinkingLevel, genai_types.ThinkingLevel] = {
+    "default": genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
+    "none": genai_types.ThinkingLevel.MINIMAL,
+    "minimal": genai_types.ThinkingLevel.MINIMAL,
+    "low": genai_types.ThinkingLevel.LOW,
+    "medium": genai_types.ThinkingLevel.MEDIUM,
+    "high": genai_types.ThinkingLevel.HIGH,
+    "max": genai_types.ThinkingLevel.HIGH,
+}
+
+
+def google_thinking_config(
+    thinking_config: ThinkingConfig,
+    max_tokens: int | None,
+    model_id: GoogleModelId,
+) -> genai_types.ThinkingConfigDict:
+    """Compute Google thinking configuration based on model version.
+
+    Args:
+        thinking_config: The ThinkingConfig from params
+        max_tokens: Max output tokens (used to compute budget for 2.5 models)
+        model_id: The Google model ID to determine version
+
+    Returns:
+        ThinkingConfigDict with either thinking_level or thinking_budget set.
+
+    Notes:
+        - Gemini 2.5 models use thinking_budget (token count)
+        - Gemini 3.0 Pro supports thinking_level "low" or "high"
+        - Gemini 3.0 Flash supports thinking_level "minimal", "low", "medium", "high"
+
+    See: https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
+    """
+    level: ThinkingLevel = thinking_config.get("level", "default")
+    include_thoughts = thinking_config.get("include_thoughts")
+
+    result = genai_types.ThinkingConfigDict()
+
+    if "gemini-3-flash" in model_id:
+        result["thinking_level"] = THINKING_LEVEL_FOR_GEMINI_3_FLASH.get(
+            level, genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED
+        )
+    elif "gemini-3-pro" in model_id:
+        result["thinking_level"] = THINKING_LEVEL_FOR_GEMINI_3_PRO.get(
+            level, genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED
+        )
+    else:  # Fall back to 2.5-style budgets
+        # 2.5 models use thinking_budget
+        if level == "default":
+            budget = -1  # Dynamic budget
+        elif level == "none":
+            budget = 0  # Disable thinking
+        else:
+            # Compute budget as percentage of max_tokens
+            if max_tokens is None:
+                max_tokens = 16000
+            multiplier = THINKING_LEVEL_TO_BUDGET_MULTIPLIER.get(level, 0.4)
+            budget = int(multiplier * max_tokens)
+
+        result["thinking_budget"] = budget
+    if include_thoughts is not None:
+        result["include_thoughts"] = include_thoughts
+
+    return result
+
 
 class GoogleKwargs(TypedDict, total=False):
     """Kwargs for Google's generate_content method."""
@@ -105,7 +199,7 @@ def _encode_content(
                 function_response=genai_types.FunctionResponseDict(
                     id=part.id if part.id != UNKNOWN_TOOL_ID else None,
                     name=part.name,
-                    response={"output": str(part.
+                    response={"output": str(part.result)},
                 )
             )
         )
@@ -177,7 +271,10 @@ def encode_request(
     model_id: GoogleModelId,
     messages: Sequence[Message],
     tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT]
+    format: type[FormattableT]
+    | Format[FormattableT]
+    | OutputParser[FormattableT]
+    | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, GoogleKwargs]:
     """Prepares a request for the genai `Client.models.generate_content` method."""
@@ -187,7 +284,7 @@ def encode_request(
     google_config: genai_types.GenerateContentConfigDict = (
         genai_types.GenerateContentConfigDict()
     )
-
+    encode_thoughts_as_text = False
     google_model_name = model_name(model_id)
 
     with _base_utils.ensure_all_params_accessed(
@@ -206,19 +303,28 @@ def encode_request(
         if param_accessor.stop_sequences is not None:
             google_config["stop_sequences"] = param_accessor.stop_sequences
         if param_accessor.thinking is not None:
-
-
-
-
-
-
-
-
-
-
-            encode_thoughts = True
+            thinking_config = param_accessor.thinking
+
+            # Compute thinking config based on model version
+            google_config["thinking_config"] = google_thinking_config(
+                thinking_config, param_accessor.max_tokens, model_id
+            )
+
+            # Handle encode_thoughts_as_text from ThinkingConfig
+            if thinking_config.get("encode_thoughts_as_text"):
+                encode_thoughts_as_text = True
 
     tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
+
+    if _base_utils.has_strict_tools(tools):
+        raise FeatureNotSupportedError(
+            feature="strict tools",
+            provider_id="google",
+            model_id=model_id,
+            message="Google does not support strict mode for tools. "
+            "Set strict=False on your tools or omit the strict parameter.",
+        )
+
     google_tools: list[genai_types.ToolDict] = []
 
     allows_strict_mode_with_tools = (
@@ -244,7 +350,7 @@ def encode_request(
         google_config["response_mime_type"] = "application/json"
         google_config["response_schema"] = format.schema
     elif format.mode == "tool":
-        format_tool_schema =
+        format_tool_schema = format.create_tool_schema()
         format_tool = _convert_tool_to_function_declaration(format_tool_schema)
         google_tools.append(
            genai_types.ToolDict(function_declarations=[format_tool])
@@ -286,7 +392,9 @@ def encode_request(
 
     kwargs = GoogleKwargs(
         model=model_name(model_id),
-        contents=_encode_messages(
+        contents=_encode_messages(
+            remaining_messages, model_id, encode_thoughts_as_text
+        ),
         config=google_config,
     )
 
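For Gemini 2.5-style models the new google_thinking_config helper converts a ThinkingLevel into a token budget rather than a thinking_level enum. Below is a minimal standalone sketch of that arithmetic; the multipliers and the 16000-token fallback are taken from the table in the diff above, while the helper name and example values here are only for illustration.

    # Standalone sketch of the 2.5-style budget computation shown above.
    BUDGET_MULTIPLIER = {
        "none": 0.0, "minimal": 0.1, "low": 0.2, "medium": 0.4, "high": 0.6, "max": 0.8,
    }

    def gemini_25_thinking_budget(level: str, max_tokens: int | None) -> int:
        """Mirror of the budget branch: -1 = dynamic, 0 = disabled, else % of max_tokens."""
        if level == "default":
            return -1  # dynamic budget
        if level == "none":
            return 0  # thinking disabled
        if max_tokens is None:
            max_tokens = 16000  # fallback used when max_tokens is unset (per the diff)
        return int(BUDGET_MULTIPLIER.get(level, 0.4) * max_tokens)

    # e.g. level="medium" with max_tokens=10000 -> int(0.4 * 10000) = 4000 thinking tokens
    print(gemini_25_thinking_budget("medium", 10000))  # 4000
    print(gemini_25_thinking_budget("high", None))     # int(0.6 * 16000) = 9600

Gemini 3 models skip this arithmetic entirely and map the level onto the SDK's ThinkingLevel enum instead.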
mirascope/llm/providers/google/_utils/errors.py

@@ -11,20 +11,21 @@ from ....exceptions import (
     BadRequestError,
     NotFoundError,
     PermissionError,
+    ProviderError,
     RateLimitError,
     ServerError,
 )
 from ...base import ProviderErrorMap
 
 
-def map_google_error(e: Exception) -> type[
+def map_google_error(e: Exception) -> type[ProviderError]:
     """Map Google error to appropriate Mirascope error type.
 
     Google only provides ClientError (4xx) and ServerError (5xx) with status codes,
     so we map based on status code and message patterns.
     """
     if not isinstance(e, GoogleClientError | GoogleServerError):
-        return
+        return ProviderError
 
     # Authentication errors (401) or 400 with "API key not valid"
     if e.code == 401 or (e.code == 400 and "API key not valid" in str(e)):

mirascope/llm/providers/google/model_info.py

@@ -21,6 +21,7 @@ GoogleKnownModels = Literal[
     "google/gemini-2.5-flash-lite-preview-09-2025",
     "google/gemini-2.5-flash-preview-09-2025",
     "google/gemini-2.5-pro",
+    "google/gemini-3-flash-preview",
     "google/gemini-3-pro-image-preview",
     "google/gemini-3-pro-preview",
     "google/gemini-flash-latest",

mirascope/llm/providers/google/provider.py

@@ -1,13 +1,16 @@
 """Google provider implementation."""
 
+from __future__ import annotations
+
 from collections.abc import Sequence
+from typing import TYPE_CHECKING
 from typing_extensions import Unpack
 
 from google.genai import Client
 from google.genai.types import HttpOptions
 
 from ...context import Context, DepsT
-from ...formatting import Format, FormattableT
+from ...formatting import Format, FormattableT, OutputParser
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -29,10 +32,13 @@ from ...tools import (
     Tool,
     Toolkit,
 )
-from ..base import BaseProvider
+from ..base import BaseProvider
 from . import _utils
 from .model_id import GoogleModelId, model_name
 
+if TYPE_CHECKING:
+    from ...models import Params
+
 
 class GoogleProvider(BaseProvider[Client]):
     """The client for the Google LLM model."""
@@ -47,7 +53,10 @@ class GoogleProvider(BaseProvider[Client]):
         """Initialize the Google client."""
         http_options = None
         if base_url:
-            http_options = HttpOptions(
+            http_options = HttpOptions(
+                base_url=base_url,
+                headers={"Authorization": f"Bearer {api_key}"},
+            )
 
         self.client = Client(api_key=api_key, http_options=http_options)
 
@@ -61,7 +70,10 @@ class GoogleProvider(BaseProvider[Client]):
         model_id: GoogleModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the Google GenAI API.
@@ -85,8 +97,9 @@ class GoogleProvider(BaseProvider[Client]):
         )
         google_response = self.client.models.generate_content(**kwargs)
 
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            google_response, model_id
+            google_response, model_id, include_thoughts=include_thoughts
         )
 
         return Response(
@@ -112,7 +125,10 @@ class GoogleProvider(BaseProvider[Client]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the Google GenAI API.
@@ -137,8 +153,9 @@ class GoogleProvider(BaseProvider[Client]):
         )
         google_response = self.client.models.generate_content(**kwargs)
 
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            google_response, model_id
+            google_response, model_id, include_thoughts=include_thoughts
         )
 
         return ContextResponse(
@@ -161,7 +178,10 @@ class GoogleProvider(BaseProvider[Client]):
         model_id: GoogleModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the Google GenAI API.
@@ -185,8 +205,9 @@ class GoogleProvider(BaseProvider[Client]):
         )
         google_response = await self.client.aio.models.generate_content(**kwargs)
 
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            google_response, model_id
+            google_response, model_id, include_thoughts=include_thoughts
         )
 
         return AsyncResponse(
@@ -212,7 +233,10 @@ class GoogleProvider(BaseProvider[Client]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Google GenAI API.
@@ -237,8 +261,9 @@ class GoogleProvider(BaseProvider[Client]):
         )
         google_response = await self.client.aio.models.generate_content(**kwargs)
 
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            google_response, model_id
+            google_response, model_id, include_thoughts=include_thoughts
         )
 
         return AsyncContextResponse(
@@ -261,7 +286,10 @@ class GoogleProvider(BaseProvider[Client]):
         model_id: GoogleModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the Google GenAI API.
@@ -286,7 +314,10 @@ class GoogleProvider(BaseProvider[Client]):
 
         google_stream = self.client.models.generate_content_stream(**kwargs)
 
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_stream(
+            google_stream, include_thoughts=include_thoughts
+        )
 
         return StreamResponse(
             provider_id="google",
@@ -308,7 +339,10 @@ class GoogleProvider(BaseProvider[Client]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Google GenAI API.
@@ -334,7 +368,10 @@ class GoogleProvider(BaseProvider[Client]):
 
         google_stream = self.client.models.generate_content_stream(**kwargs)
 
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_stream(
+            google_stream, include_thoughts=include_thoughts
+        )
 
         return ContextStreamResponse(
             provider_id="google",
@@ -353,7 +390,10 @@ class GoogleProvider(BaseProvider[Client]):
         model_id: GoogleModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Google GenAI API.
@@ -378,7 +418,10 @@ class GoogleProvider(BaseProvider[Client]):
 
         google_stream = await self.client.aio.models.generate_content_stream(**kwargs)
 
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_async_stream(
+            google_stream, include_thoughts=include_thoughts
+        )
 
         return AsyncStreamResponse(
             provider_id="google",
@@ -400,7 +443,10 @@ class GoogleProvider(BaseProvider[Client]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -429,7 +475,10 @@ class GoogleProvider(BaseProvider[Client]):
 
         google_stream = await self.client.aio.models.generate_content_stream(**kwargs)
 
-
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_async_stream(
+            google_stream, include_thoughts=include_thoughts
+        )
 
         return AsyncContextStreamResponse(
             provider_id="google",
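One behavioral change worth noting in the provider diff above: when a base_url is supplied, the Google client is now built with both the override URL and an Authorization: Bearer header carrying the API key. A rough sketch of the resulting client construction follows; the proxy URL is a hypothetical placeholder, while the HttpOptions fields mirror the diff.

    # Sketch of the client setup performed by the updated __init__ when base_url is set.
    from google.genai import Client
    from google.genai.types import HttpOptions

    api_key = "..."  # your Google (or router) API key
    http_options = HttpOptions(
        base_url="https://example-proxy.invalid/google",  # hypothetical proxy URL
        headers={"Authorization": f"Bearer {api_key}"},   # API key also sent as a Bearer header
    )
    client = Client(api_key=api_key, http_options=http_options)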

mirascope/llm/providers/mirascope/_utils.py (new file)

@@ -0,0 +1,73 @@
+"""Utility functions for Mirascope Router provider."""
+
+import os
+from typing import cast
+
+from ..base import Provider
+from ..provider_id import ProviderId
+
+
+def extract_model_scope(model_id: str) -> str | None:
+    """Extract model scope from model ID.
+
+    Args:
+        model_id: Model identifier in the format "scope/model-name"
+            e.g., "openai/gpt-4", "anthropic/claude-3", "google/gemini-pro"
+
+    Returns:
+        The model scope (e.g., "openai", "anthropic", "google") or None if invalid format.
+    """
+    if "/" not in model_id:
+        return None
+    return model_id.split("/", 1)[0]
+
+
+def get_default_router_base_url() -> str:
+    """Get the default router base URL from environment or use default.
+
+    Returns:
+        The router base URL (without trailing provider path).
+    """
+    return os.environ.get(
+        "MIRASCOPE_ROUTER_BASE_URL", "https://mirascope.com/router/v2"
+    )
+
+
+def create_underlying_provider(
+    model_scope: str, api_key: str, router_base_url: str
+) -> Provider:
+    """Create and cache an underlying provider instance using provider_singleton.
+
+    This function constructs the appropriate router URL for the provider and
+    delegates to provider_singleton for caching and instantiation.
+
+    Args:
+        model_scope: The model scope (e.g., "openai", "anthropic", "google")
+        api_key: The API key to use for authentication
+        router_base_url: The base router URL (e.g., "http://mirascope.com/router/v2")
+
+    Returns:
+        A cached provider instance configured for the Mirascope Router.
+
+    Raises:
+        ValueError: If the provider is unsupported.
+    """
+    if model_scope not in ["anthropic", "google", "openai"]:
+        raise ValueError(
+            f"Unsupported provider: {model_scope}. "
+            f"Mirascope Router currently supports: anthropic, google, openai"
+        )
+
+    base_url = f"{router_base_url}/{model_scope}"
+    if model_scope == "openai":  # OpenAI expects /v1, which their SDK doesn't add
+        base_url = f"{base_url}/v1"
+
+    # Lazy import to avoid circular dependencies
+    from ..provider_registry import provider_singleton
+
+    # Use provider_singleton which provides caching
+    return provider_singleton(
+        cast(ProviderId, model_scope),
+        api_key=api_key,
+        base_url=base_url,
+    )
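Taken together, the new router utilities split a "scope/model-name" ID into a provider scope, validate it against the supported set (anthropic, google, openai), and build the per-provider router URL, with an extra /v1 suffix for OpenAI. A minimal standalone sketch of that resolution is below; routed_base_url is an illustrative stand-in for the URL logic inside create_underlying_provider, and the default URL is the fallback shown in the new file.

    # Standalone sketch of the model-ID-to-router-URL resolution in the new file above.
    def extract_model_scope(model_id: str) -> str | None:
        """Return the "scope" prefix of "scope/model-name", or None if there is no slash."""
        return model_id.split("/", 1)[0] if "/" in model_id else None

    def routed_base_url(
        model_scope: str,
        router_base_url: str = "https://mirascope.com/router/v2",
    ) -> str:
        """Illustrative stand-in for the URL construction in create_underlying_provider."""
        base_url = f"{router_base_url}/{model_scope}"
        if model_scope == "openai":  # the OpenAI SDK expects an explicit /v1 suffix
            base_url += "/v1"
        return base_url

    scope = extract_model_scope("openai/gpt-4")  # -> "openai"
    if scope is not None:
        print(routed_base_url(scope))            # https://mirascope.com/router/v2/openai/v1
    print(routed_base_url("anthropic"))          # https://mirascope.com/router/v2/anthropic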