mirascope 1.22.4__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +5 -50
- mirascope/_stubs.py +384 -0
- mirascope/_utils.py +34 -0
- mirascope/api/__init__.py +14 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +444 -0
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +506 -0
- mirascope/api/_generated/annotations/raw_client.py +1414 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +17 -0
- mirascope/api/_generated/api_keys/client.py +530 -0
- mirascope/api/_generated/api_keys/raw_client.py +1236 -0
- mirascope/api/_generated/api_keys/types/__init__.py +15 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
- mirascope/api/_generated/client.py +211 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +46 -0
- mirascope/api/_generated/core/datetime_utils.py +28 -0
- mirascope/api/_generated/core/file.py +67 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +543 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +100 -0
- mirascope/api/_generated/core/pydantic_utilities.py +255 -0
- mirascope/api/_generated/core/query_encoder.py +58 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +276 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +91 -0
- mirascope/api/_generated/docs/raw_client.py +178 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/environments/__init__.py +23 -0
- mirascope/api/_generated/environments/client.py +649 -0
- mirascope/api/_generated/environments/raw_client.py +1567 -0
- mirascope/api/_generated/environments/types/__init__.py +25 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
- mirascope/api/_generated/errors/__init__.py +25 -0
- mirascope/api/_generated/errors/bad_request_error.py +14 -0
- mirascope/api/_generated/errors/conflict_error.py +14 -0
- mirascope/api/_generated/errors/forbidden_error.py +11 -0
- mirascope/api/_generated/errors/internal_server_error.py +10 -0
- mirascope/api/_generated/errors/not_found_error.py +11 -0
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +39 -0
- mirascope/api/_generated/functions/client.py +647 -0
- mirascope/api/_generated/functions/raw_client.py +1890 -0
- mirascope/api/_generated/functions/types/__init__.py +53 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +92 -0
- mirascope/api/_generated/health/raw_client.py +175 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +22 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +51 -0
- mirascope/api/_generated/organizations/client.py +869 -0
- mirascope/api/_generated/organizations/raw_client.py +2593 -0
- mirascope/api/_generated/organizations/types/__init__.py +71 -0
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/client.py +528 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1278 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +7 -0
- mirascope/api/_generated/projects/client.py +428 -0
- mirascope/api/_generated/projects/raw_client.py +1302 -0
- mirascope/api/_generated/projects/types/__init__.py +10 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +25 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +25 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +25 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +25 -0
- mirascope/api/_generated/reference.md +4987 -0
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +97 -0
- mirascope/api/_generated/traces/client.py +1103 -0
- mirascope/api/_generated/traces/raw_client.py +2322 -0
- mirascope/api/_generated/traces/types/__init__.py +155 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +20 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +48 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +20 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
- mirascope/api/_generated/types/__init__.py +85 -0
- mirascope/api/_generated/types/already_exists_error.py +22 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +22 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/http_api_decode_error.py +27 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +38 -0
- mirascope/api/_generated/types/issue_tag.py +10 -0
- mirascope/api/_generated/types/not_found_error_body.py +22 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +22 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +5 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_key.py +25 -0
- mirascope/api/_generated/types/property_key_key_tag.py +5 -0
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +99 -0
- mirascope/llm/__init__.py +309 -13
- mirascope/llm/calls/__init__.py +17 -0
- mirascope/llm/calls/calls.py +348 -0
- mirascope/llm/calls/decorator.py +268 -0
- mirascope/llm/content/__init__.py +71 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +69 -0
- mirascope/llm/content/tool_output.py +43 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +41 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +360 -0
- mirascope/llm/formatting/__init__.py +39 -0
- mirascope/llm/formatting/format.py +291 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +131 -0
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +83 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/messages/__init__.py +35 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/messages/message.py +190 -0
- mirascope/llm/models/__init__.py +21 -0
- mirascope/llm/models/models.py +1339 -0
- mirascope/llm/models/params.py +72 -0
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/__init__.py +34 -0
- mirascope/llm/prompts/_utils.py +31 -0
- mirascope/llm/prompts/decorator.py +215 -0
- mirascope/llm/prompts/prompts.py +484 -0
- mirascope/llm/prompts/protocols.py +65 -0
- mirascope/llm/providers/__init__.py +65 -0
- mirascope/llm/providers/anthropic/__init__.py +11 -0
- mirascope/llm/providers/anthropic/_utils/__init__.py +27 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +297 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +272 -0
- mirascope/llm/providers/anthropic/_utils/decode.py +326 -0
- mirascope/llm/providers/anthropic/_utils/encode.py +431 -0
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +338 -0
- mirascope/llm/providers/anthropic/model_id.py +23 -0
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +440 -0
- mirascope/llm/providers/base/__init__.py +14 -0
- mirascope/llm/providers/base/_utils.py +248 -0
- mirascope/llm/providers/base/base_provider.py +1463 -0
- mirascope/llm/providers/base/kwargs.py +12 -0
- mirascope/llm/providers/google/__init__.py +6 -0
- mirascope/llm/providers/google/_utils/__init__.py +17 -0
- mirascope/llm/providers/google/_utils/decode.py +357 -0
- mirascope/llm/providers/google/_utils/encode.py +418 -0
- mirascope/llm/providers/google/_utils/errors.py +50 -0
- mirascope/llm/providers/google/message.py +7 -0
- mirascope/llm/providers/google/model_id.py +22 -0
- mirascope/llm/providers/google/model_info.py +63 -0
- mirascope/llm/providers/google/provider.py +456 -0
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +73 -0
- mirascope/llm/providers/mirascope/provider.py +313 -0
- mirascope/llm/providers/mlx/__init__.py +9 -0
- mirascope/llm/providers/mlx/_utils.py +141 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +146 -0
- mirascope/llm/providers/mlx/mlx.py +242 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +416 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/ollama/__init__.py +7 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +15 -0
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +7 -0
- mirascope/llm/providers/openai/completions/_utils/__init__.py +18 -0
- mirascope/llm/providers/openai/completions/_utils/decode.py +252 -0
- mirascope/llm/providers/openai/completions/_utils/encode.py +390 -0
- mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
- mirascope/llm/providers/openai/completions/base_provider.py +522 -0
- mirascope/llm/providers/openai/completions/provider.py +28 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +303 -0
- mirascope/llm/providers/openai/provider.py +405 -0
- mirascope/llm/providers/openai/responses/__init__.py +5 -0
- mirascope/llm/providers/openai/responses/_utils/__init__.py +15 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +289 -0
- mirascope/llm/providers/openai/responses/_utils/encode.py +399 -0
- mirascope/llm/providers/openai/responses/provider.py +472 -0
- mirascope/llm/providers/openrouter/__init__.py +5 -0
- mirascope/llm/providers/openrouter/provider.py +67 -0
- mirascope/llm/providers/provider_id.py +26 -0
- mirascope/llm/providers/provider_registry.py +305 -0
- mirascope/llm/providers/together/__init__.py +7 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +66 -0
- mirascope/llm/responses/_utils.py +146 -0
- mirascope/llm/responses/base_response.py +103 -0
- mirascope/llm/responses/base_stream_response.py +824 -0
- mirascope/llm/responses/finish_reason.py +28 -0
- mirascope/llm/responses/response.py +362 -0
- mirascope/llm/responses/root_response.py +248 -0
- mirascope/llm/responses/stream_response.py +577 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/responses/usage.py +139 -0
- mirascope/llm/tools/__init__.py +71 -0
- mirascope/llm/tools/_utils.py +34 -0
- mirascope/llm/tools/decorator.py +184 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/provider_tools.py +18 -0
- mirascope/llm/tools/tool_schema.py +321 -0
- mirascope/llm/tools/toolkit.py +178 -0
- mirascope/llm/tools/tools.py +263 -0
- mirascope/llm/tools/types.py +112 -0
- mirascope/llm/tools/web_search_tool.py +32 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope/ops/__init__.py +129 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1172 -0
- mirascope/ops/_internal/configuration.py +177 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +362 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +66 -0
- mirascope/ops/_internal/instrumentation/__init__.py +28 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/common.py +500 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +161 -0
- mirascope/ops/_internal/instrumentation/llm/model.py +1777 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +324 -0
- mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
- mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
- mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
- mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
- mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +133 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +389 -0
- mirascope/ops/_internal/traced_functions.py +528 -0
- mirascope/ops/_internal/tracing.py +353 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +131 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +357 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- mirascope-2.1.1.dist-info/METADATA +231 -0
- mirascope-2.1.1.dist-info/RECORD +437 -0
- mirascope-2.1.1.dist-info/WHEEL +4 -0
- mirascope-2.1.1.dist-info/licenses/LICENSE +21 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -57
- mirascope/beta/rag/chroma/vectorstores.py +0 -97
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -107
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -142
- mirascope/core/anthropic/_utils/_setup_call.py +0 -134
- mirascope/core/anthropic/call_params.py +0 -41
- mirascope/core/anthropic/call_response.py +0 -206
- mirascope/core/anthropic/call_response_chunk.py +0 -132
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -147
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -84
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -171
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -15
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -321
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -340
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -162
- mirascope/core/base/messages.py +0 -111
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -205
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -171
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -249
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -96
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -206
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -35
- mirascope/core/google/_utils/_message_param_converter.py +0 -162
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -34
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -232
- mirascope/core/google/call_response_chunk.py +0 -110
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -143
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -154
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -80
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -146
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -91
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/experimental/graphs/finite_state_machine.py +0 -714
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -167
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -277
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -128
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.22.4.dist-info/METADATA +0 -169
- mirascope-1.22.4.dist-info/RECORD +0 -377
- mirascope-1.22.4.dist-info/WHEEL +0 -4
- mirascope-1.22.4.dist-info/licenses/LICENSE +0 -21
|
@@ -0,0 +1,1339 @@
|
|
|
1
|
+
"""The model context manager for the `llm` module."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Sequence
|
|
6
|
+
from contextvars import ContextVar, Token
|
|
7
|
+
from types import TracebackType
|
|
8
|
+
from typing import overload
|
|
9
|
+
from typing_extensions import Unpack
|
|
10
|
+
|
|
11
|
+
from ..context import Context, DepsT
|
|
12
|
+
from ..formatting import Format, FormatSpec, FormattableT
|
|
13
|
+
from ..messages import Message, UserContent, promote_to_messages
|
|
14
|
+
from ..providers import (
|
|
15
|
+
ModelId,
|
|
16
|
+
Provider,
|
|
17
|
+
ProviderId,
|
|
18
|
+
get_provider_for_model,
|
|
19
|
+
)
|
|
20
|
+
from ..responses import (
|
|
21
|
+
AsyncContextResponse,
|
|
22
|
+
AsyncContextStreamResponse,
|
|
23
|
+
AsyncResponse,
|
|
24
|
+
AsyncStreamResponse,
|
|
25
|
+
ContextResponse,
|
|
26
|
+
ContextStreamResponse,
|
|
27
|
+
Response,
|
|
28
|
+
StreamResponse,
|
|
29
|
+
)
|
|
30
|
+
from ..tools import (
|
|
31
|
+
AsyncContextTools,
|
|
32
|
+
AsyncTools,
|
|
33
|
+
ContextTools,
|
|
34
|
+
Tools,
|
|
35
|
+
normalize_async_context_tools,
|
|
36
|
+
normalize_async_tools,
|
|
37
|
+
normalize_context_tools,
|
|
38
|
+
normalize_tools,
|
|
39
|
+
)
|
|
40
|
+
from .params import Params
|
|
41
|
+
|
|
42
|
+
MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def model_from_context() -> Model | None:
|
|
46
|
+
"""Get the LLM currently set via context, if any."""
|
|
47
|
+
return MODEL_CONTEXT.get()
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class Model:
|
|
51
|
+
"""The unified LLM interface that delegates to provider-specific clients.
|
|
52
|
+
|
|
53
|
+
This class provides a consistent interface for interacting with language models
|
|
54
|
+
from various providers. It handles the common operations like generating responses,
|
|
55
|
+
streaming, and async variants by delegating to the appropriate client methods.
|
|
56
|
+
|
|
57
|
+
**Usage Note:** In most cases, you should use `llm.use_model()` instead of instantiating
|
|
58
|
+
`Model` directly. This preserves the ability to override the model at runtime using
|
|
59
|
+
the `llm.model()` context manager. Only instantiate `Model` directly if you want to
|
|
60
|
+
hardcode a specific model and prevent it from being overridden by context.
|
|
61
|
+
|
|
62
|
+
Example (recommended - allows override):
|
|
63
|
+
|
|
64
|
+
```python
|
|
65
|
+
from mirascope import llm
|
|
66
|
+
|
|
67
|
+
def recommend_book(genre: str) -> llm.Response:
|
|
68
|
+
# Uses context model if available, otherwise creates default
|
|
69
|
+
model = llm.use_model("openai/gpt-5-mini")
|
|
70
|
+
return model.call(f"Please recommend a book in {genre}.")
|
|
71
|
+
|
|
72
|
+
# Uses default model
|
|
73
|
+
response = recommend_book("fantasy")
|
|
74
|
+
|
|
75
|
+
# Override with different model
|
|
76
|
+
with llm.model(provider="anthropic", model_id="anthropic/claude-sonnet-4-5"):
|
|
77
|
+
response = recommend_book("fantasy") # Uses Claude
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
Example (direct instantiation - prevents override):
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from mirascope import llm
|
|
84
|
+
|
|
85
|
+
def recommend_book(genre: str) -> llm.Response:
|
|
86
|
+
# Hardcoded model, cannot be overridden by context
|
|
87
|
+
model = llm.Model("openai/gpt-5-mini")
|
|
88
|
+
return model.call(f"Please recommend a book in {genre}.")
|
|
89
|
+
```
|
|
90
|
+
"""
|
|
91
|
+
|
|
92
|
+
model_id: ModelId
|
|
93
|
+
"""The model being used (e.g. `"openai/gpt-4o-mini"`)."""
|
|
94
|
+
|
|
95
|
+
params: Params
|
|
96
|
+
"""The default parameters for the model (temperature, max_tokens, etc.)."""
|
|
97
|
+
|
|
98
|
+
def __init__(
|
|
99
|
+
self,
|
|
100
|
+
model_id: ModelId,
|
|
101
|
+
**params: Unpack[Params],
|
|
102
|
+
) -> None:
|
|
103
|
+
"""Initialize the Model with model_id and optional params."""
|
|
104
|
+
if "/" not in model_id:
|
|
105
|
+
raise ValueError(
|
|
106
|
+
"Invalid model_id format. Expected format: 'provider/model-name' "
|
|
107
|
+
f"(e.g., 'openai/gpt-4'). Got: '{model_id}'"
|
|
108
|
+
)
|
|
109
|
+
self.model_id = model_id
|
|
110
|
+
self.params = params
|
|
111
|
+
self._token_stack: list[Token[Model | None]] = []
|
|
112
|
+
|
|
113
|
+
@property
|
|
114
|
+
def provider(self) -> Provider:
|
|
115
|
+
"""The provider being used (e.g. an `OpenAIProvider`).
|
|
116
|
+
|
|
117
|
+
This property dynamically looks up the provider from the registry based on
|
|
118
|
+
the current model_id. This allows provider overrides via `llm.register_provider()`
|
|
119
|
+
to take effect even after the model instance is created.
|
|
120
|
+
|
|
121
|
+
Raises:
|
|
122
|
+
NoRegisteredProviderError: If no provider is available for the model_id
|
|
123
|
+
"""
|
|
124
|
+
return get_provider_for_model(self.model_id)
|
|
125
|
+
|
|
126
|
+
@property
|
|
127
|
+
def provider_id(self) -> ProviderId:
|
|
128
|
+
"""The string id of the provider being used (e.g. `"openai"`).
|
|
129
|
+
|
|
130
|
+
This property returns the `id` field of the dynamically resolved provider.
|
|
131
|
+
|
|
132
|
+
Raises:
|
|
133
|
+
NoRegisteredProviderError: If no provider is available for the model_id
|
|
134
|
+
"""
|
|
135
|
+
return self.provider.id
|
|
136
|
+
|
|
137
|
+
def __enter__(self) -> Model:
|
|
138
|
+
"""Enter the context manager, setting this model in context."""
|
|
139
|
+
token = MODEL_CONTEXT.set(self)
|
|
140
|
+
self._token_stack.append(token)
|
|
141
|
+
return self
|
|
142
|
+
|
|
143
|
+
def __exit__(
|
|
144
|
+
self,
|
|
145
|
+
exc_type: type[BaseException] | None,
|
|
146
|
+
exc_val: BaseException | None,
|
|
147
|
+
exc_tb: TracebackType | None,
|
|
148
|
+
) -> None:
|
|
149
|
+
"""Exit the context manager, resetting the model context."""
|
|
150
|
+
if self._token_stack:
|
|
151
|
+
token = self._token_stack.pop()
|
|
152
|
+
MODEL_CONTEXT.reset(token)
|
|
153
|
+
    @overload
    def call(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: None = None,
    ) -> Response:
        """Generate an `llm.Response` without a response format."""
        ...

    @overload
    def call(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> Response[FormattableT]:
        """Generate an `llm.Response` with a response format."""
        ...

    @overload
    def call(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> Response | Response[FormattableT]:
        """Generate an `llm.Response` with an optional response format."""
        ...

    def call(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> Response | Response[FormattableT]:
        """Generate an `llm.Response` by synchronously calling this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.Response` object containing the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return self.provider.call(
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_tools(tools),
            format=format,
            **self.params,
        )
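A sketch of the `call` overloads in practice. It assumes a Pydantic `BaseModel` subclass satisfies `FormattableT`; this hunk only shows the type parameter, not what counts as formattable:

```python
from pydantic import BaseModel

from mirascope import llm


class Book(BaseModel):
    title: str
    author: str


model = llm.Model("openai/gpt-4o-mini")

# First overload: no format, so the result is typed as llm.Response.
plain = model.call("Please recommend a fantasy book.")

# Second overload: passing a class as `format` types the result as llm.Response[Book].
structured = model.call("Please recommend a fantasy book.", format=Book)
```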
    @overload
    async def call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: None = None,
    ) -> AsyncResponse:
        """Generate an `llm.AsyncResponse` without a response format."""
        ...

    @overload
    async def call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> AsyncResponse[FormattableT]:
        """Generate an `llm.AsyncResponse` with a response format."""
        ...

    @overload
    async def call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generate an `llm.AsyncResponse` with an optional response format."""
        ...

    async def call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generate an `llm.AsyncResponse` by asynchronously calling this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.AsyncResponse` object containing the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return await self.provider.call_async(
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_async_tools(tools),
            format=format,
            **self.params,
        )
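A minimal async counterpart, driving `call_async` with `asyncio.run`:

```python
import asyncio

from mirascope import llm


async def main() -> None:
    model = llm.Model("openai/gpt-4o-mini")
    response = await model.call_async("Please recommend a fantasy book.")
    print(response)


asyncio.run(main())
```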
    @overload
    def stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: None = None,
    ) -> StreamResponse:
        """Stream an `llm.StreamResponse` without a response format."""
        ...

    @overload
    def stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> StreamResponse[FormattableT]:
        """Stream an `llm.StreamResponse` with a response format."""
        ...

    @overload
    def stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Stream an `llm.StreamResponse` with an optional response format."""
        ...

    def stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: Tools | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Generate an `llm.StreamResponse` by synchronously streaming from this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.StreamResponse` object for iterating over the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return self.provider.stream(
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_tools(tools),
            format=format,
            **self.params,
        )
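A sketch of consuming the synchronous stream. The docstring only promises that the returned `StreamResponse` supports iteration over the generated content; the exact chunk type is not shown in this hunk, so the `print` below is illustrative:

```python
from mirascope import llm

model = llm.Model("openai/gpt-4o-mini")
stream = model.stream("Please recommend a fantasy book.")

# Iterate over the generated content as it arrives.
for chunk in stream:
    print(chunk, end="", flush=True)
```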
    @overload
    async def stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: None = None,
    ) -> AsyncStreamResponse:
        """Stream an `llm.AsyncStreamResponse` without a response format."""
        ...

    @overload
    async def stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> AsyncStreamResponse[FormattableT]:
        """Stream an `llm.AsyncStreamResponse` with a response format."""
        ...

    @overload
    async def stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Stream an `llm.AsyncStreamResponse` with an optional response format."""
        ...

    async def stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        tools: AsyncTools | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return await self.provider.stream_async(
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_async_tools(tools),
            format=format,
            **self.params,
        )

    @overload
    def context_call(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: None = None,
    ) -> ContextResponse[DepsT, None]:
        """Generate an `llm.ContextResponse` without a response format."""
        ...

    @overload
    def context_call(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> ContextResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextResponse` with a response format."""
        ...

    @overload
    def context_call(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextResponse` with an optional response format."""
        ...

    def context_call(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextResponse` by synchronously calling this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            ctx: Context object with dependencies for tools.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.ContextResponse` object containing the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return self.provider.context_call(
            ctx=ctx,
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_context_tools(tools),
            format=format,
            **self.params,
        )
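A sketch of `context_call` with typed dependencies. The `llm.Context(deps=...)` construction is hypothetical here, since the `Context` class itself is not part of this hunk:

```python
from dataclasses import dataclass

from mirascope import llm


@dataclass
class Library:
    books: list[str]


# Hypothetical construction: the Context API is defined elsewhere in the package.
ctx = llm.Context(deps=Library(books=["The Name of the Wind"]))

model = llm.Model("openai/gpt-4o-mini")
response = model.context_call("Which books do we have in stock?", ctx=ctx)
```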
    @overload
    async def context_call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: None = None,
    ) -> AsyncContextResponse[DepsT, None]:
        """Generate an `llm.AsyncContextResponse` without a response format."""
        ...

    @overload
    async def context_call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> AsyncContextResponse[DepsT, FormattableT]:
        """Generate an `llm.AsyncContextResponse` with a response format."""
        ...

    @overload
    async def context_call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generate an `llm.AsyncContextResponse` with an optional response format."""
        ...

    async def context_call_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generate an `llm.AsyncContextResponse` by asynchronously calling this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            ctx: Context object with dependencies for tools.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.AsyncContextResponse` object containing the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return await self.provider.context_call_async(
            ctx=ctx,
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_async_context_tools(tools),
            format=format,
            **self.params,
        )

    @overload
    def context_stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: None = None,
    ) -> ContextStreamResponse[DepsT, None]:
        """Stream an `llm.ContextStreamResponse` without a response format."""
        ...

    @overload
    def context_stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> ContextStreamResponse[DepsT, FormattableT]:
        """Stream an `llm.ContextStreamResponse` with a response format."""
        ...

    @overload
    def context_stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Stream an `llm.ContextStreamResponse` with an optional response format."""
        ...

    def context_stream(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: ContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate an `llm.ContextStreamResponse` by synchronously streaming from this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            ctx: Context object with dependencies for tools.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return self.provider.context_stream(
            ctx=ctx,
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_context_tools(tools),
            format=format,
            **self.params,
        )

    @overload
    async def context_stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: None = None,
    ) -> AsyncContextStreamResponse[DepsT, None]:
        """Stream an `llm.AsyncContextStreamResponse` without a response format."""
        ...

    @overload
    async def context_stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
        """Stream an `llm.AsyncContextStreamResponse` with a response format."""
        ...

    @overload
    async def context_stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None,
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
        ...

    async def context_stream_async(
        self,
        content: UserContent | Sequence[Message],
        *,
        ctx: Context[DepsT],
        tools: AsyncContextTools[DepsT] | None = None,
        format: FormatSpec[FormattableT] | None = None,
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this model's LLM provider.

        Args:
            content: Content to send to the LLM. Can be a string (converted to user
                message), UserContent, a sequence of UserContent, or a sequence of
                Messages for full control.
            ctx: Context object with dependencies for tools.
            tools: Optional tools that the model may invoke.
            format: Optional response format specifier.

        Returns:
            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
        """
        messages = promote_to_messages(content)
        return await self.provider.context_stream_async(
            ctx=ctx,
            model_id=self.model_id,
            messages=messages,
            toolkit=normalize_async_context_tools(tools),
            format=format,
            **self.params,
        )

    @overload
    def resume(
        self,
        *,
        response: Response,
        content: UserContent,
    ) -> Response:
        """Resume an `llm.Response` without a response format."""
        ...

    @overload
    def resume(
        self,
        *,
        response: Response[FormattableT],
        content: UserContent,
    ) -> Response[FormattableT]:
        """Resume an `llm.Response` with a response format."""
        ...

    @overload
    def resume(
        self,
        *,
        response: Response | Response[FormattableT],
        content: UserContent,
    ) -> Response | Response[FormattableT]:
        """Resume an `llm.Response` with an optional response format."""
        ...

    def resume(
        self,
        *,
        response: Response | Response[FormattableT],
        content: UserContent,
    ) -> Response | Response[FormattableT]:
        """Generate a new `llm.Response` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            response: Previous response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.Response` object containing the extended conversation.
        """
        return self.provider.resume(
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )
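A sketch of a two-turn exchange with `resume`, which re-sends the first response's messages plus the new user content:

```python
from mirascope import llm

model = llm.Model("openai/gpt-4o-mini")
first = model.call("Please recommend a fantasy book.")

# resume() extends the first response's messages with new user content,
# reusing its tools and output format along with this model's params.
follow_up = model.resume(response=first, content="Why did you pick that one?")
```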
    @overload
    async def resume_async(
        self,
        *,
        response: AsyncResponse,
        content: UserContent,
    ) -> AsyncResponse:
        """Resume an `llm.AsyncResponse` without a response format."""
        ...

    @overload
    async def resume_async(
        self,
        *,
        response: AsyncResponse[FormattableT],
        content: UserContent,
    ) -> AsyncResponse[FormattableT]:
        """Resume an `llm.AsyncResponse` with a response format."""
        ...

    @overload
    async def resume_async(
        self,
        *,
        response: AsyncResponse | AsyncResponse[FormattableT],
        content: UserContent,
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Resume an `llm.AsyncResponse` with an optional response format."""
        ...

    async def resume_async(
        self,
        *,
        response: AsyncResponse | AsyncResponse[FormattableT],
        content: UserContent,
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            response: Previous async response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.AsyncResponse` object containing the extended conversation.
        """
        return await self.provider.resume_async(
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )

    @overload
    def context_resume(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextResponse[DepsT, None],
        content: UserContent,
    ) -> ContextResponse[DepsT, None]:
        """Resume an `llm.ContextResponse` without a response format."""
        ...

    @overload
    def context_resume(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> ContextResponse[DepsT, FormattableT]:
        """Resume an `llm.ContextResponse` with a response format."""
        ...

    @overload
    def context_resume(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Resume an `llm.ContextResponse` with an optional response format."""
        ...

    def context_resume(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            ctx: Context object with dependencies for tools.
            response: Previous context response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.ContextResponse` object containing the extended conversation.
        """
        return self.provider.context_resume(
            ctx=ctx,
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )

    @overload
    async def context_resume_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextResponse[DepsT, None],
        content: UserContent,
    ) -> AsyncContextResponse[DepsT, None]:
        """Resume an `llm.AsyncContextResponse` without a response format."""
        ...

    @overload
    async def context_resume_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> AsyncContextResponse[DepsT, FormattableT]:
        """Resume an `llm.AsyncContextResponse` with a response format."""
        ...

    @overload
    async def context_resume_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextResponse[DepsT, None]
        | AsyncContextResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Resume an `llm.AsyncContextResponse` with an optional response format."""
        ...

    async def context_resume_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextResponse[DepsT, None]
        | AsyncContextResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            ctx: Context object with dependencies for tools.
            response: Previous async context response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.AsyncContextResponse` object containing the extended conversation.
        """
        return await self.provider.context_resume_async(
            ctx=ctx,
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )
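The context-aware variant follows the same resume pattern; as before, the `llm.Context(deps=...)` construction is a hypothetical stand-in for an API not shown in this hunk:

```python
from mirascope import llm

# Hypothetical Context construction; the Context class is defined elsewhere.
ctx = llm.Context(deps=None)

model = llm.Model("openai/gpt-4o-mini")
first = model.context_call("Please recommend a fantasy book.", ctx=ctx)
follow_up = model.context_resume(
    ctx=ctx, response=first, content="Does it have a sequel?"
)
```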
    @overload
    def resume_stream(
        self,
        *,
        response: StreamResponse,
        content: UserContent,
    ) -> StreamResponse:
        """Resume an `llm.StreamResponse` without a response format."""
        ...

    @overload
    def resume_stream(
        self,
        *,
        response: StreamResponse[FormattableT],
        content: UserContent,
    ) -> StreamResponse[FormattableT]:
        """Resume an `llm.StreamResponse` with a response format."""
        ...

    @overload
    def resume_stream(
        self,
        *,
        response: StreamResponse | StreamResponse[FormattableT],
        content: UserContent,
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Resume an `llm.StreamResponse` with an optional response format."""
        ...

    def resume_stream(
        self,
        *,
        response: StreamResponse | StreamResponse[FormattableT],
        content: UserContent,
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            response: Previous stream response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.StreamResponse` object for streaming the extended conversation.
        """
        return self.provider.resume_stream(
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )

    @overload
    async def resume_stream_async(
        self,
        *,
        response: AsyncStreamResponse,
        content: UserContent,
    ) -> AsyncStreamResponse:
        """Resume an `llm.AsyncStreamResponse` without a response format."""
        ...

    @overload
    async def resume_stream_async(
        self,
        *,
        response: AsyncStreamResponse[FormattableT],
        content: UserContent,
    ) -> AsyncStreamResponse[FormattableT]:
        """Resume an `llm.AsyncStreamResponse` with a response format."""
        ...

    @overload
    async def resume_stream_async(
        self,
        *,
        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
        content: UserContent,
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Resume an `llm.AsyncStreamResponse` with an optional response format."""
        ...

    async def resume_stream_async(
        self,
        *,
        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
        content: UserContent,
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            response: Previous async stream response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
        """
        return await self.provider.resume_stream_async(
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )

    @overload
    def context_resume_stream(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextStreamResponse[DepsT, None],
        content: UserContent,
    ) -> ContextStreamResponse[DepsT, None]:
        """Resume an `llm.ContextStreamResponse` without a response format."""
        ...

    @overload
    def context_resume_stream(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextStreamResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> ContextStreamResponse[DepsT, FormattableT]:
        """Resume an `llm.ContextStreamResponse` with a response format."""
        ...

    @overload
    def context_resume_stream(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextStreamResponse[DepsT, None]
        | ContextStreamResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Resume an `llm.ContextStreamResponse` with an optional response format."""
        ...

    def context_resume_stream(
        self,
        *,
        ctx: Context[DepsT],
        response: ContextStreamResponse[DepsT, None]
        | ContextStreamResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            ctx: Context object with dependencies for tools.
            response: Previous context stream response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.ContextStreamResponse` object for streaming the extended conversation.
        """
        return self.provider.context_resume_stream(
            ctx=ctx,
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )

    @overload
    async def context_resume_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextStreamResponse[DepsT, None],
        content: UserContent,
    ) -> AsyncContextStreamResponse[DepsT, None]:
        """Resume an `llm.AsyncContextStreamResponse` without a response format."""
        ...

    @overload
    async def context_resume_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextStreamResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
        """Resume an `llm.AsyncContextStreamResponse` with a response format."""
        ...

    @overload
    async def context_resume_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
        ...

    async def context_resume_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        response: AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT],
        content: UserContent,
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.

        Uses the previous response's tools and output format, and this model's params.

        Depending on the client, this may be a wrapper around using client call methods
        with the response's messages and the new content, or it may use a provider-specific
        API for resuming an existing interaction.

        Args:
            ctx: Context object with dependencies for tools.
            response: Previous async context stream response to extend.
            content: Additional user content to append.

        Returns:
            A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
        """
        return await self.provider.context_resume_stream_async(
            ctx=ctx,
            model_id=self.model_id,
            response=response,
            content=content,
            **self.params,
        )

def model(
    model_id: ModelId,
    **params: Unpack[Params],
) -> Model:
    """Helper for creating a `Model` instance (which may be used as a context manager).

    This is just an alias for the `Model` constructor, added for convenience.

    This function returns a `Model` instance that implements the context manager protocol.
    When used with a `with` statement, the model will be set in context and used by both
    `llm.use_model()` and `llm.call()` within that context. This allows you to override
    the default model at runtime without modifying function definitions.

    The returned `Model` instance can also be stored and reused:

    ```python
    m = llm.model("openai/gpt-4o")
    # Use directly
    response = m.call("Hello!")
    # Or use as context manager
    with m:
        response = recommend_book("fantasy")
    ```

    When a model is set in context, it completely overrides any model ID or parameters
    specified in `llm.use_model()` or `llm.call()`. The context model's parameters take
    precedence, and any unset parameters use default values.

    Args:
        model_id: A model ID string (e.g., "openai/gpt-4").
        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.

    Returns:
        A Model instance that can be used as a context manager.

    Raises:
        ValueError: If the specified provider is not supported.

    Example:
        With `llm.use_model()`

        ```python
        import mirascope.llm as llm

        def recommend_book(genre: str) -> llm.Response:
            model = llm.use_model("openai/gpt-5-mini")
            return model.call(f"Please recommend a book in {genre}.")

        # Override the default model at runtime
        with llm.model("anthropic/claude-sonnet-4-5"):
            response = recommend_book("fantasy")  # Uses Claude instead of GPT
        ```

    Example:
        With `llm.call()`

        ```python
        import mirascope.llm as llm

        @llm.call("openai/gpt-5-mini")
        def recommend_book(genre: str):
            return f"Please recommend a {genre} book."

        # Override the decorated model at runtime
        with llm.model("anthropic/claude-sonnet-4-0"):
            response = recommend_book("fantasy")  # Uses Claude instead of GPT
        ```

    Example:
        Storing and reusing Model instances

        ```python
        import mirascope.llm as llm

        # Create and store a model
        m = llm.model("openai/gpt-4o")

        # Use it directly
        response = m.call("Hello!")

        # Or use it as a context manager
        with m:
            response = recommend_book("fantasy")
        ```
    """
    return Model(model_id, **params)

@overload
def use_model(
    model: ModelId,
    **params: Unpack[Params],
) -> Model:
    """Get the model from context if available, otherwise create a new `Model`.

    This overload accepts a model ID string and allows additional params.
    """
    ...


@overload
def use_model(
    model: Model,
) -> Model:
    """Get the model from context if available, otherwise use the provided `Model`.

    This overload accepts a `Model` instance and does not allow additional params.
    """
    ...


def use_model(
    model: Model | ModelId,
    **params: Unpack[Params],
) -> Model:
    """Get the model from context if available, otherwise create a new `Model`.

    This function checks if a model has been set in the context (via the `llm.model()`
    context manager). If a model is found in the context, it returns that model,
    ignoring any model ID or parameters passed to this function. Otherwise, it creates
    and returns a new `llm.Model` instance with the provided arguments.

    This allows you to write functions that work with a default model but can be
    overridden at runtime using the `llm.model()` context manager.

    Args:
        model: A model ID string (e.g., "openai/gpt-4") or a Model instance.
        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
            Only available when passing a model ID string.

    Returns:
        An `llm.Model` instance from context (if set) or a new instance with the specified settings.

    Raises:
        ValueError: If the specified provider is not supported.

    Example:

        ```python
        import mirascope.llm as llm

        def recommend_book(genre: str) -> llm.Response:
            model = llm.use_model("openai/gpt-5-mini")
            return model.call(f"Please recommend a book in {genre}.")

        # Uses the default model (gpt-5-mini)
        response = recommend_book("fantasy")

        # Override with a different model
        with llm.model("anthropic/claude-sonnet-4-5"):
            response = recommend_book("fantasy")  # Uses Claude instead
        ```
    """
    context_model = model_from_context()
    if context_model is not None:
        return context_model
    if isinstance(model, str):
        return Model(model, **params)
    return model