mirascope 1.0.5__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
- mirascope/__init__.py +6 -6
- mirascope/_stubs.py +384 -0
- mirascope/_utils.py +34 -0
- mirascope/api/__init__.py +14 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +444 -0
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +506 -0
- mirascope/api/_generated/annotations/raw_client.py +1414 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +17 -0
- mirascope/api/_generated/api_keys/client.py +530 -0
- mirascope/api/_generated/api_keys/raw_client.py +1236 -0
- mirascope/api/_generated/api_keys/types/__init__.py +15 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
- mirascope/api/_generated/client.py +211 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +46 -0
- mirascope/api/_generated/core/datetime_utils.py +28 -0
- mirascope/api/_generated/core/file.py +67 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +543 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +100 -0
- mirascope/api/_generated/core/pydantic_utilities.py +255 -0
- mirascope/api/_generated/core/query_encoder.py +58 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +276 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +91 -0
- mirascope/api/_generated/docs/raw_client.py +178 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/environments/__init__.py +23 -0
- mirascope/api/_generated/environments/client.py +649 -0
- mirascope/api/_generated/environments/raw_client.py +1567 -0
- mirascope/api/_generated/environments/types/__init__.py +25 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
- mirascope/api/_generated/errors/__init__.py +25 -0
- mirascope/api/_generated/errors/bad_request_error.py +14 -0
- mirascope/api/_generated/errors/conflict_error.py +14 -0
- mirascope/api/_generated/errors/forbidden_error.py +11 -0
- mirascope/api/_generated/errors/internal_server_error.py +10 -0
- mirascope/api/_generated/errors/not_found_error.py +11 -0
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +39 -0
- mirascope/api/_generated/functions/client.py +647 -0
- mirascope/api/_generated/functions/raw_client.py +1890 -0
- mirascope/api/_generated/functions/types/__init__.py +53 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +92 -0
- mirascope/api/_generated/health/raw_client.py +175 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +22 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +51 -0
- mirascope/api/_generated/organizations/client.py +869 -0
- mirascope/api/_generated/organizations/raw_client.py +2593 -0
- mirascope/api/_generated/organizations/types/__init__.py +71 -0
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +5 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/client.py +528 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1278 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +7 -0
- mirascope/api/_generated/projects/client.py +428 -0
- mirascope/api/_generated/projects/raw_client.py +1302 -0
- mirascope/api/_generated/projects/types/__init__.py +10 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +25 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +25 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +25 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +25 -0
- mirascope/api/_generated/reference.md +4987 -0
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +97 -0
- mirascope/api/_generated/traces/client.py +1103 -0
- mirascope/api/_generated/traces/raw_client.py +2322 -0
- mirascope/api/_generated/traces/types/__init__.py +155 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +20 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +48 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +38 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +19 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +20 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
- mirascope/api/_generated/types/__init__.py +85 -0
- mirascope/api/_generated/types/already_exists_error.py +22 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +22 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/http_api_decode_error.py +27 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +38 -0
- mirascope/api/_generated/types/issue_tag.py +10 -0
- mirascope/api/_generated/types/not_found_error_body.py +22 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +22 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +5 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_key.py +25 -0
- mirascope/api/_generated/types/property_key_key_tag.py +5 -0
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +99 -0
- mirascope/llm/__init__.py +316 -0
- mirascope/llm/calls/__init__.py +17 -0
- mirascope/llm/calls/calls.py +348 -0
- mirascope/llm/calls/decorator.py +268 -0
- mirascope/llm/content/__init__.py +71 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +69 -0
- mirascope/llm/content/tool_output.py +43 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +41 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +360 -0
- mirascope/llm/formatting/__init__.py +39 -0
- mirascope/llm/formatting/format.py +291 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +131 -0
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +83 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/messages/__init__.py +35 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/messages/message.py +190 -0
- mirascope/llm/models/__init__.py +21 -0
- mirascope/llm/models/models.py +1339 -0
- mirascope/llm/models/params.py +72 -0
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/__init__.py +34 -0
- mirascope/llm/prompts/_utils.py +31 -0
- mirascope/llm/prompts/decorator.py +215 -0
- mirascope/llm/prompts/prompts.py +484 -0
- mirascope/llm/prompts/protocols.py +65 -0
- mirascope/llm/providers/__init__.py +65 -0
- mirascope/llm/providers/anthropic/__init__.py +11 -0
- mirascope/llm/providers/anthropic/_utils/__init__.py +27 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +297 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +272 -0
- mirascope/llm/providers/anthropic/_utils/decode.py +326 -0
- mirascope/llm/providers/anthropic/_utils/encode.py +431 -0
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +338 -0
- mirascope/llm/providers/anthropic/model_id.py +23 -0
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +440 -0
- mirascope/llm/providers/base/__init__.py +14 -0
- mirascope/llm/providers/base/_utils.py +248 -0
- mirascope/llm/providers/base/base_provider.py +1463 -0
- mirascope/llm/providers/base/kwargs.py +12 -0
- mirascope/llm/providers/google/__init__.py +6 -0
- mirascope/llm/providers/google/_utils/__init__.py +17 -0
- mirascope/llm/providers/google/_utils/decode.py +357 -0
- mirascope/llm/providers/google/_utils/encode.py +418 -0
- mirascope/llm/providers/google/_utils/errors.py +50 -0
- mirascope/llm/providers/google/message.py +7 -0
- mirascope/llm/providers/google/model_id.py +22 -0
- mirascope/llm/providers/google/model_info.py +63 -0
- mirascope/llm/providers/google/provider.py +456 -0
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +73 -0
- mirascope/llm/providers/mirascope/provider.py +313 -0
- mirascope/llm/providers/mlx/__init__.py +9 -0
- mirascope/llm/providers/mlx/_utils.py +141 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +146 -0
- mirascope/llm/providers/mlx/mlx.py +242 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +416 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/ollama/__init__.py +7 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +15 -0
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +7 -0
- mirascope/llm/providers/openai/completions/_utils/__init__.py +18 -0
- mirascope/llm/providers/openai/completions/_utils/decode.py +252 -0
- mirascope/llm/providers/openai/completions/_utils/encode.py +390 -0
- mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
- mirascope/llm/providers/openai/completions/base_provider.py +522 -0
- mirascope/llm/providers/openai/completions/provider.py +28 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +303 -0
- mirascope/llm/providers/openai/provider.py +405 -0
- mirascope/llm/providers/openai/responses/__init__.py +5 -0
- mirascope/llm/providers/openai/responses/_utils/__init__.py +15 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +289 -0
- mirascope/llm/providers/openai/responses/_utils/encode.py +399 -0
- mirascope/llm/providers/openai/responses/provider.py +472 -0
- mirascope/llm/providers/openrouter/__init__.py +5 -0
- mirascope/llm/providers/openrouter/provider.py +67 -0
- mirascope/llm/providers/provider_id.py +26 -0
- mirascope/llm/providers/provider_registry.py +305 -0
- mirascope/llm/providers/together/__init__.py +7 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +66 -0
- mirascope/llm/responses/_utils.py +146 -0
- mirascope/llm/responses/base_response.py +103 -0
- mirascope/llm/responses/base_stream_response.py +824 -0
- mirascope/llm/responses/finish_reason.py +28 -0
- mirascope/llm/responses/response.py +362 -0
- mirascope/llm/responses/root_response.py +248 -0
- mirascope/llm/responses/stream_response.py +577 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/responses/usage.py +139 -0
- mirascope/llm/tools/__init__.py +71 -0
- mirascope/llm/tools/_utils.py +34 -0
- mirascope/llm/tools/decorator.py +184 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/provider_tools.py +18 -0
- mirascope/llm/tools/tool_schema.py +321 -0
- mirascope/llm/tools/toolkit.py +178 -0
- mirascope/llm/tools/tools.py +263 -0
- mirascope/llm/tools/types.py +112 -0
- mirascope/llm/tools/web_search_tool.py +32 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope/ops/__init__.py +129 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1172 -0
- mirascope/ops/_internal/configuration.py +177 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +362 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +66 -0
- mirascope/ops/_internal/instrumentation/__init__.py +28 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/common.py +500 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +161 -0
- mirascope/ops/_internal/instrumentation/llm/model.py +1777 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +324 -0
- mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
- mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
- mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
- mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
- mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +133 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +389 -0
- mirascope/ops/_internal/traced_functions.py +528 -0
- mirascope/ops/_internal/tracing.py +353 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +131 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +357 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- mirascope-2.1.1.dist-info/METADATA +231 -0
- mirascope-2.1.1.dist-info/RECORD +437 -0
- {mirascope-1.0.5.dist-info → mirascope-2.1.1.dist-info}/WHEEL +1 -1
- {mirascope-1.0.5.dist-info → mirascope-2.1.1.dist-info}/licenses/LICENSE +1 -1
- mirascope/beta/__init__.py +0 -0
- mirascope/beta/openai/__init__.py +0 -5
- mirascope/beta/openai/parse.py +0 -129
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -57
- mirascope/beta/rag/chroma/vectorstores.py +0 -97
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -55
- mirascope/core/anthropic/__init__.py +0 -21
- mirascope/core/anthropic/_call.py +0 -71
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_calculate_cost.py +0 -63
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -54
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -34
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -89
- mirascope/core/anthropic/_utils/_setup_call.py +0 -76
- mirascope/core/anthropic/call_params.py +0 -36
- mirascope/core/anthropic/call_response.py +0 -158
- mirascope/core/anthropic/call_response_chunk.py +0 -104
- mirascope/core/anthropic/dynamic_config.py +0 -26
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -140
- mirascope/core/anthropic/tool.py +0 -77
- mirascope/core/base/__init__.py +0 -40
- mirascope/core/base/_call_factory.py +0 -323
- mirascope/core/base/_create.py +0 -167
- mirascope/core/base/_extract.py +0 -139
- mirascope/core/base/_partial.py +0 -63
- mirascope/core/base/_utils/__init__.py +0 -64
- mirascope/core/base/_utils/_base_type.py +0 -17
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -45
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -126
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -36
- mirascope/core/base/_utils/_format_template.py +0 -29
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_fn_args.py +0 -14
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -25
- mirascope/core/base/_utils/_get_template_values.py +0 -52
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_json_mode_content.py +0 -15
- mirascope/core/base/_utils/_parse_content_template.py +0 -157
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -51
- mirascope/core/base/_utils/_protocols.py +0 -215
- mirascope/core/base/_utils/_setup_call.py +0 -64
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -24
- mirascope/core/base/call_params.py +0 -6
- mirascope/core/base/call_response.py +0 -189
- mirascope/core/base/call_response_chunk.py +0 -91
- mirascope/core/base/dynamic_config.py +0 -55
- mirascope/core/base/message_param.py +0 -61
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -415
- mirascope/core/base/stream.py +0 -365
- mirascope/core/base/structured_stream.py +0 -251
- mirascope/core/base/tool.py +0 -126
- mirascope/core/base/toolkit.py +0 -146
- mirascope/core/cohere/__init__.py +0 -21
- mirascope/core/cohere/_call.py +0 -71
- mirascope/core/cohere/_utils/__init__.py +0 -16
- mirascope/core/cohere/_utils/_calculate_cost.py +0 -39
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -31
- mirascope/core/cohere/_utils/_get_json_output.py +0 -31
- mirascope/core/cohere/_utils/_handle_stream.py +0 -33
- mirascope/core/cohere/_utils/_setup_call.py +0 -89
- mirascope/core/cohere/call_params.py +0 -57
- mirascope/core/cohere/call_response.py +0 -167
- mirascope/core/cohere/call_response_chunk.py +0 -101
- mirascope/core/cohere/dynamic_config.py +0 -24
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -92
- mirascope/core/gemini/__init__.py +0 -21
- mirascope/core/gemini/_call.py +0 -71
- mirascope/core/gemini/_utils/__init__.py +0 -16
- mirascope/core/gemini/_utils/_calculate_cost.py +0 -8
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -74
- mirascope/core/gemini/_utils/_get_json_output.py +0 -33
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_setup_call.py +0 -68
- mirascope/core/gemini/call_params.py +0 -28
- mirascope/core/gemini/call_response.py +0 -173
- mirascope/core/gemini/call_response_chunk.py +0 -85
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -121
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/groq/__init__.py +0 -21
- mirascope/core/groq/_call.py +0 -71
- mirascope/core/groq/_utils/__init__.py +0 -16
- mirascope/core/groq/_utils/_calculate_cost.py +0 -68
- mirascope/core/groq/_utils/_convert_message_params.py +0 -23
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -121
- mirascope/core/groq/_utils/_setup_call.py +0 -67
- mirascope/core/groq/call_params.py +0 -51
- mirascope/core/groq/call_response.py +0 -160
- mirascope/core/groq/call_response_chunk.py +0 -89
- mirascope/core/groq/dynamic_config.py +0 -26
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -136
- mirascope/core/groq/tool.py +0 -79
- mirascope/core/litellm/__init__.py +0 -6
- mirascope/core/litellm/_call.py +0 -73
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -46
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/mistral/__init__.py +0 -21
- mirascope/core/mistral/_call.py +0 -69
- mirascope/core/mistral/_utils/__init__.py +0 -16
- mirascope/core/mistral/_utils/_calculate_cost.py +0 -47
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -23
- mirascope/core/mistral/_utils/_get_json_output.py +0 -28
- mirascope/core/mistral/_utils/_handle_stream.py +0 -121
- mirascope/core/mistral/_utils/_setup_call.py +0 -86
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -156
- mirascope/core/mistral/call_response_chunk.py +0 -84
- mirascope/core/mistral/dynamic_config.py +0 -24
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -117
- mirascope/core/mistral/tool.py +0 -77
- mirascope/core/openai/__init__.py +0 -21
- mirascope/core/openai/_call.py +0 -71
- mirascope/core/openai/_utils/__init__.py +0 -16
- mirascope/core/openai/_utils/_calculate_cost.py +0 -110
- mirascope/core/openai/_utils/_convert_message_params.py +0 -53
- mirascope/core/openai/_utils/_get_json_output.py +0 -27
- mirascope/core/openai/_utils/_handle_stream.py +0 -125
- mirascope/core/openai/_utils/_setup_call.py +0 -62
- mirascope/core/openai/call_params.py +0 -54
- mirascope/core/openai/call_response.py +0 -162
- mirascope/core/openai/call_response_chunk.py +0 -90
- mirascope/core/openai/dynamic_config.py +0 -26
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -148
- mirascope/core/openai/tool.py +0 -79
- mirascope/core/py.typed +0 -0
- mirascope/integrations/__init__.py +0 -20
- mirascope/integrations/_middleware_factory.py +0 -277
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -71
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -188
- mirascope/integrations/logfire/_with_logfire.py +0 -60
- mirascope/integrations/otel/__init__.py +0 -5
- mirascope/integrations/otel/_utils.py +0 -268
- mirascope/integrations/otel/_with_hyperdx.py +0 -61
- mirascope/integrations/otel/_with_otel.py +0 -60
- mirascope/integrations/tenacity.py +0 -50
- mirascope/py.typed +0 -0
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.0.5.dist-info/METADATA +0 -519
- mirascope-1.0.5.dist-info/RECORD +0 -198
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import threading
|
|
5
|
+
from collections.abc import Iterable, Sequence
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
from typing_extensions import Unpack
|
|
9
|
+
|
|
10
|
+
import mlx.core as mx
|
|
11
|
+
import mlx.nn as nn
|
|
12
|
+
from mlx_lm import stream_generate # type: ignore[reportPrivateImportUsage]
|
|
13
|
+
from mlx_lm.generate import GenerationResponse
|
|
14
|
+
from transformers import PreTrainedTokenizer
|
|
15
|
+
|
|
16
|
+
from ...formatting import Format, FormatSpec, FormattableT
|
|
17
|
+
from ...messages import AssistantMessage, Message, assistant
|
|
18
|
+
from ...responses import AsyncChunkIterator, ChunkIterator, StreamResponseChunk
|
|
19
|
+
from ...tools import AnyToolSchema, BaseToolkit
|
|
20
|
+
from . import _utils
|
|
21
|
+
from .encoding import BaseEncoder, TokenIds
|
|
22
|
+
from .model_id import MLXModelId
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from ...models import Params
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _consume_sync_stream_into_queue(
|
|
29
|
+
generation_stream: ChunkIterator,
|
|
30
|
+
loop: asyncio.AbstractEventLoop,
|
|
31
|
+
queue: asyncio.Queue[StreamResponseChunk | Exception | None],
|
|
32
|
+
) -> None:
|
|
33
|
+
"""Consume a synchronous stream and put chunks into an async queue.
|
|
34
|
+
|
|
35
|
+
Args:
|
|
36
|
+
sync_stream: The synchronous chunk iterator to consume.
|
|
37
|
+
loop: The event loop for scheduling queue operations.
|
|
38
|
+
queue: The async queue to put chunks into.
|
|
39
|
+
"""
|
|
40
|
+
try:
|
|
41
|
+
for response in generation_stream:
|
|
42
|
+
asyncio.run_coroutine_threadsafe(queue.put(response), loop)
|
|
43
|
+
except Exception as e:
|
|
44
|
+
asyncio.run_coroutine_threadsafe(queue.put(e), loop)
|
|
45
|
+
|
|
46
|
+
asyncio.run_coroutine_threadsafe(queue.put(None), loop)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@dataclass(frozen=True)
|
|
50
|
+
class MLX:
|
|
51
|
+
"""MLX model wrapper for synchronous and asynchronous generation.
|
|
52
|
+
|
|
53
|
+
Args:
|
|
54
|
+
model_id: The MLX model identifier.
|
|
55
|
+
model: The underlying MLX model.
|
|
56
|
+
tokenizer: The tokenizer for the model.
|
|
57
|
+
encoder: The encoder for prompts and responses.
|
|
58
|
+
"""
|
|
59
|
+
|
|
60
|
+
model_id: MLXModelId
|
|
61
|
+
"""The MLX model identifier."""
|
|
62
|
+
|
|
63
|
+
model: nn.Module
|
|
64
|
+
"""The underlying MLX model."""
|
|
65
|
+
|
|
66
|
+
tokenizer: PreTrainedTokenizer
|
|
67
|
+
"""The tokenizer for the model."""
|
|
68
|
+
|
|
69
|
+
encoder: BaseEncoder
|
|
70
|
+
"""The encoder for prompts and responses."""
|
|
71
|
+
|
|
72
|
+
_lock: threading.Lock = field(default_factory=threading.Lock)
|
|
73
|
+
"""The lock for thread-safety."""
|
|
74
|
+
|
|
75
|
+
def _stream_generate(
|
|
76
|
+
self,
|
|
77
|
+
prompt: TokenIds,
|
|
78
|
+
seed: int | None,
|
|
79
|
+
**kwargs: Unpack[_utils.StreamGenerateKwargs],
|
|
80
|
+
) -> Iterable[GenerationResponse]:
|
|
81
|
+
"""Generator that streams generation responses.
|
|
82
|
+
|
|
83
|
+
Using this generator instead of calling stream_generate directly ensures
|
|
84
|
+
thread-safety when using the model in a multi-threaded context.
|
|
85
|
+
"""
|
|
86
|
+
with self._lock:
|
|
87
|
+
if seed is not None:
|
|
88
|
+
mx.random.seed(seed)
|
|
89
|
+
|
|
90
|
+
return stream_generate(
|
|
91
|
+
self.model,
|
|
92
|
+
self.tokenizer,
|
|
93
|
+
prompt,
|
|
94
|
+
**kwargs,
|
|
95
|
+
)
|
|
96
|
+
|
|
97
|
+
async def _stream_generate_async(
|
|
98
|
+
self,
|
|
99
|
+
prompt: TokenIds,
|
|
100
|
+
seed: int | None,
|
|
101
|
+
**kwargs: Unpack[_utils.StreamGenerateKwargs],
|
|
102
|
+
) -> AsyncChunkIterator:
|
|
103
|
+
"""Async generator that streams generation responses.
|
|
104
|
+
|
|
105
|
+
Note that, while stream_generate returns an iterable of GenerationResponse,
|
|
106
|
+
here we return an `AsyncChunkIterator`, in order to avoid having to implement
|
|
107
|
+
both synchronous and asynchronous versions of BaseEncoder.decode_stream.
|
|
108
|
+
This makes sense as in this case, there is nothing to gain from consuming the
|
|
109
|
+
generation asyncnronously.
|
|
110
|
+
"""
|
|
111
|
+
loop = asyncio.get_running_loop()
|
|
112
|
+
generation_queue: asyncio.Queue[StreamResponseChunk | Exception | None] = (
|
|
113
|
+
asyncio.Queue()
|
|
114
|
+
)
|
|
115
|
+
|
|
116
|
+
sync_stream = self.encoder.decode_stream(
|
|
117
|
+
self._stream_generate(
|
|
118
|
+
prompt,
|
|
119
|
+
seed,
|
|
120
|
+
**kwargs,
|
|
121
|
+
)
|
|
122
|
+
)
|
|
123
|
+
|
|
124
|
+
consume_task = asyncio.create_task(
|
|
125
|
+
asyncio.to_thread(
|
|
126
|
+
_consume_sync_stream_into_queue, sync_stream, loop, generation_queue
|
|
127
|
+
),
|
|
128
|
+
)
|
|
129
|
+
while item := await generation_queue.get():
|
|
130
|
+
if isinstance(item, Exception):
|
|
131
|
+
raise item
|
|
132
|
+
|
|
133
|
+
yield item
|
|
134
|
+
|
|
135
|
+
await consume_task
|
|
136
|
+
|
|
137
|
+
def stream(
|
|
138
|
+
self,
|
|
139
|
+
messages: Sequence[Message],
|
|
140
|
+
tools: BaseToolkit[AnyToolSchema],
|
|
141
|
+
format: FormatSpec[FormattableT] | None,
|
|
142
|
+
params: Params,
|
|
143
|
+
) -> tuple[Sequence[Message], Format[FormattableT] | None, ChunkIterator]:
|
|
144
|
+
"""Stream response chunks synchronously.
|
|
145
|
+
|
|
146
|
+
Args:
|
|
147
|
+
messages: The input messages.
|
|
148
|
+
tools: Optional tools for the model.
|
|
149
|
+
format: Optional response format.
|
|
150
|
+
|
|
151
|
+
Returns:
|
|
152
|
+
Tuple of messages, format, and chunk iterator.
|
|
153
|
+
"""
|
|
154
|
+
messages, format, prompt = self.encoder.encode_request(messages, tools, format)
|
|
155
|
+
seed, kwargs = _utils.encode_params(params)
|
|
156
|
+
|
|
157
|
+
stream = self._stream_generate(prompt, seed, **kwargs)
|
|
158
|
+
return messages, format, self.encoder.decode_stream(stream)
|
|
159
|
+
|
|
160
|
+
async def stream_async(
|
|
161
|
+
self,
|
|
162
|
+
messages: Sequence[Message],
|
|
163
|
+
tools: BaseToolkit[AnyToolSchema],
|
|
164
|
+
format: FormatSpec[FormattableT] | None,
|
|
165
|
+
params: Params,
|
|
166
|
+
) -> tuple[Sequence[Message], Format[FormattableT] | None, AsyncChunkIterator]:
|
|
167
|
+
"""Stream response chunks asynchronously.
|
|
168
|
+
|
|
169
|
+
Args:
|
|
170
|
+
messages: The input messages.
|
|
171
|
+
tools: Optional tools for the model.
|
|
172
|
+
format: Optional response format.
|
|
173
|
+
Returns:
|
|
174
|
+
Tuple of messages, format, and async chunk iterator.
|
|
175
|
+
"""
|
|
176
|
+
messages, format, prompt = await asyncio.to_thread(
|
|
177
|
+
self.encoder.encode_request, messages, tools, format
|
|
178
|
+
)
|
|
179
|
+
seed, kwargs = _utils.encode_params(params)
|
|
180
|
+
|
|
181
|
+
chunk_iterator = self._stream_generate_async(prompt, seed, **kwargs)
|
|
182
|
+
return messages, format, chunk_iterator
|
|
183
|
+
|
|
184
|
+
def generate(
|
|
185
|
+
self,
|
|
186
|
+
messages: Sequence[Message],
|
|
187
|
+
tools: BaseToolkit[AnyToolSchema],
|
|
188
|
+
format: FormatSpec[FormattableT] | None,
|
|
189
|
+
params: Params,
|
|
190
|
+
) -> tuple[
|
|
191
|
+
Sequence[Message],
|
|
192
|
+
Format[FormattableT] | None,
|
|
193
|
+
AssistantMessage,
|
|
194
|
+
GenerationResponse | None,
|
|
195
|
+
]:
|
|
196
|
+
"""Generate a response synchronously.
|
|
197
|
+
|
|
198
|
+
Args:
|
|
199
|
+
messages: The input messages.
|
|
200
|
+
tools: Optional tools for the model.
|
|
201
|
+
format: Optional response format.
|
|
202
|
+
params: Generation parameters.
|
|
203
|
+
Returns:
|
|
204
|
+
Tuple of messages, format, assistant message, and last generation response.
|
|
205
|
+
"""
|
|
206
|
+
messages, format, prompt = self.encoder.encode_request(messages, tools, format)
|
|
207
|
+
seed, kwargs = _utils.encode_params(params)
|
|
208
|
+
|
|
209
|
+
stream = self._stream_generate(prompt, seed, **kwargs)
|
|
210
|
+
assistant_content, last_response = self.encoder.decode_response(stream)
|
|
211
|
+
assistant_message = assistant(
|
|
212
|
+
content=assistant_content,
|
|
213
|
+
model_id=self.model_id,
|
|
214
|
+
provider_id="mlx",
|
|
215
|
+
raw_message=None,
|
|
216
|
+
name=None,
|
|
217
|
+
)
|
|
218
|
+
return messages, format, assistant_message, last_response
|
|
219
|
+
|
|
220
|
+
async def generate_async(
|
|
221
|
+
self,
|
|
222
|
+
messages: Sequence[Message],
|
|
223
|
+
tools: BaseToolkit[AnyToolSchema],
|
|
224
|
+
format: FormatSpec[FormattableT] | None,
|
|
225
|
+
params: Params,
|
|
226
|
+
) -> tuple[
|
|
227
|
+
Sequence[Message],
|
|
228
|
+
Format[FormattableT] | None,
|
|
229
|
+
AssistantMessage,
|
|
230
|
+
GenerationResponse | None,
|
|
231
|
+
]:
|
|
232
|
+
"""Generate a response asynchronously.
|
|
233
|
+
|
|
234
|
+
Args:
|
|
235
|
+
messages: The input messages.
|
|
236
|
+
tools: Optional tools for the model.
|
|
237
|
+
format: Optional response format.
|
|
238
|
+
params: Generation parameters.
|
|
239
|
+
Returns:
|
|
240
|
+
Tuple of messages, format, assistant message, and last generation response.
|
|
241
|
+
"""
|
|
242
|
+
return await asyncio.to_thread(self.generate, messages, tools, format, params)
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from typing import TypeAlias
|
|
2
|
+
|
|
3
|
+
# TODO: Add more explicit literals
|
|
4
|
+
# TODO: Ensure automatic model downloads are supported.
|
|
5
|
+
# TODO: Ensure instructions are clear for examples that run as copied
|
|
6
|
+
MLXModelId: TypeAlias = str
|
|
7
|
+
"""The identifier of the MLX model to be loaded by the MLX client.
|
|
8
|
+
|
|
9
|
+
An MLX model identifier might be a local path to a model's file, or a huggingface
|
|
10
|
+
repository such as:
|
|
11
|
+
- "mlx-community/Qwen3-8B-4bit-DWQ-053125"
|
|
12
|
+
- "mlx-community/gpt-oss-20b-MXFP4-Q8"
|
|
13
|
+
|
|
14
|
+
For more details, see:
|
|
15
|
+
- https://github.com/ml-explore/mlx-lm/?tab=readme-ov-file#supported-models
|
|
16
|
+
- https://huggingface.co/mlx-community
|
|
17
|
+
"""
|
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Sequence
|
|
4
|
+
from functools import cache, lru_cache
|
|
5
|
+
from typing import TYPE_CHECKING, cast
|
|
6
|
+
from typing_extensions import Unpack
|
|
7
|
+
|
|
8
|
+
import mlx.nn as nn
|
|
9
|
+
from mlx_lm import load as mlx_load
|
|
10
|
+
from transformers import PreTrainedTokenizer
|
|
11
|
+
|
|
12
|
+
from ...context import Context, DepsT
|
|
13
|
+
from ...formatting import FormatSpec, FormattableT
|
|
14
|
+
from ...messages import Message
|
|
15
|
+
from ...responses import (
|
|
16
|
+
AsyncContextResponse,
|
|
17
|
+
AsyncContextStreamResponse,
|
|
18
|
+
AsyncResponse,
|
|
19
|
+
AsyncStreamResponse,
|
|
20
|
+
ContextResponse,
|
|
21
|
+
ContextStreamResponse,
|
|
22
|
+
Response,
|
|
23
|
+
StreamResponse,
|
|
24
|
+
)
|
|
25
|
+
from ...tools import (
|
|
26
|
+
AsyncContextToolkit,
|
|
27
|
+
AsyncToolkit,
|
|
28
|
+
ContextToolkit,
|
|
29
|
+
Toolkit,
|
|
30
|
+
)
|
|
31
|
+
from ..base import BaseProvider
|
|
32
|
+
from . import _utils
|
|
33
|
+
from .encoding import TransformersEncoder
|
|
34
|
+
from .mlx import MLX
|
|
35
|
+
from .model_id import MLXModelId
|
|
36
|
+
|
|
37
|
+
if TYPE_CHECKING:
|
|
38
|
+
from ...models import Params
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@cache
|
|
42
|
+
def _mlx_client_singleton() -> MLXProvider:
|
|
43
|
+
"""Get or create the singleton MLX client instance."""
|
|
44
|
+
return MLXProvider()
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def client() -> MLXProvider:
|
|
48
|
+
"""Get the MLX client singleton instance."""
|
|
49
|
+
return _mlx_client_singleton()
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@lru_cache(maxsize=16)
|
|
53
|
+
def _get_mlx(model_id: MLXModelId) -> MLX:
|
|
54
|
+
model, tokenizer = cast(tuple[nn.Module, PreTrainedTokenizer], mlx_load(model_id))
|
|
55
|
+
encoder = TransformersEncoder(tokenizer)
|
|
56
|
+
return MLX(
|
|
57
|
+
model_id,
|
|
58
|
+
model,
|
|
59
|
+
tokenizer,
|
|
60
|
+
encoder,
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class MLXProvider(BaseProvider[None]):
|
|
65
|
+
"""Client for interacting with MLX language models.
|
|
66
|
+
|
|
67
|
+
This client provides methods for generating responses from MLX models,
|
|
68
|
+
supporting both synchronous and asynchronous operations, as well as
|
|
69
|
+
streaming responses.
|
|
70
|
+
"""
|
|
71
|
+
|
|
72
|
+
id = "mlx"
|
|
73
|
+
default_scope = "mlx-community/"
|
|
74
|
+
error_map = _utils.MLX_ERROR_MAP
|
|
75
|
+
|
|
76
|
+
def get_error_status(self, e: Exception) -> int | None:
|
|
77
|
+
"""Extract HTTP status code from MLX exception.
|
|
78
|
+
|
|
79
|
+
MLX/HuggingFace Hub exceptions don't have status codes.
|
|
80
|
+
"""
|
|
81
|
+
return None
|
|
82
|
+
|
|
83
|
+
def _call(
|
|
84
|
+
self,
|
|
85
|
+
*,
|
|
86
|
+
model_id: MLXModelId,
|
|
87
|
+
messages: Sequence[Message],
|
|
88
|
+
toolkit: Toolkit,
|
|
89
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
90
|
+
**params: Unpack[Params],
|
|
91
|
+
) -> Response | Response[FormattableT]:
|
|
92
|
+
"""Generate an `llm.Response` using MLX model.
|
|
93
|
+
|
|
94
|
+
Args:
|
|
95
|
+
model_id: Model identifier to use.
|
|
96
|
+
messages: Messages to send to the LLM.
|
|
97
|
+
tools: Optional tools that the model may invoke.
|
|
98
|
+
format: Optional response format specifier.
|
|
99
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
100
|
+
|
|
101
|
+
Returns:
|
|
102
|
+
An `llm.Response` object containing the LLM-generated content.
|
|
103
|
+
"""
|
|
104
|
+
mlx = _get_mlx(model_id)
|
|
105
|
+
|
|
106
|
+
input_messages, format, assistant_message, response = mlx.generate(
|
|
107
|
+
messages, toolkit, format, params
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
return Response(
|
|
111
|
+
raw=response,
|
|
112
|
+
provider_id="mlx",
|
|
113
|
+
model_id=model_id,
|
|
114
|
+
provider_model_name=model_id,
|
|
115
|
+
params=params,
|
|
116
|
+
tools=toolkit,
|
|
117
|
+
input_messages=input_messages,
|
|
118
|
+
assistant_message=assistant_message,
|
|
119
|
+
finish_reason=_utils.extract_finish_reason(response),
|
|
120
|
+
usage=_utils.extract_usage(response),
|
|
121
|
+
format=format,
|
|
122
|
+
)
|
|
123
|
+
|
|
124
|
+
def _context_call(
|
|
125
|
+
self,
|
|
126
|
+
*,
|
|
127
|
+
ctx: Context[DepsT],
|
|
128
|
+
model_id: MLXModelId,
|
|
129
|
+
messages: Sequence[Message],
|
|
130
|
+
toolkit: ContextToolkit[DepsT],
|
|
131
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
132
|
+
**params: Unpack[Params],
|
|
133
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
134
|
+
"""Generate an `llm.ContextResponse` using MLX model.
|
|
135
|
+
|
|
136
|
+
Args:
|
|
137
|
+
ctx: Context object with dependencies for tools.
|
|
138
|
+
model_id: Model identifier to use.
|
|
139
|
+
messages: Messages to send to the LLM.
|
|
140
|
+
tools: Optional tools that the model may invoke.
|
|
141
|
+
format: Optional response format specifier.
|
|
142
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
143
|
+
|
|
144
|
+
Returns:
|
|
145
|
+
An `llm.ContextResponse` object containing the LLM-generated content.
|
|
146
|
+
"""
|
|
147
|
+
mlx = _get_mlx(model_id)
|
|
148
|
+
|
|
149
|
+
input_messages, format, assistant_message, response = mlx.generate(
|
|
150
|
+
messages, toolkit, format, params
|
|
151
|
+
)
|
|
152
|
+
|
|
153
|
+
return ContextResponse(
|
|
154
|
+
raw=response,
|
|
155
|
+
provider_id="mlx",
|
|
156
|
+
model_id=model_id,
|
|
157
|
+
provider_model_name=model_id,
|
|
158
|
+
params=params,
|
|
159
|
+
tools=toolkit,
|
|
160
|
+
input_messages=input_messages,
|
|
161
|
+
assistant_message=assistant_message,
|
|
162
|
+
finish_reason=_utils.extract_finish_reason(response),
|
|
163
|
+
usage=_utils.extract_usage(response),
|
|
164
|
+
format=format,
|
|
165
|
+
)
|
|
166
|
+
|
|
167
|
+
async def _call_async(
|
|
168
|
+
self,
|
|
169
|
+
*,
|
|
170
|
+
model_id: MLXModelId,
|
|
171
|
+
messages: Sequence[Message],
|
|
172
|
+
toolkit: AsyncToolkit,
|
|
173
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
174
|
+
**params: Unpack[Params],
|
|
175
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
176
|
+
"""Generate an `llm.AsyncResponse` using MLX model by asynchronously calloing
|
|
177
|
+
`asycio.to_thread`.
|
|
178
|
+
|
|
179
|
+
Args:
|
|
180
|
+
model_id: Model identifier to use.
|
|
181
|
+
messages: Messages to send to the LLM.
|
|
182
|
+
tools: Optional tools that the model may invoke.
|
|
183
|
+
format: Optional response format specifier.
|
|
184
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
185
|
+
|
|
186
|
+
Returns:
|
|
187
|
+
An `llm.AsyncResponse` object containing the LLM-generated content.
|
|
188
|
+
"""
|
|
189
|
+
mlx = _get_mlx(model_id)
|
|
190
|
+
|
|
191
|
+
(
|
|
192
|
+
input_messages,
|
|
193
|
+
format,
|
|
194
|
+
assistant_message,
|
|
195
|
+
response,
|
|
196
|
+
) = await mlx.generate_async(messages, toolkit, format, params)
|
|
197
|
+
|
|
198
|
+
return AsyncResponse(
|
|
199
|
+
raw=response,
|
|
200
|
+
provider_id="mlx",
|
|
201
|
+
model_id=model_id,
|
|
202
|
+
provider_model_name=model_id,
|
|
203
|
+
params=params,
|
|
204
|
+
tools=toolkit,
|
|
205
|
+
input_messages=input_messages,
|
|
206
|
+
assistant_message=assistant_message,
|
|
207
|
+
finish_reason=_utils.extract_finish_reason(response),
|
|
208
|
+
usage=_utils.extract_usage(response),
|
|
209
|
+
format=format,
|
|
210
|
+
)
|
|
211
|
+
|
|
212
|
+
async def _context_call_async(
|
|
213
|
+
self,
|
|
214
|
+
*,
|
|
215
|
+
ctx: Context[DepsT],
|
|
216
|
+
model_id: MLXModelId,
|
|
217
|
+
messages: Sequence[Message],
|
|
218
|
+
toolkit: AsyncContextToolkit[DepsT],
|
|
219
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
220
|
+
**params: Unpack[Params],
|
|
221
|
+
) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
|
|
222
|
+
"""Generate an `llm.AsyncResponse` using MLX model by asynchronously calloing
|
|
223
|
+
`asycio.to_thread`.
|
|
224
|
+
|
|
225
|
+
Args:
|
|
226
|
+
ctx: Context object with dependencies for tools.
|
|
227
|
+
model_id: Model identifier to use.
|
|
228
|
+
messages: Messages to send to the LLM.
|
|
229
|
+
tools: Optional tools that the model may invoke.
|
|
230
|
+
format: Optional response format specifier.
|
|
231
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
232
|
+
|
|
233
|
+
Returns:
|
|
234
|
+
An `llm.AsyncContextResponse` object containing the LLM-generated content.
|
|
235
|
+
"""
|
|
236
|
+
mlx = _get_mlx(model_id)
|
|
237
|
+
|
|
238
|
+
(
|
|
239
|
+
input_messages,
|
|
240
|
+
format,
|
|
241
|
+
assistant_message,
|
|
242
|
+
response,
|
|
243
|
+
) = await mlx.generate_async(messages, toolkit, format, params)
|
|
244
|
+
|
|
245
|
+
return AsyncContextResponse(
|
|
246
|
+
raw=response,
|
|
247
|
+
provider_id="mlx",
|
|
248
|
+
model_id=model_id,
|
|
249
|
+
provider_model_name=model_id,
|
|
250
|
+
params=params,
|
|
251
|
+
tools=toolkit,
|
|
252
|
+
input_messages=input_messages,
|
|
253
|
+
assistant_message=assistant_message,
|
|
254
|
+
finish_reason=_utils.extract_finish_reason(response),
|
|
255
|
+
usage=_utils.extract_usage(response),
|
|
256
|
+
format=format,
|
|
257
|
+
)
|
|
258
|
+
|
|
259
|
+
def _stream(
|
|
260
|
+
self,
|
|
261
|
+
*,
|
|
262
|
+
model_id: MLXModelId,
|
|
263
|
+
messages: Sequence[Message],
|
|
264
|
+
toolkit: Toolkit,
|
|
265
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
266
|
+
**params: Unpack[Params],
|
|
267
|
+
) -> StreamResponse | StreamResponse[FormattableT]:
|
|
268
|
+
"""Generate an `llm.StreamResponse` by synchronously streaming from MLX model output.
|
|
269
|
+
|
|
270
|
+
Args:
|
|
271
|
+
model_id: Model identifier to use.
|
|
272
|
+
messages: Messages to send to the LLM.
|
|
273
|
+
tools: Optional tools that the model may invoke.
|
|
274
|
+
format: Optional response format specifier.
|
|
275
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
276
|
+
|
|
277
|
+
Returns:
|
|
278
|
+
An `llm.StreamResponse` object for iterating over the LLM-generated content.
|
|
279
|
+
"""
|
|
280
|
+
mlx = _get_mlx(model_id)
|
|
281
|
+
|
|
282
|
+
input_messages, format, chunk_iterator = mlx.stream(
|
|
283
|
+
messages, toolkit, format, params
|
|
284
|
+
)
|
|
285
|
+
|
|
286
|
+
return StreamResponse(
|
|
287
|
+
provider_id="mlx",
|
|
288
|
+
model_id=model_id,
|
|
289
|
+
provider_model_name=model_id,
|
|
290
|
+
params=params,
|
|
291
|
+
tools=toolkit,
|
|
292
|
+
input_messages=input_messages,
|
|
293
|
+
chunk_iterator=chunk_iterator,
|
|
294
|
+
format=format,
|
|
295
|
+
)
|
|
296
|
+
|
|
297
|
+
def _context_stream(
|
|
298
|
+
self,
|
|
299
|
+
*,
|
|
300
|
+
ctx: Context[DepsT],
|
|
301
|
+
model_id: MLXModelId,
|
|
302
|
+
messages: Sequence[Message],
|
|
303
|
+
toolkit: ContextToolkit[DepsT],
|
|
304
|
+
format: FormatSpec[FormattableT] | None = None,
|
|
305
|
+
**params: Unpack[Params],
|
|
306
|
+
) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
|
|
307
|
+
"""Generate an `llm.ContextStreamResponse` by synchronously streaming from MLX model output.
|
|
308
|
+
|
|
309
|
+
Args:
|
|
310
|
+
ctx: Context object with dependencies for tools.
|
|
311
|
+
model_id: Model identifier to use.
|
|
312
|
+
messages: Messages to send to the LLM.
|
|
313
|
+
tools: Optional tools that the model may invoke.
|
|
314
|
+
format: Optional response format specifier.
|
|
315
|
+
**params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
|
|
316
|
+
|
|
317
|
+
Returns:
|
|
318
|
+
An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
|
|
319
|
+
"""
|
|
320
|
+
mlx = _get_mlx(model_id)
|
|
321
|
+
|
|
322
|
+
input_messages, format, chunk_iterator = mlx.stream(
|
|
323
|
+
messages, toolkit, format, params
|
|
324
|
+
)
|
|
325
|
+
|
|
326
|
+
return ContextStreamResponse(
|
|
327
|
+
provider_id="mlx",
|
|
328
|
+
model_id=model_id,
|
|
329
|
+
provider_model_name=model_id,
|
|
330
|
+
params=params,
|
|
331
|
+
tools=toolkit,
|
|
332
|
+
input_messages=input_messages,
|
|
333
|
+
chunk_iterator=chunk_iterator,
|
|
334
|
+
format=format,
|
|
335
|
+
)
|
|
336
|
+
|
|
337
|
+
async def _stream_async(
    self,
    *,
    model_id: MLXModelId,
    messages: Sequence[Message],
    toolkit: AsyncToolkit,
    format: FormatSpec[FormattableT] | None = None,
    **params: Unpack[Params],
) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
    """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from MLX model output.

    Args:
        model_id: Model identifier to use.
        messages: Messages to send to the LLM.
        toolkit: Tools that the model may invoke.
        format: Optional response format specifier.
        **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.

    Returns:
        An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
    """
    backend = _get_mlx(model_id)

    # The async variant awaits setup; the returned iterator is consumed
    # lazily by the response object below.
    normalized_messages, resolved_format, chunks = await backend.stream_async(
        messages, toolkit, format, params
    )

    return AsyncStreamResponse(
        provider_id="mlx",
        model_id=model_id,
        # MLX runs locally, so the provider-facing name is the model id itself.
        provider_model_name=model_id,
        input_messages=normalized_messages,
        chunk_iterator=chunks,
        tools=toolkit,
        params=params,
        format=resolved_format,
    )
|
|
374
|
+
|
|
375
|
+
async def _context_stream_async(
    self,
    *,
    ctx: Context[DepsT],
    model_id: MLXModelId,
    messages: Sequence[Message],
    toolkit: AsyncContextToolkit[DepsT],
    format: FormatSpec[FormattableT] | None = None,
    **params: Unpack[Params],
) -> (
    AsyncContextStreamResponse[DepsT]
    | AsyncContextStreamResponse[DepsT, FormattableT]
):
    """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from MLX model output.

    Args:
        ctx: Context object with dependencies for tools.
        model_id: Model identifier to use.
        messages: Messages to send to the LLM.
        toolkit: Tools (with context dependencies) that the model may invoke.
        format: Optional response format specifier.
        **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.

    Returns:
        An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
    """
    backend = _get_mlx(model_id)

    # Await setup of the stream; the chunk iterator itself is consumed
    # lazily by the response object below.
    normalized_messages, resolved_format, chunks = await backend.stream_async(
        messages, toolkit, format, params
    )

    return AsyncContextStreamResponse(
        provider_id="mlx",
        model_id=model_id,
        # MLX runs locally, so the provider-facing name is the model id itself.
        provider_model_name=model_id,
        input_messages=normalized_messages,
        chunk_iterator=chunks,
        tools=toolkit,
        params=params,
        format=resolved_format,
    )
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from typing import TypeAlias
|
|
2
|
+
|
|
3
|
+
from .anthropic import (
|
|
4
|
+
AnthropicModelId,
|
|
5
|
+
)
|
|
6
|
+
from .google import (
|
|
7
|
+
GoogleModelId,
|
|
8
|
+
)
|
|
9
|
+
from .mlx import (
|
|
10
|
+
MLXModelId,
|
|
11
|
+
)
|
|
12
|
+
from .openai import (
|
|
13
|
+
OpenAIModelId,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
# Any known provider model identifier, plus `str` as an escape hatch so
# newly released or custom model names still type-check.
# NOTE(review): unioning with `str` makes the alias assignable from any
# string; presumably the literal members are kept for editor autocomplete —
# confirm against the per-provider `*ModelId` definitions.
ModelId: TypeAlias = AnthropicModelId | GoogleModelId | OpenAIModelId | MLXModelId | str
|