mirascope 2.0.0__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +2 -11
- mirascope/graphs/__init__.py +22 -0
- mirascope/graphs/finite_state_machine.py +625 -0
- mirascope/llm/__init__.py +16 -101
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +1 -2
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +58 -84
- mirascope/llm/calls/decorator.py +120 -140
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/{providers/openai/completions → clients/anthropic}/_utils/__init__.py +0 -2
- mirascope/llm/{providers → clients}/anthropic/_utils/decode.py +22 -66
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/{providers → clients}/base/__init__.py +5 -4
- mirascope/llm/{providers → clients}/base/_utils.py +17 -78
- mirascope/llm/{providers/base/base_provider.py → clients/base/client.py} +145 -468
- mirascope/llm/{models → clients/base}/params.py +37 -16
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/{providers/openai/responses → clients/google}/_utils/__init__.py +0 -2
- mirascope/llm/{providers → clients}/google/_utils/decode.py +22 -98
- mirascope/llm/{providers → clients}/google/_utils/encode.py +46 -168
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/{providers/google → clients/openai/completions}/_utils/__init__.py +0 -4
- mirascope/llm/{providers → clients}/openai/completions/_utils/decode.py +9 -74
- mirascope/llm/{providers → clients}/openai/completions/_utils/encode.py +52 -70
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/{providers → clients}/openai/responses/_utils/decode.py +14 -80
- mirascope/llm/{providers → clients}/openai/responses/_utils/encode.py +41 -92
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +2 -3
- mirascope/llm/content/tool_call.py +0 -6
- mirascope/llm/content/tool_output.py +5 -22
- mirascope/llm/context/_utils.py +6 -19
- mirascope/llm/exceptions.py +43 -298
- mirascope/llm/formatting/__init__.py +2 -19
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +30 -219
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/formatting/partial.py +7 -80
- mirascope/llm/formatting/types.py +64 -21
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +0 -3
- mirascope/llm/messages/message.py +5 -13
- mirascope/llm/models/__init__.py +2 -7
- mirascope/llm/models/models.py +139 -315
- mirascope/llm/prompts/__init__.py +12 -13
- mirascope/llm/prompts/_utils.py +43 -14
- mirascope/llm/prompts/decorator.py +204 -144
- mirascope/llm/prompts/protocols.py +59 -25
- mirascope/llm/responses/__init__.py +1 -9
- mirascope/llm/responses/_utils.py +12 -102
- mirascope/llm/responses/base_response.py +6 -18
- mirascope/llm/responses/base_stream_response.py +50 -173
- mirascope/llm/responses/finish_reason.py +0 -1
- mirascope/llm/responses/response.py +13 -34
- mirascope/llm/responses/root_response.py +29 -100
- mirascope/llm/responses/stream_response.py +31 -40
- mirascope/llm/tools/__init__.py +2 -9
- mirascope/llm/tools/_utils.py +3 -12
- mirascope/llm/tools/decorator.py +16 -25
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +19 -87
- mirascope/llm/tools/toolkit.py +27 -35
- mirascope/llm/tools/tools.py +41 -135
- {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/METADATA +9 -95
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +1 -1
- {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +1 -1
- mirascope/_stubs.py +0 -363
- mirascope/api/__init__.py +0 -14
- mirascope/api/_generated/README.md +0 -207
- mirascope/api/_generated/__init__.py +0 -440
- mirascope/api/_generated/annotations/__init__.py +0 -33
- mirascope/api/_generated/annotations/client.py +0 -506
- mirascope/api/_generated/annotations/raw_client.py +0 -1414
- mirascope/api/_generated/annotations/types/__init__.py +0 -31
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_create_response.py +0 -48
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_get_response.py +0 -48
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_list_response.py +0 -21
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +0 -50
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_update_response.py +0 -48
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +0 -5
- mirascope/api/_generated/api_keys/__init__.py +0 -17
- mirascope/api/_generated/api_keys/client.py +0 -530
- mirascope/api/_generated/api_keys/raw_client.py +0 -1236
- mirascope/api/_generated/api_keys/types/__init__.py +0 -15
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +0 -28
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +0 -27
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +0 -40
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +0 -27
- mirascope/api/_generated/client.py +0 -211
- mirascope/api/_generated/core/__init__.py +0 -52
- mirascope/api/_generated/core/api_error.py +0 -23
- mirascope/api/_generated/core/client_wrapper.py +0 -46
- mirascope/api/_generated/core/datetime_utils.py +0 -28
- mirascope/api/_generated/core/file.py +0 -67
- mirascope/api/_generated/core/force_multipart.py +0 -16
- mirascope/api/_generated/core/http_client.py +0 -543
- mirascope/api/_generated/core/http_response.py +0 -55
- mirascope/api/_generated/core/jsonable_encoder.py +0 -100
- mirascope/api/_generated/core/pydantic_utilities.py +0 -255
- mirascope/api/_generated/core/query_encoder.py +0 -58
- mirascope/api/_generated/core/remove_none_from_dict.py +0 -11
- mirascope/api/_generated/core/request_options.py +0 -35
- mirascope/api/_generated/core/serialization.py +0 -276
- mirascope/api/_generated/docs/__init__.py +0 -4
- mirascope/api/_generated/docs/client.py +0 -91
- mirascope/api/_generated/docs/raw_client.py +0 -178
- mirascope/api/_generated/environment.py +0 -9
- mirascope/api/_generated/environments/__init__.py +0 -23
- mirascope/api/_generated/environments/client.py +0 -649
- mirascope/api/_generated/environments/raw_client.py +0 -1567
- mirascope/api/_generated/environments/types/__init__.py +0 -25
- mirascope/api/_generated/environments/types/environments_create_response.py +0 -24
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +0 -60
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +0 -24
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +0 -22
- mirascope/api/_generated/environments/types/environments_get_response.py +0 -24
- mirascope/api/_generated/environments/types/environments_list_response_item.py +0 -24
- mirascope/api/_generated/environments/types/environments_update_response.py +0 -24
- mirascope/api/_generated/errors/__init__.py +0 -25
- mirascope/api/_generated/errors/bad_request_error.py +0 -14
- mirascope/api/_generated/errors/conflict_error.py +0 -14
- mirascope/api/_generated/errors/forbidden_error.py +0 -11
- mirascope/api/_generated/errors/internal_server_error.py +0 -10
- mirascope/api/_generated/errors/not_found_error.py +0 -11
- mirascope/api/_generated/errors/payment_required_error.py +0 -15
- mirascope/api/_generated/errors/service_unavailable_error.py +0 -14
- mirascope/api/_generated/errors/too_many_requests_error.py +0 -15
- mirascope/api/_generated/errors/unauthorized_error.py +0 -11
- mirascope/api/_generated/functions/__init__.py +0 -39
- mirascope/api/_generated/functions/client.py +0 -647
- mirascope/api/_generated/functions/raw_client.py +0 -1890
- mirascope/api/_generated/functions/types/__init__.py +0 -53
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_create_response.py +0 -37
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +0 -39
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +0 -53
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +0 -22
- mirascope/api/_generated/functions/types/functions_get_response.py +0 -37
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +0 -25
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +0 -56
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +0 -22
- mirascope/api/_generated/functions/types/functions_list_response.py +0 -21
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +0 -41
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +0 -20
- mirascope/api/_generated/health/__init__.py +0 -7
- mirascope/api/_generated/health/client.py +0 -92
- mirascope/api/_generated/health/raw_client.py +0 -175
- mirascope/api/_generated/health/types/__init__.py +0 -8
- mirascope/api/_generated/health/types/health_check_response.py +0 -22
- mirascope/api/_generated/health/types/health_check_response_status.py +0 -5
- mirascope/api/_generated/organization_invitations/__init__.py +0 -33
- mirascope/api/_generated/organization_invitations/client.py +0 -546
- mirascope/api/_generated/organization_invitations/raw_client.py +0 -1519
- mirascope/api/_generated/organization_invitations/types/__init__.py +0 -53
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +0 -34
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +0 -48
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +0 -48
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +0 -48
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +0 -7
- mirascope/api/_generated/organization_memberships/__init__.py +0 -19
- mirascope/api/_generated/organization_memberships/client.py +0 -302
- mirascope/api/_generated/organization_memberships/raw_client.py +0 -736
- mirascope/api/_generated/organization_memberships/types/__init__.py +0 -27
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +0 -33
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +0 -7
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +0 -7
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +0 -31
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +0 -7
- mirascope/api/_generated/organizations/__init__.py +0 -51
- mirascope/api/_generated/organizations/client.py +0 -869
- mirascope/api/_generated/organizations/raw_client.py +0 -2593
- mirascope/api/_generated/organizations/types/__init__.py +0 -71
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +0 -24
- mirascope/api/_generated/organizations/types/organizations_create_response.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_get_response.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +0 -47
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +0 -33
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +0 -24
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +0 -53
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +0 -34
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_update_response.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +0 -35
- mirascope/api/_generated/project_memberships/__init__.py +0 -25
- mirascope/api/_generated/project_memberships/client.py +0 -437
- mirascope/api/_generated/project_memberships/raw_client.py +0 -1039
- mirascope/api/_generated/project_memberships/types/__init__.py +0 -29
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +0 -35
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +0 -33
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +0 -35
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +0 -7
- mirascope/api/_generated/projects/__init__.py +0 -7
- mirascope/api/_generated/projects/client.py +0 -428
- mirascope/api/_generated/projects/raw_client.py +0 -1302
- mirascope/api/_generated/projects/types/__init__.py +0 -10
- mirascope/api/_generated/projects/types/projects_create_response.py +0 -25
- mirascope/api/_generated/projects/types/projects_get_response.py +0 -25
- mirascope/api/_generated/projects/types/projects_list_response_item.py +0 -25
- mirascope/api/_generated/projects/types/projects_update_response.py +0 -25
- mirascope/api/_generated/reference.md +0 -4915
- mirascope/api/_generated/tags/__init__.py +0 -19
- mirascope/api/_generated/tags/client.py +0 -504
- mirascope/api/_generated/tags/raw_client.py +0 -1288
- mirascope/api/_generated/tags/types/__init__.py +0 -17
- mirascope/api/_generated/tags/types/tags_create_response.py +0 -41
- mirascope/api/_generated/tags/types/tags_get_response.py +0 -41
- mirascope/api/_generated/tags/types/tags_list_response.py +0 -23
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +0 -41
- mirascope/api/_generated/tags/types/tags_update_response.py +0 -41
- mirascope/api/_generated/token_cost/__init__.py +0 -7
- mirascope/api/_generated/token_cost/client.py +0 -160
- mirascope/api/_generated/token_cost/raw_client.py +0 -264
- mirascope/api/_generated/token_cost/types/__init__.py +0 -8
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +0 -54
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +0 -52
- mirascope/api/_generated/traces/__init__.py +0 -97
- mirascope/api/_generated/traces/client.py +0 -1103
- mirascope/api/_generated/traces/raw_client.py +0 -2322
- mirascope/api/_generated/traces/types/__init__.py +0 -155
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +0 -29
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +0 -27
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +0 -23
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +0 -38
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +0 -19
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +0 -20
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +0 -29
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +0 -31
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +0 -23
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +0 -38
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +0 -19
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +0 -48
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +0 -23
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +0 -38
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +0 -19
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +0 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +0 -20
- mirascope/api/_generated/traces/types/traces_create_response.py +0 -24
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +0 -22
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +0 -60
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +0 -24
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +0 -22
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +0 -33
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +0 -88
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +0 -33
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -88
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +0 -25
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +0 -44
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +0 -50
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +0 -5
- mirascope/api/_generated/traces/types/traces_search_response.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +0 -50
- mirascope/api/_generated/types/__init__.py +0 -85
- mirascope/api/_generated/types/already_exists_error.py +0 -22
- mirascope/api/_generated/types/already_exists_error_tag.py +0 -5
- mirascope/api/_generated/types/bad_request_error_body.py +0 -50
- mirascope/api/_generated/types/click_house_error.py +0 -22
- mirascope/api/_generated/types/database_error.py +0 -22
- mirascope/api/_generated/types/database_error_tag.py +0 -5
- mirascope/api/_generated/types/date.py +0 -3
- mirascope/api/_generated/types/http_api_decode_error.py +0 -27
- mirascope/api/_generated/types/http_api_decode_error_tag.py +0 -5
- mirascope/api/_generated/types/immutable_resource_error.py +0 -22
- mirascope/api/_generated/types/internal_server_error_body.py +0 -49
- mirascope/api/_generated/types/issue.py +0 -38
- mirascope/api/_generated/types/issue_tag.py +0 -10
- mirascope/api/_generated/types/not_found_error_body.py +0 -22
- mirascope/api/_generated/types/not_found_error_tag.py +0 -5
- mirascope/api/_generated/types/number_from_string.py +0 -3
- mirascope/api/_generated/types/permission_denied_error.py +0 -22
- mirascope/api/_generated/types/permission_denied_error_tag.py +0 -5
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +0 -32
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +0 -7
- mirascope/api/_generated/types/pricing_unavailable_error.py +0 -23
- mirascope/api/_generated/types/property_key.py +0 -7
- mirascope/api/_generated/types/property_key_key.py +0 -25
- mirascope/api/_generated/types/property_key_key_tag.py +0 -5
- mirascope/api/_generated/types/rate_limit_error.py +0 -31
- mirascope/api/_generated/types/rate_limit_error_tag.py +0 -5
- mirascope/api/_generated/types/service_unavailable_error_body.py +0 -24
- mirascope/api/_generated/types/service_unavailable_error_tag.py +0 -7
- mirascope/api/_generated/types/stripe_error.py +0 -20
- mirascope/api/_generated/types/subscription_past_due_error.py +0 -31
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +0 -7
- mirascope/api/_generated/types/unauthorized_error_body.py +0 -21
- mirascope/api/_generated/types/unauthorized_error_tag.py +0 -5
- mirascope/api/client.py +0 -255
- mirascope/api/settings.py +0 -99
- mirascope/llm/formatting/output_parser.py +0 -178
- mirascope/llm/formatting/primitives.py +0 -192
- mirascope/llm/mcp/mcp_client.py +0 -130
- mirascope/llm/messages/_utils.py +0 -34
- mirascope/llm/models/thinking_config.py +0 -61
- mirascope/llm/prompts/prompts.py +0 -487
- mirascope/llm/providers/__init__.py +0 -62
- mirascope/llm/providers/anthropic/__init__.py +0 -11
- mirascope/llm/providers/anthropic/_utils/__init__.py +0 -27
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +0 -282
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +0 -266
- mirascope/llm/providers/anthropic/_utils/encode.py +0 -418
- mirascope/llm/providers/anthropic/_utils/errors.py +0 -46
- mirascope/llm/providers/anthropic/beta_provider.py +0 -374
- mirascope/llm/providers/anthropic/model_id.py +0 -23
- mirascope/llm/providers/anthropic/model_info.py +0 -87
- mirascope/llm/providers/anthropic/provider.py +0 -479
- mirascope/llm/providers/google/__init__.py +0 -6
- mirascope/llm/providers/google/_utils/errors.py +0 -50
- mirascope/llm/providers/google/model_id.py +0 -22
- mirascope/llm/providers/google/model_info.py +0 -63
- mirascope/llm/providers/google/provider.py +0 -492
- mirascope/llm/providers/mirascope/__init__.py +0 -5
- mirascope/llm/providers/mirascope/_utils.py +0 -73
- mirascope/llm/providers/mirascope/provider.py +0 -349
- mirascope/llm/providers/mlx/__init__.py +0 -9
- mirascope/llm/providers/mlx/_utils.py +0 -141
- mirascope/llm/providers/mlx/encoding/__init__.py +0 -8
- mirascope/llm/providers/mlx/encoding/base.py +0 -72
- mirascope/llm/providers/mlx/encoding/transformers.py +0 -150
- mirascope/llm/providers/mlx/mlx.py +0 -254
- mirascope/llm/providers/mlx/model_id.py +0 -17
- mirascope/llm/providers/mlx/provider.py +0 -452
- mirascope/llm/providers/model_id.py +0 -16
- mirascope/llm/providers/ollama/__init__.py +0 -7
- mirascope/llm/providers/ollama/provider.py +0 -71
- mirascope/llm/providers/openai/__init__.py +0 -15
- mirascope/llm/providers/openai/_utils/__init__.py +0 -5
- mirascope/llm/providers/openai/_utils/errors.py +0 -46
- mirascope/llm/providers/openai/completions/__init__.py +0 -7
- mirascope/llm/providers/openai/completions/base_provider.py +0 -542
- mirascope/llm/providers/openai/completions/provider.py +0 -22
- mirascope/llm/providers/openai/model_id.py +0 -31
- mirascope/llm/providers/openai/model_info.py +0 -303
- mirascope/llm/providers/openai/provider.py +0 -441
- mirascope/llm/providers/openai/responses/__init__.py +0 -5
- mirascope/llm/providers/openai/responses/provider.py +0 -513
- mirascope/llm/providers/provider_id.py +0 -24
- mirascope/llm/providers/provider_registry.py +0 -299
- mirascope/llm/providers/together/__init__.py +0 -7
- mirascope/llm/providers/together/provider.py +0 -40
- mirascope/llm/responses/usage.py +0 -95
- mirascope/ops/__init__.py +0 -111
- mirascope/ops/_internal/__init__.py +0 -5
- mirascope/ops/_internal/closure.py +0 -1169
- mirascope/ops/_internal/configuration.py +0 -177
- mirascope/ops/_internal/context.py +0 -76
- mirascope/ops/_internal/exporters/__init__.py +0 -26
- mirascope/ops/_internal/exporters/exporters.py +0 -395
- mirascope/ops/_internal/exporters/processors.py +0 -104
- mirascope/ops/_internal/exporters/types.py +0 -165
- mirascope/ops/_internal/exporters/utils.py +0 -29
- mirascope/ops/_internal/instrumentation/__init__.py +0 -8
- mirascope/ops/_internal/instrumentation/llm/__init__.py +0 -8
- mirascope/ops/_internal/instrumentation/llm/common.py +0 -530
- mirascope/ops/_internal/instrumentation/llm/cost.py +0 -190
- mirascope/ops/_internal/instrumentation/llm/encode.py +0 -238
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +0 -38
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +0 -31
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +0 -38
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +0 -18
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +0 -100
- mirascope/ops/_internal/instrumentation/llm/llm.py +0 -161
- mirascope/ops/_internal/instrumentation/llm/model.py +0 -1798
- mirascope/ops/_internal/instrumentation/llm/response.py +0 -521
- mirascope/ops/_internal/instrumentation/llm/serialize.py +0 -300
- mirascope/ops/_internal/propagation.py +0 -198
- mirascope/ops/_internal/protocols.py +0 -133
- mirascope/ops/_internal/session.py +0 -139
- mirascope/ops/_internal/spans.py +0 -232
- mirascope/ops/_internal/traced_calls.py +0 -375
- mirascope/ops/_internal/traced_functions.py +0 -523
- mirascope/ops/_internal/tracing.py +0 -353
- mirascope/ops/_internal/types.py +0 -13
- mirascope/ops/_internal/utils.py +0 -123
- mirascope/ops/_internal/versioned_calls.py +0 -512
- mirascope/ops/_internal/versioned_functions.py +0 -357
- mirascope/ops/_internal/versioning.py +0 -303
- mirascope/ops/exceptions.py +0 -21
- mirascope-2.0.0.dist-info/RECORD +0 -423
- /mirascope/llm/{providers → clients}/base/kwargs.py +0 -0
- /mirascope/llm/{providers → clients}/google/message.py +0 -0
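The most consequential API change in this diff is in `mirascope/llm/models/models.py`, shown below: the `providers` registry becomes `clients`, `Model` takes an explicit `provider` keyword instead of resolving a provider from the model id, and every call/stream method takes a keyword-only `messages` sequence in place of the old `content` parameter. The following sketch of the 2.0.0a0 call shape is assembled from the docstring examples that appear in the diff itself; the import path is an assumption, and nothing here is verified against the released wheel.

```python
# Hypothetical usage sketch based on the docstring examples in models.py below.
from mirascope import llm  # assumed import path


def recommend_book(genre: str) -> llm.Response:
    # Provider and model id are now separate keyword arguments; per the new
    # __init__, an unknown provider raises ValueError at construction time.
    model = llm.Model(provider="openai", model_id="gpt-4o-mini")
    message = llm.messages.user(f"Please recommend a book in {genre}.")
    # Call methods take a keyword-only `messages` sequence, not `content`.
    return model.call(messages=[message])


response = recommend_book("fantasy")

# Per the Model docstring, llm.model(...) overrides the context model:
with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
    response = recommend_book("fantasy")  # uses Claude
```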
mirascope/llm/models/models.py
CHANGED
|
@@ -2,21 +2,16 @@
|
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
|
-
from collections.abc import Sequence
|
|
6
|
-
from
|
|
7
|
-
from
|
|
8
|
-
from typing import overload
|
|
5
|
+
from collections.abc import Iterator, Sequence
|
|
6
|
+
from contextlib import contextmanager
|
|
7
|
+
from contextvars import ContextVar
|
|
8
|
+
from typing import TYPE_CHECKING, overload
|
|
9
9
|
from typing_extensions import Unpack
|
|
10
10
|
|
|
11
|
+
from ..clients import PROVIDERS, get_client
|
|
11
12
|
from ..context import Context, DepsT
|
|
12
|
-
from ..formatting import Format, FormattableT
|
|
13
|
-
from ..messages import Message, UserContent
|
|
14
|
-
from ..providers import (
|
|
15
|
-
ModelId,
|
|
16
|
-
Provider,
|
|
17
|
-
ProviderId,
|
|
18
|
-
get_provider_for_model,
|
|
19
|
-
)
|
|
13
|
+
from ..formatting import Format, FormattableT
|
|
14
|
+
from ..messages import Message, UserContent
|
|
20
15
|
from ..responses import (
|
|
21
16
|
AsyncContextResponse,
|
|
22
17
|
AsyncContextStreamResponse,
|
|
@@ -37,12 +32,19 @@ from ..tools import (
|
|
|
37
32
|
Tool,
|
|
38
33
|
Toolkit,
|
|
39
34
|
)
|
|
40
|
-
|
|
35
|
+
|
|
36
|
+
if TYPE_CHECKING:
|
|
37
|
+
from ..clients import (
|
|
38
|
+
ModelId,
|
|
39
|
+
Params,
|
|
40
|
+
Provider,
|
|
41
|
+
)
|
|
42
|
+
|
|
41
43
|
|
|
42
44
|
MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
|
|
43
45
|
|
|
44
46
|
|
|
45
|
-
def
|
|
47
|
+
def get_model_from_context() -> Model | None:
|
|
46
48
|
"""Get the LLM currently set via context, if any."""
|
|
47
49
|
return MODEL_CONTEXT.get()
|
|
48
50
|
|
|
@@ -66,14 +68,15 @@ class Model:
|
|
|
66
68
|
|
|
67
69
|
def recommend_book(genre: str) -> llm.Response:
|
|
68
70
|
# Uses context model if available, otherwise creates default
|
|
69
|
-
model = llm.use_model("openai
|
|
70
|
-
|
|
71
|
+
model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
|
|
72
|
+
message = llm.messages.user(f"Please recommend a book in {genre}.")
|
|
73
|
+
return model.call(messages=[message])
|
|
71
74
|
|
|
72
75
|
# Uses default model
|
|
73
76
|
response = recommend_book("fantasy")
|
|
74
77
|
|
|
75
78
|
# Override with different model
|
|
76
|
-
with llm.model(provider="anthropic", model_id="
|
|
79
|
+
with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
|
|
77
80
|
response = recommend_book("fantasy") # Uses Claude
|
|
78
81
|
```
|
|
79
82
|
|
|
@@ -84,78 +87,39 @@ class Model:
|
|
|
84
87
|
|
|
85
88
|
def recommend_book(genre: str) -> llm.Response:
|
|
86
89
|
# Hardcoded model, cannot be overridden by context
|
|
87
|
-
model = llm.Model("openai
|
|
88
|
-
|
|
90
|
+
model = llm.Model(provider="openai", model_id="gpt-4o-mini")
|
|
91
|
+
message = llm.messages.user(f"Please recommend a book in {genre}.")
|
|
92
|
+
return model.call(messages=[message])
|
|
89
93
|
```
|
|
90
94
|
"""
|
|
91
95
|
|
|
96
|
+
provider: Provider
|
|
97
|
+
"""The provider being used (e.g. `openai`)."""
|
|
98
|
+
|
|
92
99
|
model_id: ModelId
|
|
93
|
-
"""The model being used (e.g. `
|
|
100
|
+
"""The model being used (e.g. `gpt-4o-mini`)."""
|
|
94
101
|
|
|
95
102
|
params: Params
|
|
96
103
|
"""The default parameters for the model (temperature, max_tokens, etc.)."""
|
|
97
104
|
|
|
98
105
|
def __init__(
|
|
99
106
|
self,
|
|
107
|
+
provider: Provider,
|
|
100
108
|
model_id: ModelId,
|
|
101
109
|
**params: Unpack[Params],
|
|
102
110
|
) -> None:
|
|
103
|
-
"""Initialize the Model with model_id and optional params."""
|
|
104
|
-
if
|
|
105
|
-
raise ValueError(
|
|
106
|
-
|
|
107
|
-
f"(e.g., 'openai/gpt-4'). Got: '{model_id}'"
|
|
108
|
-
)
|
|
111
|
+
"""Initialize the Model with provider, model_id, and optional params."""
|
|
112
|
+
if provider not in PROVIDERS:
|
|
113
|
+
raise ValueError(f"Unknown provider: {provider}")
|
|
114
|
+
self.provider = provider
|
|
109
115
|
self.model_id = model_id
|
|
110
116
|
self.params = params
|
|
111
|
-
self._token_stack: list[Token[Model | None]] = []
|
|
112
|
-
|
|
113
|
-
@property
|
|
114
|
-
def provider(self) -> Provider:
|
|
115
|
-
"""The provider being used (e.g. an `OpenAIProvider`).
|
|
116
|
-
|
|
117
|
-
This property dynamically looks up the provider from the registry based on
|
|
118
|
-
the current model_id. This allows provider overrides via `llm.register_provider()`
|
|
119
|
-
to take effect even after the model instance is created.
|
|
120
|
-
|
|
121
|
-
Raises:
|
|
122
|
-
NoRegisteredProviderError: If no provider is available for the model_id
|
|
123
|
-
"""
|
|
124
|
-
return get_provider_for_model(self.model_id)
|
|
125
|
-
|
|
126
|
-
@property
|
|
127
|
-
def provider_id(self) -> ProviderId:
|
|
128
|
-
"""The string id of the provider being used (e.g. `"openai"`).
|
|
129
|
-
|
|
130
|
-
This property returns the `id` field of the dynamically resolved provider.
|
|
131
|
-
|
|
132
|
-
Raises:
|
|
133
|
-
NoRegisteredProviderError: If no provider is available for the model_id
|
|
134
|
-
"""
|
|
135
|
-
return self.provider.id
|
|
136
|
-
|
|
137
|
-
def __enter__(self) -> Model:
|
|
138
|
-
"""Enter the context manager, setting this model in context."""
|
|
139
|
-
token = MODEL_CONTEXT.set(self)
|
|
140
|
-
self._token_stack.append(token)
|
|
141
|
-
return self
|
|
142
|
-
|
|
143
|
-
def __exit__(
|
|
144
|
-
self,
|
|
145
|
-
exc_type: type[BaseException] | None,
|
|
146
|
-
exc_val: BaseException | None,
|
|
147
|
-
exc_tb: TracebackType | None,
|
|
148
|
-
) -> None:
|
|
149
|
-
"""Exit the context manager, resetting the model context."""
|
|
150
|
-
if self._token_stack:
|
|
151
|
-
token = self._token_stack.pop()
|
|
152
|
-
MODEL_CONTEXT.reset(token)
|
|
153
117
|
|
|
154
118
|
@overload
|
|
155
119
|
def call(
|
|
156
120
|
self,
|
|
157
|
-
content: UserContent | Sequence[Message],
|
|
158
121
|
*,
|
|
122
|
+
messages: Sequence[Message],
|
|
159
123
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
160
124
|
format: None = None,
|
|
161
125
|
) -> Response:
|
|
@@ -165,8 +129,8 @@ class Model:
|
|
|
165
129
|
@overload
|
|
166
130
|
def call(
|
|
167
131
|
self,
|
|
168
|
-
content: UserContent | Sequence[Message],
|
|
169
132
|
*,
|
|
133
|
+
messages: Sequence[Message],
|
|
170
134
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
171
135
|
format: type[FormattableT] | Format[FormattableT],
|
|
172
136
|
) -> Response[FormattableT]:
|
|
@@ -176,41 +140,32 @@ class Model:
|
|
|
176
140
|
@overload
|
|
177
141
|
def call(
|
|
178
142
|
self,
|
|
179
|
-
content: UserContent | Sequence[Message],
|
|
180
143
|
*,
|
|
144
|
+
messages: Sequence[Message],
|
|
181
145
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
182
|
-
format: type[FormattableT]
|
|
183
|
-
| Format[FormattableT]
|
|
184
|
-
| OutputParser[FormattableT]
|
|
185
|
-
| None,
|
|
146
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
186
147
|
) -> Response | Response[FormattableT]:
|
|
187
148
|
"""Generate an `llm.Response` with an optional response format."""
|
|
188
149
|
...
|
|
189
150
|
|
|
190
151
|
def call(
|
|
191
152
|
self,
|
|
192
|
-
content: UserContent | Sequence[Message],
|
|
193
153
|
*,
|
|
154
|
+
messages: Sequence[Message],
|
|
194
155
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
195
|
-
format: type[FormattableT]
|
|
196
|
-
| Format[FormattableT]
|
|
197
|
-
| OutputParser[FormattableT]
|
|
198
|
-
| None = None,
|
|
156
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
199
157
|
) -> Response | Response[FormattableT]:
|
|
200
158
|
"""Generate an `llm.Response` by synchronously calling this model's LLM provider.
|
|
201
159
|
|
|
202
160
|
Args:
|
|
203
|
-
|
|
204
|
-
message), UserContent, a sequence of UserContent, or a sequence of
|
|
205
|
-
Messages for full control.
|
|
161
|
+
messages: Messages to send to the LLM.
|
|
206
162
|
tools: Optional tools that the model may invoke.
|
|
207
163
|
format: Optional response format specifier.
|
|
208
164
|
|
|
209
165
|
Returns:
|
|
210
166
|
An `llm.Response` object containing the LLM-generated content.
|
|
211
167
|
"""
|
|
212
|
-
|
|
213
|
-
return self.provider.call(
|
|
168
|
+
return get_client(self.provider).call(
|
|
214
169
|
model_id=self.model_id,
|
|
215
170
|
messages=messages,
|
|
216
171
|
tools=tools,
|
|
@@ -221,8 +176,8 @@ class Model:
|
|
|
221
176
|
@overload
|
|
222
177
|
async def call_async(
|
|
223
178
|
self,
|
|
224
|
-
content: UserContent | Sequence[Message],
|
|
225
179
|
*,
|
|
180
|
+
messages: Sequence[Message],
|
|
226
181
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
227
182
|
format: None = None,
|
|
228
183
|
) -> AsyncResponse:
|
|
@@ -232,8 +187,8 @@ class Model:
|
|
|
232
187
|
@overload
|
|
233
188
|
async def call_async(
|
|
234
189
|
self,
|
|
235
|
-
content: UserContent | Sequence[Message],
|
|
236
190
|
*,
|
|
191
|
+
messages: Sequence[Message],
|
|
237
192
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
238
193
|
format: type[FormattableT] | Format[FormattableT],
|
|
239
194
|
) -> AsyncResponse[FormattableT]:
|
|
@@ -243,41 +198,32 @@ class Model:
|
|
|
243
198
|
@overload
|
|
244
199
|
async def call_async(
|
|
245
200
|
self,
|
|
246
|
-
content: UserContent | Sequence[Message],
|
|
247
201
|
*,
|
|
202
|
+
messages: Sequence[Message],
|
|
248
203
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
249
|
-
format: type[FormattableT]
|
|
250
|
-
| Format[FormattableT]
|
|
251
|
-
| OutputParser[FormattableT]
|
|
252
|
-
| None,
|
|
204
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
253
205
|
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
254
206
|
"""Generate an `llm.AsyncResponse` with an optional response format."""
|
|
255
207
|
...
|
|
256
208
|
|
|
257
209
|
async def call_async(
|
|
258
210
|
self,
|
|
259
|
-
content: UserContent | Sequence[Message],
|
|
260
211
|
*,
|
|
212
|
+
messages: Sequence[Message],
|
|
261
213
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
262
|
-
format: type[FormattableT]
|
|
263
|
-
| Format[FormattableT]
|
|
264
|
-
| OutputParser[FormattableT]
|
|
265
|
-
| None = None,
|
|
214
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
266
215
|
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
267
216
|
"""Generate an `llm.AsyncResponse` by asynchronously calling this model's LLM provider.
|
|
268
217
|
|
|
269
218
|
Args:
|
|
270
|
-
|
|
271
|
-
message), UserContent, a sequence of UserContent, or a sequence of
|
|
272
|
-
Messages for full control.
|
|
219
|
+
messages: Messages to send to the LLM.
|
|
273
220
|
tools: Optional tools that the model may invoke.
|
|
274
221
|
format: Optional response format specifier.
|
|
275
222
|
|
|
276
223
|
Returns:
|
|
277
224
|
An `llm.AsyncResponse` object containing the LLM-generated content.
|
|
278
225
|
"""
|
|
279
|
-
|
|
280
|
-
return await self.provider.call_async(
|
|
226
|
+
return await get_client(self.provider).call_async(
|
|
281
227
|
model_id=self.model_id,
|
|
282
228
|
messages=messages,
|
|
283
229
|
tools=tools,
|
|
@@ -288,8 +234,8 @@ class Model:
|
|
|
288
234
|
@overload
|
|
289
235
|
def stream(
|
|
290
236
|
self,
|
|
291
|
-
content: UserContent | Sequence[Message],
|
|
292
237
|
*,
|
|
238
|
+
messages: Sequence[Message],
|
|
293
239
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
294
240
|
format: None = None,
|
|
295
241
|
) -> StreamResponse:
|
|
@@ -299,8 +245,8 @@ class Model:
|
|
|
299
245
|
@overload
|
|
300
246
|
def stream(
|
|
301
247
|
self,
|
|
302
|
-
content: UserContent | Sequence[Message],
|
|
303
248
|
*,
|
|
249
|
+
messages: Sequence[Message],
|
|
304
250
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
305
251
|
format: type[FormattableT] | Format[FormattableT],
|
|
306
252
|
) -> StreamResponse[FormattableT]:
|
|
@@ -310,41 +256,32 @@ class Model:
|
|
|
310
256
|
@overload
|
|
311
257
|
def stream(
|
|
312
258
|
self,
|
|
313
|
-
content: UserContent | Sequence[Message],
|
|
314
259
|
*,
|
|
260
|
+
messages: Sequence[Message],
|
|
315
261
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
316
|
-
format: type[FormattableT]
|
|
317
|
-
| Format[FormattableT]
|
|
318
|
-
| OutputParser[FormattableT]
|
|
319
|
-
| None,
|
|
262
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
320
263
|
) -> StreamResponse | StreamResponse[FormattableT]:
|
|
321
264
|
"""Stream an `llm.StreamResponse` with an optional response format."""
|
|
322
265
|
...
|
|
323
266
|
|
|
324
267
|
def stream(
|
|
325
268
|
self,
|
|
326
|
-
content: UserContent | Sequence[Message],
|
|
327
269
|
*,
|
|
270
|
+
messages: Sequence[Message],
|
|
328
271
|
tools: Sequence[Tool] | Toolkit | None = None,
|
|
329
|
-
format: type[FormattableT]
|
|
330
|
-
| Format[FormattableT]
|
|
331
|
-
| OutputParser[FormattableT]
|
|
332
|
-
| None = None,
|
|
272
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
333
273
|
) -> StreamResponse | StreamResponse[FormattableT]:
|
|
334
274
|
"""Generate an `llm.StreamResponse` by synchronously streaming from this model's LLM provider.
|
|
335
275
|
|
|
336
276
|
Args:
|
|
337
|
-
|
|
338
|
-
message), UserContent, a sequence of UserContent, or a sequence of
|
|
339
|
-
Messages for full control.
|
|
277
|
+
messages: Messages to send to the LLM.
|
|
340
278
|
tools: Optional tools that the model may invoke.
|
|
341
279
|
format: Optional response format specifier.
|
|
342
280
|
|
|
343
281
|
Returns:
|
|
344
282
|
An `llm.StreamResponse` object for iterating over the LLM-generated content.
|
|
345
283
|
"""
|
|
346
|
-
|
|
347
|
-
return self.provider.stream(
|
|
284
|
+
return get_client(self.provider).stream(
|
|
348
285
|
model_id=self.model_id,
|
|
349
286
|
messages=messages,
|
|
350
287
|
tools=tools,
|
|
@@ -355,8 +292,8 @@ class Model:
|
|
|
355
292
|
@overload
|
|
356
293
|
async def stream_async(
|
|
357
294
|
self,
|
|
358
|
-
content: UserContent | Sequence[Message],
|
|
359
295
|
*,
|
|
296
|
+
messages: list[Message],
|
|
360
297
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
361
298
|
format: None = None,
|
|
362
299
|
) -> AsyncStreamResponse:
|
|
@@ -366,8 +303,8 @@ class Model:
|
|
|
366
303
|
@overload
|
|
367
304
|
async def stream_async(
|
|
368
305
|
self,
|
|
369
|
-
content: UserContent | Sequence[Message],
|
|
370
306
|
*,
|
|
307
|
+
messages: list[Message],
|
|
371
308
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
372
309
|
format: type[FormattableT] | Format[FormattableT],
|
|
373
310
|
) -> AsyncStreamResponse[FormattableT]:
|
|
@@ -377,41 +314,32 @@ class Model:
|
|
|
377
314
|
@overload
|
|
378
315
|
async def stream_async(
|
|
379
316
|
self,
|
|
380
|
-
content: UserContent | Sequence[Message],
|
|
381
317
|
*,
|
|
318
|
+
messages: list[Message],
|
|
382
319
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
383
|
-
format: type[FormattableT]
|
|
384
|
-
| Format[FormattableT]
|
|
385
|
-
| OutputParser[FormattableT]
|
|
386
|
-
| None,
|
|
320
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
387
321
|
) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
|
|
388
322
|
"""Stream an `llm.AsyncStreamResponse` with an optional response format."""
|
|
389
323
|
...
|
|
390
324
|
|
|
391
325
|
async def stream_async(
|
|
392
326
|
self,
|
|
393
|
-
content: UserContent | Sequence[Message],
|
|
394
327
|
*,
|
|
328
|
+
messages: list[Message],
|
|
395
329
|
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
396
|
-
format: type[FormattableT]
|
|
397
|
-
| Format[FormattableT]
|
|
398
|
-
| OutputParser[FormattableT]
|
|
399
|
-
| None = None,
|
|
330
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
400
331
|
) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
|
|
401
332
|
"""Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this model's LLM provider.
|
|
402
333
|
|
|
403
334
|
Args:
|
|
404
|
-
|
|
405
|
-
message), UserContent, a sequence of UserContent, or a sequence of
|
|
406
|
-
Messages for full control.
|
|
335
|
+
messages: Messages to send to the LLM.
|
|
407
336
|
tools: Optional tools that the model may invoke.
|
|
408
337
|
format: Optional response format specifier.
|
|
409
338
|
|
|
410
339
|
Returns:
|
|
411
340
|
An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
|
|
412
341
|
"""
|
|
413
|
-
|
|
414
|
-
return await self.provider.stream_async(
|
|
342
|
+
return await get_client(self.provider).stream_async(
|
|
415
343
|
model_id=self.model_id,
|
|
416
344
|
messages=messages,
|
|
417
345
|
tools=tools,
|
|
@@ -422,9 +350,9 @@ class Model:
|
|
|
422
350
|
@overload
|
|
423
351
|
def context_call(
|
|
424
352
|
self,
|
|
425
|
-
content: UserContent | Sequence[Message],
|
|
426
353
|
*,
|
|
427
354
|
ctx: Context[DepsT],
|
|
355
|
+
messages: Sequence[Message],
|
|
428
356
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
429
357
|
| ContextToolkit[DepsT]
|
|
430
358
|
| None = None,
|
|
@@ -436,9 +364,9 @@ class Model:
|
|
|
436
364
|
@overload
|
|
437
365
|
def context_call(
|
|
438
366
|
self,
|
|
439
|
-
content: UserContent | Sequence[Message],
|
|
440
367
|
*,
|
|
441
368
|
ctx: Context[DepsT],
|
|
369
|
+
messages: Sequence[Message],
|
|
442
370
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
443
371
|
| ContextToolkit[DepsT]
|
|
444
372
|
| None = None,
|
|
@@ -450,48 +378,39 @@ class Model:
|
|
|
450
378
|
@overload
|
|
451
379
|
def context_call(
|
|
452
380
|
self,
|
|
453
|
-
content: UserContent | Sequence[Message],
|
|
454
381
|
*,
|
|
455
382
|
ctx: Context[DepsT],
|
|
383
|
+
messages: Sequence[Message],
|
|
456
384
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
457
385
|
| ContextToolkit[DepsT]
|
|
458
386
|
| None = None,
|
|
459
|
-
format: type[FormattableT]
|
|
460
|
-
| Format[FormattableT]
|
|
461
|
-
| OutputParser[FormattableT]
|
|
462
|
-
| None,
|
|
387
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
463
388
|
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
464
389
|
"""Generate an `llm.ContextResponse` with an optional response format."""
|
|
465
390
|
...
|
|
466
391
|
|
|
467
392
|
def context_call(
|
|
468
393
|
self,
|
|
469
|
-
content: UserContent | Sequence[Message],
|
|
470
394
|
*,
|
|
471
395
|
ctx: Context[DepsT],
|
|
396
|
+
messages: Sequence[Message],
|
|
472
397
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
473
398
|
| ContextToolkit[DepsT]
|
|
474
399
|
| None = None,
|
|
475
|
-
format: type[FormattableT]
|
|
476
|
-
| Format[FormattableT]
|
|
477
|
-
| OutputParser[FormattableT]
|
|
478
|
-
| None = None,
|
|
400
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
479
401
|
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
480
402
|
"""Generate an `llm.ContextResponse` by synchronously calling this model's LLM provider.
|
|
481
403
|
|
|
482
404
|
Args:
|
|
483
|
-
content: Content to send to the LLM. Can be a string (converted to user
|
|
484
|
-
message), UserContent, a sequence of UserContent, or a sequence of
|
|
485
|
-
Messages for full control.
|
|
486
405
|
ctx: Context object with dependencies for tools.
|
|
406
|
+
messages: Messages to send to the LLM.
|
|
487
407
|
tools: Optional tools that the model may invoke.
|
|
488
408
|
format: Optional response format specifier.
|
|
489
409
|
|
|
490
410
|
Returns:
|
|
491
411
|
An `llm.ContextResponse` object containing the LLM-generated content.
|
|
492
412
|
"""
|
|
493
|
-
|
|
494
|
-
return self.provider.context_call(
|
|
413
|
+
return get_client(self.provider).context_call(
|
|
495
414
|
ctx=ctx,
|
|
496
415
|
model_id=self.model_id,
|
|
497
416
|
messages=messages,
|
|
@@ -503,9 +422,9 @@ class Model:
|
|
|
503
422
|
@overload
|
|
504
423
|
async def context_call_async(
|
|
505
424
|
self,
|
|
506
|
-
content: UserContent | Sequence[Message],
|
|
507
425
|
*,
|
|
508
426
|
ctx: Context[DepsT],
|
|
427
|
+
messages: Sequence[Message],
|
|
509
428
|
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
510
429
|
| AsyncContextToolkit[DepsT]
|
|
511
430
|
| None = None,
|
|
@@ -517,9 +436,9 @@ class Model:
|
|
|
517
436
|
@overload
|
|
518
437
|
async def context_call_async(
|
|
519
438
|
self,
|
|
520
|
-
content: UserContent | Sequence[Message],
|
|
521
439
|
*,
|
|
522
440
|
ctx: Context[DepsT],
|
|
441
|
+
messages: Sequence[Message],
|
|
523
442
|
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
524
443
|
| AsyncContextToolkit[DepsT]
|
|
525
444
|
| None = None,
|
|
@@ -531,48 +450,39 @@ class Model:
|
|
|
531
450
|
@overload
|
|
532
451
|
async def context_call_async(
|
|
533
452
|
self,
|
|
534
|
-
content: UserContent | Sequence[Message],
|
|
535
453
|
*,
|
|
536
454
|
ctx: Context[DepsT],
|
|
455
|
+
messages: Sequence[Message],
|
|
537
456
|
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
538
457
|
| AsyncContextToolkit[DepsT]
|
|
539
458
|
| None = None,
|
|
540
|
-
format: type[FormattableT]
|
|
541
|
-
| Format[FormattableT]
|
|
542
|
-
| OutputParser[FormattableT]
|
|
543
|
-
| None,
|
|
459
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
544
460
|
) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
|
|
545
461
|
"""Generate an `llm.AsyncContextResponse` with an optional response format."""
|
|
546
462
|
...
|
|
547
463
|
|
|
548
464
|
async def context_call_async(
|
|
549
465
|
self,
|
|
550
|
-
content: UserContent | Sequence[Message],
|
|
551
466
|
*,
|
|
552
467
|
ctx: Context[DepsT],
|
|
468
|
+
messages: Sequence[Message],
|
|
553
469
|
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
554
470
|
| AsyncContextToolkit[DepsT]
|
|
555
471
|
| None = None,
|
|
556
|
-
format: type[FormattableT]
|
|
557
|
-
| Format[FormattableT]
|
|
558
|
-
| OutputParser[FormattableT]
|
|
559
|
-
| None = None,
|
|
472
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
560
473
|
) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
|
|
561
474
|
"""Generate an `llm.AsyncContextResponse` by asynchronously calling this model's LLM provider.
|
|
562
475
|
|
|
563
476
|
Args:
|
|
564
|
-
content: Content to send to the LLM. Can be a string (converted to user
|
|
565
|
-
message), UserContent, a sequence of UserContent, or a sequence of
|
|
566
|
-
Messages for full control.
|
|
567
477
|
ctx: Context object with dependencies for tools.
|
|
478
|
+
messages: Messages to send to the LLM.
|
|
568
479
|
tools: Optional tools that the model may invoke.
|
|
569
480
|
format: Optional response format specifier.
|
|
570
481
|
|
|
571
482
|
Returns:
|
|
572
483
|
An `llm.AsyncContextResponse` object containing the LLM-generated content.
|
|
573
484
|
"""
|
|
574
|
-
|
|
575
|
-
return await self.provider.context_call_async(
|
|
485
|
+
return await get_client(self.provider).context_call_async(
|
|
576
486
|
ctx=ctx,
|
|
577
487
|
model_id=self.model_id,
|
|
578
488
|
messages=messages,
|
|
@@ -584,9 +494,9 @@ class Model:
|
|
|
584
494
|
@overload
|
|
585
495
|
def context_stream(
|
|
586
496
|
self,
|
|
587
|
-
content: UserContent | Sequence[Message],
|
|
588
497
|
*,
|
|
589
498
|
ctx: Context[DepsT],
|
|
499
|
+
messages: Sequence[Message],
|
|
590
500
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
591
501
|
| ContextToolkit[DepsT]
|
|
592
502
|
| None = None,
|
|
@@ -598,9 +508,9 @@ class Model:
|
|
|
598
508
|
@overload
|
|
599
509
|
def context_stream(
|
|
600
510
|
self,
|
|
601
|
-
content: UserContent | Sequence[Message],
|
|
602
511
|
*,
|
|
603
512
|
ctx: Context[DepsT],
|
|
513
|
+
messages: Sequence[Message],
|
|
604
514
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
605
515
|
| ContextToolkit[DepsT]
|
|
606
516
|
| None = None,
|
|
@@ -612,16 +522,13 @@ class Model:
|
|
|
612
522
|
@overload
|
|
613
523
|
def context_stream(
|
|
614
524
|
self,
|
|
615
|
-
content: UserContent | Sequence[Message],
|
|
616
525
|
*,
|
|
617
526
|
ctx: Context[DepsT],
|
|
527
|
+
messages: Sequence[Message],
|
|
618
528
|
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
619
529
|
| ContextToolkit[DepsT]
|
|
620
530
|
| None = None,
|
|
621
|
-
format: type[FormattableT]
|
|
622
|
-
| Format[FormattableT]
|
|
623
|
-
| OutputParser[FormattableT]
|
|
624
|
-
| None,
|
|
531
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
625
532
|
) -> (
|
|
626
533
|
ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
|
|
627
534
|
):
|
|
@@ -630,34 +537,28 @@ class Model:

     def context_stream(
         self,
-        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
+        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
     ) -> (
         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
     ):
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from this model's LLM provider.

         Args:
-            content: Content to send to the LLM. Can be a string (converted to user
-                message), UserContent, a sequence of UserContent, or a sequence of
-                Messages for full control.
             ctx: Context object with dependencies for tools.
+            messages: Messages to send to the LLM.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.

         Returns:
             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
         """
-
-        return self.provider.context_stream(
+        return get_client(self.provider).context_stream(
             ctx=ctx,
             model_id=self.model_id,
             messages=messages,
@@ -669,9 +570,9 @@ class Model:
     @overload
     async def context_stream_async(
         self,
-        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
+        messages: list[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -683,9 +584,9 @@ class Model:
     @overload
     async def context_stream_async(
         self,
-        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
+        messages: list[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -697,16 +598,13 @@ class Model:
     @overload
     async def context_stream_async(
         self,
-        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
+        messages: list[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None,
+        format: type[FormattableT] | Format[FormattableT] | None,
     ) -> (
         AsyncContextStreamResponse[DepsT, None]
         | AsyncContextStreamResponse[DepsT, FormattableT]
@@ -716,16 +614,13 @@ class Model:

     async def context_stream_async(
         self,
-        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
+        messages: list[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
     ) -> (
         AsyncContextStreamResponse[DepsT, None]
         | AsyncContextStreamResponse[DepsT, FormattableT]
@@ -733,18 +628,15 @@ class Model:
         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this model's LLM provider.

         Args:
-            content: Content to send to the LLM. Can be a string (converted to user
-                message), UserContent, a sequence of UserContent, or a sequence of
-                Messages for full control.
             ctx: Context object with dependencies for tools.
+            messages: Messages to send to the LLM.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.

         Returns:
             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
-
-        return await self.provider.context_stream_async(
+        return await get_client(self.provider).context_stream_async(
             ctx=ctx,
             model_id=self.model_id,
             messages=messages,
@@ -804,7 +696,7 @@ class Model:
         Returns:
             A new `llm.Response` object containing the extended conversation.
         """
-        return self.provider.resume(
+        return get_client(self.provider).resume(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -862,7 +754,7 @@ class Model:
         Returns:
             A new `llm.AsyncResponse` object containing the extended conversation.
         """
-        return await self.provider.resume_async(
+        return await get_client(self.provider).resume_async(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -925,7 +817,7 @@ class Model:
         Returns:
             A new `llm.ContextResponse` object containing the extended conversation.
         """
-        return self.provider.context_resume(
+        return get_client(self.provider).context_resume(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -991,7 +883,7 @@ class Model:
         Returns:
             A new `llm.AsyncContextResponse` object containing the extended conversation.
         """
-        return await self.provider.context_resume_async(
+        return await get_client(self.provider).context_resume_async(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -1050,7 +942,7 @@ class Model:
         Returns:
             A new `llm.StreamResponse` object for streaming the extended conversation.
         """
-        return self.provider.resume_stream(
+        return get_client(self.provider).resume_stream(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -1108,7 +1000,7 @@ class Model:
         Returns:
             A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
         """
-        return await self.provider.resume_stream_async(
+        return await get_client(self.provider).resume_stream_async(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -1177,7 +1069,7 @@ class Model:
         Returns:
             A new `llm.ContextStreamResponse` object for streaming the extended conversation.
         """
-        return self.provider.context_resume_stream(
+        return get_client(self.provider).context_resume_stream(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -1249,7 +1141,7 @@ class Model:
         Returns:
             A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
         """
-        return await self.provider.context_resume_stream_async(
+        return await get_client(self.provider).context_resume_stream_async(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
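Every hunk in this stretch makes the same substitution: `Model` no longer holds a provider object, so each call resolves a client from the provider name via `get_client(self.provider)`. `get_client` itself is not shown in this section, so the registry below is a hypothetical sketch of that dispatch with stand-in client classes, keeping only the documented behavior that an unsupported provider raises `ValueError`:

```python
from functools import lru_cache


class AnthropicClient: ...  # stand-ins for the real per-provider clients
class GoogleClient: ...
class OpenAICompletionsClient: ...

# Provider names taken from this diff's docstrings; the mapping is assumed.
_CLIENTS = {
    "anthropic": AnthropicClient,
    "google": GoogleClient,
    "openai:completions": OpenAICompletionsClient,
}


@lru_cache(maxsize=None)
def get_client(provider: str):
    """Resolve a provider name to a cached client instance."""
    if provider not in _CLIENTS:
        raise ValueError(f"Unsupported provider: {provider}")
    return _CLIENTS[provider]()
```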
@@ -1258,138 +1150,71 @@ class Model:
         )


+@contextmanager
 def model(
+    *,
+    provider: Provider,
     model_id: ModelId,
     **params: Unpack[Params],
-) ->
-    """
-
-    This is just an alias for the `Model` constructor, added for convenience.
-
-    This function returns a `Model` instance that implements the context manager protocol.
-    When used with a `with` statement, the model will be set in context and used by both
-    `llm.use_model()` and `llm.call()` within that context. This allows you to override
-    the default model at runtime without modifying function definitions.
-
-    The returned `Model` instance can also be stored and reused:
+) -> Iterator[None]:
+    """Set a model in context for the duration of the context manager.

-
-
-        # Use directly
-        response = m.call("Hello!")
-        # Or use as context manager
-        with m:
-            response = recommend_book("fantasy")
-        ```
-
-    When a model is set in context, it completely overrides any model ID or parameters
-    specified in `llm.use_model()` or `llm.call()`. The context model's parameters take
-    precedence, and any unset parameters use default values.
+    This context manager sets a model that will be used by `llm.use_model()` calls
+    within the context. This allows you to override the default model at runtime.

     Args:
-
+        provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
+        model_id: The specific model identifier for the chosen provider.
         **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.

-    Returns:
-        A Model instance that can be used as a context manager.
-
     Raises:
         ValueError: If the specified provider is not supported.

     Example:
-        With `llm.use_model()`

         ```python
         import mirascope.llm as llm

         def recommend_book(genre: str) -> llm.Response:
-            model = llm.use_model("openai
-
+            model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+            message = llm.messages.user(f"Please recommend a book in {genre}.")
+            return model.call(messages=[message])

         # Override the default model at runtime
-        with llm.model("anthropic
-            response = recommend_book("fantasy")  # Uses Claude instead of GPT
-        ```
-
-    Example:
-        With `llm.call()`
-
-        ```python
-        import mirascope.llm as llm
-
-        @llm.call("openai/gpt-5-mini")
-        def recommend_book(genre: str):
-            return f"Please recommend a {genre} book."
-
-        # Override the decorated model at runtime
-        with llm.model("anthropic/claude-sonnet-4-0"):
+        with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
             response = recommend_book("fantasy")  # Uses Claude instead of GPT
         ```
-
-    Example:
-        Storing and reusing Model instances
-
-        ```python
-        import mirascope.llm as llm
-
-        # Create and store a model
-        m = llm.model("openai/gpt-4o")
-
-        # Use it directly
-        response = m.call("Hello!")
-
-        # Or use it as a context manager
-        with m:
-            response = recommend_book("fantasy")
-        ```
-    """
-    return Model(model_id, **params)
-
-
-@overload
-def use_model(
-    model: ModelId,
-    **params: Unpack[Params],
-) -> Model:
-    """Get the model from context if available, otherwise create a new `Model`.
-
-    This overload accepts a model ID string and allows additional params.
     """
-
+    token = MODEL_CONTEXT.set(Model(provider, model_id, **params))
+    try:
+        yield
+    finally:
+        MODEL_CONTEXT.reset(token)


-@overload
 def use_model(
-
-
-
-
-    This overload accepts a `Model` instance and does not allow additional params.
-    """
-    ...
-
-
-def use_model(
-    model: Model | ModelId,
+    *,
+    provider: Provider,
+    model_id: ModelId,
     **params: Unpack[Params],
 ) -> Model:
-    """Get the model from context if available, otherwise create a new
+    """Get the model from context if available, otherwise create a new Model.

     This function checks if a model has been set in the context (via `llm.model()`
-    context manager). If a model is found in the context, it returns that model
-
-
+    context manager). If a model is found in the context, it returns that model.
+    Otherwise, it creates and returns a new `llm.Model` instance with the provided
+    arguments as defaults.

     This allows you to write functions that work with a default model but can be
     overridden at runtime using the `llm.model()` context manager.

     Args:
-
+        provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
+        model_id: The specific model identifier for the chosen provider.
         **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
-        Only available when passing a model ID string

     Returns:
-        An `llm.Model` instance from context
+        An `llm.Model` instance from context or a new instance with the specified settings.

     Raises:
         ValueError: If the specified provider is not supported.
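The rewritten `model()` is now a true context manager built on `contextvars`: it pushes a `Model` onto `MODEL_CONTEXT` and restores the previous value on exit, which is what makes `use_model()`'s fallback (and nested overrides) work. Neither `MODEL_CONTEXT` nor `get_model_from_context` is defined in this hunk, so the sketch below assumes their shape:

```python
from collections.abc import Iterator
from contextlib import contextmanager
from contextvars import ContextVar


class Model:  # stand-in for llm.Model; only the constructor matters here
    def __init__(self, provider: str, model_id: str, **params: object) -> None:
        self.provider, self.model_id, self.params = provider, model_id, params


# Assumed definition: a ContextVar holding the current override, if any.
MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)


def get_model_from_context() -> Model | None:
    """Assumed helper: return the model set by the `model()` context manager."""
    return MODEL_CONTEXT.get()


@contextmanager
def model(*, provider: str, model_id: str, **params: object) -> Iterator[None]:
    # set() returns a token, so exiting restores whatever was active before;
    # nested `with model(...)` blocks therefore behave like a stack.
    token = MODEL_CONTEXT.set(Model(provider, model_id, **params))
    try:
        yield
    finally:
        MODEL_CONTEXT.reset(token)
```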
@@ -1400,20 +1225,19 @@ def use_model(
         import mirascope.llm as llm

         def recommend_book(genre: str) -> llm.Response:
-            model = llm.use_model("openai
-
+            model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+            message = llm.messages.user(f"Please recommend a book in {genre}.")
+            return model.call(messages=[message])

-        # Uses the default model (gpt-
+        # Uses the default model (gpt-4o-mini)
         response = recommend_book("fantasy")

         # Override with a different model
-        with llm.model(provider="anthropic", model_id="
+        with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
             response = recommend_book("fantasy")  # Uses Claude instead
         ```
     """
-    context_model =
+    context_model = get_model_from_context()
     if context_model is not None:
         return context_model
-
-    return Model(model, **params)
-    return model
+    return Model(provider, model_id, **params)