mirascope 2.0.0__py3-none-any.whl → 2.0.0a1__py3-none-any.whl
This diff shows the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
- mirascope/__init__.py +2 -11
- mirascope/graphs/__init__.py +22 -0
- mirascope/graphs/finite_state_machine.py +625 -0
- mirascope/llm/__init__.py +15 -96
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +1 -2
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +58 -84
- mirascope/llm/calls/decorator.py +120 -140
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/_missing_import_stubs.py +47 -0
- mirascope/llm/clients/anthropic/__init__.py +25 -0
- mirascope/llm/{providers/openai/completions → clients/anthropic}/_utils/__init__.py +0 -2
- mirascope/llm/{providers → clients}/anthropic/_utils/decode.py +22 -66
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/{providers → clients}/base/__init__.py +5 -4
- mirascope/llm/{providers → clients}/base/_utils.py +17 -78
- mirascope/llm/{providers/base/base_provider.py → clients/base/client.py} +145 -468
- mirascope/llm/{models → clients/base}/params.py +37 -16
- mirascope/llm/clients/google/__init__.py +20 -0
- mirascope/llm/{providers/openai/responses → clients/google}/_utils/__init__.py +0 -2
- mirascope/llm/{providers → clients}/google/_utils/decode.py +22 -98
- mirascope/llm/{providers → clients}/google/_utils/encode.py +46 -168
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +28 -0
- mirascope/llm/{providers/google → clients/openai/completions}/_utils/__init__.py +0 -4
- mirascope/llm/{providers → clients}/openai/completions/_utils/decode.py +9 -74
- mirascope/llm/{providers → clients}/openai/completions/_utils/encode.py +52 -70
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +26 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/{providers → clients}/openai/responses/_utils/decode.py +14 -80
- mirascope/llm/{providers → clients}/openai/responses/_utils/encode.py +41 -92
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +2 -3
- mirascope/llm/content/tool_call.py +0 -6
- mirascope/llm/content/tool_output.py +5 -22
- mirascope/llm/context/_utils.py +6 -19
- mirascope/llm/exceptions.py +43 -298
- mirascope/llm/formatting/__init__.py +2 -19
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +30 -219
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/formatting/partial.py +7 -80
- mirascope/llm/formatting/types.py +64 -21
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +0 -3
- mirascope/llm/messages/message.py +5 -13
- mirascope/llm/models/__init__.py +2 -7
- mirascope/llm/models/models.py +139 -315
- mirascope/llm/prompts/__init__.py +12 -13
- mirascope/llm/prompts/_utils.py +43 -14
- mirascope/llm/prompts/decorator.py +204 -144
- mirascope/llm/prompts/protocols.py +59 -25
- mirascope/llm/responses/__init__.py +1 -9
- mirascope/llm/responses/_utils.py +12 -102
- mirascope/llm/responses/base_response.py +6 -18
- mirascope/llm/responses/base_stream_response.py +50 -173
- mirascope/llm/responses/finish_reason.py +0 -1
- mirascope/llm/responses/response.py +13 -34
- mirascope/llm/responses/root_response.py +29 -100
- mirascope/llm/responses/stream_response.py +31 -40
- mirascope/llm/tools/__init__.py +2 -9
- mirascope/llm/tools/_utils.py +3 -12
- mirascope/llm/tools/decorator.py +16 -25
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +19 -87
- mirascope/llm/tools/toolkit.py +27 -35
- mirascope/llm/tools/tools.py +41 -135
- {mirascope-2.0.0.dist-info → mirascope-2.0.0a1.dist-info}/METADATA +13 -90
- mirascope-2.0.0a1.dist-info/RECORD +102 -0
- {mirascope-2.0.0.dist-info → mirascope-2.0.0a1.dist-info}/WHEEL +1 -1
- {mirascope-2.0.0.dist-info → mirascope-2.0.0a1.dist-info}/licenses/LICENSE +1 -1
- mirascope/_stubs.py +0 -363
- mirascope/api/__init__.py +0 -14
- mirascope/api/_generated/README.md +0 -207
- mirascope/api/_generated/__init__.py +0 -440
- mirascope/api/_generated/annotations/__init__.py +0 -33
- mirascope/api/_generated/annotations/client.py +0 -506
- mirascope/api/_generated/annotations/raw_client.py +0 -1414
- mirascope/api/_generated/annotations/types/__init__.py +0 -31
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_create_response.py +0 -48
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_get_response.py +0 -48
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_list_response.py +0 -21
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +0 -50
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +0 -5
- mirascope/api/_generated/annotations/types/annotations_update_response.py +0 -48
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +0 -5
- mirascope/api/_generated/api_keys/__init__.py +0 -17
- mirascope/api/_generated/api_keys/client.py +0 -530
- mirascope/api/_generated/api_keys/raw_client.py +0 -1236
- mirascope/api/_generated/api_keys/types/__init__.py +0 -15
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +0 -28
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +0 -27
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +0 -40
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +0 -27
- mirascope/api/_generated/client.py +0 -211
- mirascope/api/_generated/core/__init__.py +0 -52
- mirascope/api/_generated/core/api_error.py +0 -23
- mirascope/api/_generated/core/client_wrapper.py +0 -46
- mirascope/api/_generated/core/datetime_utils.py +0 -28
- mirascope/api/_generated/core/file.py +0 -67
- mirascope/api/_generated/core/force_multipart.py +0 -16
- mirascope/api/_generated/core/http_client.py +0 -543
- mirascope/api/_generated/core/http_response.py +0 -55
- mirascope/api/_generated/core/jsonable_encoder.py +0 -100
- mirascope/api/_generated/core/pydantic_utilities.py +0 -255
- mirascope/api/_generated/core/query_encoder.py +0 -58
- mirascope/api/_generated/core/remove_none_from_dict.py +0 -11
- mirascope/api/_generated/core/request_options.py +0 -35
- mirascope/api/_generated/core/serialization.py +0 -276
- mirascope/api/_generated/docs/__init__.py +0 -4
- mirascope/api/_generated/docs/client.py +0 -91
- mirascope/api/_generated/docs/raw_client.py +0 -178
- mirascope/api/_generated/environment.py +0 -9
- mirascope/api/_generated/environments/__init__.py +0 -23
- mirascope/api/_generated/environments/client.py +0 -649
- mirascope/api/_generated/environments/raw_client.py +0 -1567
- mirascope/api/_generated/environments/types/__init__.py +0 -25
- mirascope/api/_generated/environments/types/environments_create_response.py +0 -24
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +0 -60
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +0 -24
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +0 -22
- mirascope/api/_generated/environments/types/environments_get_response.py +0 -24
- mirascope/api/_generated/environments/types/environments_list_response_item.py +0 -24
- mirascope/api/_generated/environments/types/environments_update_response.py +0 -24
- mirascope/api/_generated/errors/__init__.py +0 -25
- mirascope/api/_generated/errors/bad_request_error.py +0 -14
- mirascope/api/_generated/errors/conflict_error.py +0 -14
- mirascope/api/_generated/errors/forbidden_error.py +0 -11
- mirascope/api/_generated/errors/internal_server_error.py +0 -10
- mirascope/api/_generated/errors/not_found_error.py +0 -11
- mirascope/api/_generated/errors/payment_required_error.py +0 -15
- mirascope/api/_generated/errors/service_unavailable_error.py +0 -14
- mirascope/api/_generated/errors/too_many_requests_error.py +0 -15
- mirascope/api/_generated/errors/unauthorized_error.py +0 -11
- mirascope/api/_generated/functions/__init__.py +0 -39
- mirascope/api/_generated/functions/client.py +0 -647
- mirascope/api/_generated/functions/raw_client.py +0 -1890
- mirascope/api/_generated/functions/types/__init__.py +0 -53
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_create_response.py +0 -37
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +0 -39
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +0 -53
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +0 -22
- mirascope/api/_generated/functions/types/functions_get_response.py +0 -37
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +0 -20
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +0 -25
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +0 -56
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +0 -22
- mirascope/api/_generated/functions/types/functions_list_response.py +0 -21
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +0 -41
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +0 -20
- mirascope/api/_generated/health/__init__.py +0 -7
- mirascope/api/_generated/health/client.py +0 -92
- mirascope/api/_generated/health/raw_client.py +0 -175
- mirascope/api/_generated/health/types/__init__.py +0 -8
- mirascope/api/_generated/health/types/health_check_response.py +0 -22
- mirascope/api/_generated/health/types/health_check_response_status.py +0 -5
- mirascope/api/_generated/organization_invitations/__init__.py +0 -33
- mirascope/api/_generated/organization_invitations/client.py +0 -546
- mirascope/api/_generated/organization_invitations/raw_client.py +0 -1519
- mirascope/api/_generated/organization_invitations/types/__init__.py +0 -53
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +0 -34
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +0 -48
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +0 -48
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +0 -48
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +0 -7
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +0 -7
- mirascope/api/_generated/organization_memberships/__init__.py +0 -19
- mirascope/api/_generated/organization_memberships/client.py +0 -302
- mirascope/api/_generated/organization_memberships/raw_client.py +0 -736
- mirascope/api/_generated/organization_memberships/types/__init__.py +0 -27
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +0 -33
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +0 -7
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +0 -7
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +0 -31
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +0 -7
- mirascope/api/_generated/organizations/__init__.py +0 -51
- mirascope/api/_generated/organizations/client.py +0 -869
- mirascope/api/_generated/organizations/raw_client.py +0 -2593
- mirascope/api/_generated/organizations/types/__init__.py +0 -71
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +0 -24
- mirascope/api/_generated/organizations/types/organizations_create_response.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_get_response.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +0 -47
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +0 -33
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +0 -24
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +0 -53
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +0 -34
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_update_response.py +0 -26
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +0 -5
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +0 -7
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +0 -35
- mirascope/api/_generated/project_memberships/__init__.py +0 -25
- mirascope/api/_generated/project_memberships/client.py +0 -437
- mirascope/api/_generated/project_memberships/raw_client.py +0 -1039
- mirascope/api/_generated/project_memberships/types/__init__.py +0 -29
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +0 -35
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +0 -33
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +0 -7
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +0 -35
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +0 -7
- mirascope/api/_generated/projects/__init__.py +0 -7
- mirascope/api/_generated/projects/client.py +0 -428
- mirascope/api/_generated/projects/raw_client.py +0 -1302
- mirascope/api/_generated/projects/types/__init__.py +0 -10
- mirascope/api/_generated/projects/types/projects_create_response.py +0 -25
- mirascope/api/_generated/projects/types/projects_get_response.py +0 -25
- mirascope/api/_generated/projects/types/projects_list_response_item.py +0 -25
- mirascope/api/_generated/projects/types/projects_update_response.py +0 -25
- mirascope/api/_generated/reference.md +0 -4915
- mirascope/api/_generated/tags/__init__.py +0 -19
- mirascope/api/_generated/tags/client.py +0 -504
- mirascope/api/_generated/tags/raw_client.py +0 -1288
- mirascope/api/_generated/tags/types/__init__.py +0 -17
- mirascope/api/_generated/tags/types/tags_create_response.py +0 -41
- mirascope/api/_generated/tags/types/tags_get_response.py +0 -41
- mirascope/api/_generated/tags/types/tags_list_response.py +0 -23
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +0 -41
- mirascope/api/_generated/tags/types/tags_update_response.py +0 -41
- mirascope/api/_generated/token_cost/__init__.py +0 -7
- mirascope/api/_generated/token_cost/client.py +0 -160
- mirascope/api/_generated/token_cost/raw_client.py +0 -264
- mirascope/api/_generated/token_cost/types/__init__.py +0 -8
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +0 -54
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +0 -52
- mirascope/api/_generated/traces/__init__.py +0 -97
- mirascope/api/_generated/traces/client.py +0 -1103
- mirascope/api/_generated/traces/raw_client.py +0 -2322
- mirascope/api/_generated/traces/types/__init__.py +0 -155
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +0 -29
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +0 -27
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +0 -23
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +0 -38
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +0 -19
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +0 -20
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +0 -29
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +0 -31
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +0 -23
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +0 -38
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +0 -19
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +0 -48
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +0 -23
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +0 -38
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +0 -19
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +0 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +0 -22
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +0 -20
- mirascope/api/_generated/traces/types/traces_create_response.py +0 -24
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +0 -22
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +0 -60
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +0 -24
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +0 -22
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +0 -33
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +0 -88
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +0 -33
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -88
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +0 -25
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +0 -44
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +0 -50
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +0 -7
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +0 -5
- mirascope/api/_generated/traces/types/traces_search_response.py +0 -26
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +0 -50
- mirascope/api/_generated/types/__init__.py +0 -85
- mirascope/api/_generated/types/already_exists_error.py +0 -22
- mirascope/api/_generated/types/already_exists_error_tag.py +0 -5
- mirascope/api/_generated/types/bad_request_error_body.py +0 -50
- mirascope/api/_generated/types/click_house_error.py +0 -22
- mirascope/api/_generated/types/database_error.py +0 -22
- mirascope/api/_generated/types/database_error_tag.py +0 -5
- mirascope/api/_generated/types/date.py +0 -3
- mirascope/api/_generated/types/http_api_decode_error.py +0 -27
- mirascope/api/_generated/types/http_api_decode_error_tag.py +0 -5
- mirascope/api/_generated/types/immutable_resource_error.py +0 -22
- mirascope/api/_generated/types/internal_server_error_body.py +0 -49
- mirascope/api/_generated/types/issue.py +0 -38
- mirascope/api/_generated/types/issue_tag.py +0 -10
- mirascope/api/_generated/types/not_found_error_body.py +0 -22
- mirascope/api/_generated/types/not_found_error_tag.py +0 -5
- mirascope/api/_generated/types/number_from_string.py +0 -3
- mirascope/api/_generated/types/permission_denied_error.py +0 -22
- mirascope/api/_generated/types/permission_denied_error_tag.py +0 -5
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +0 -32
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +0 -7
- mirascope/api/_generated/types/pricing_unavailable_error.py +0 -23
- mirascope/api/_generated/types/property_key.py +0 -7
- mirascope/api/_generated/types/property_key_key.py +0 -25
- mirascope/api/_generated/types/property_key_key_tag.py +0 -5
- mirascope/api/_generated/types/rate_limit_error.py +0 -31
- mirascope/api/_generated/types/rate_limit_error_tag.py +0 -5
- mirascope/api/_generated/types/service_unavailable_error_body.py +0 -24
- mirascope/api/_generated/types/service_unavailable_error_tag.py +0 -7
- mirascope/api/_generated/types/stripe_error.py +0 -20
- mirascope/api/_generated/types/subscription_past_due_error.py +0 -31
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +0 -7
- mirascope/api/_generated/types/unauthorized_error_body.py +0 -21
- mirascope/api/_generated/types/unauthorized_error_tag.py +0 -5
- mirascope/api/client.py +0 -255
- mirascope/api/settings.py +0 -99
- mirascope/llm/formatting/output_parser.py +0 -178
- mirascope/llm/formatting/primitives.py +0 -192
- mirascope/llm/mcp/mcp_client.py +0 -130
- mirascope/llm/messages/_utils.py +0 -34
- mirascope/llm/models/thinking_config.py +0 -61
- mirascope/llm/prompts/prompts.py +0 -487
- mirascope/llm/providers/__init__.py +0 -62
- mirascope/llm/providers/anthropic/__init__.py +0 -11
- mirascope/llm/providers/anthropic/_utils/__init__.py +0 -27
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +0 -282
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +0 -266
- mirascope/llm/providers/anthropic/_utils/encode.py +0 -418
- mirascope/llm/providers/anthropic/_utils/errors.py +0 -46
- mirascope/llm/providers/anthropic/beta_provider.py +0 -374
- mirascope/llm/providers/anthropic/model_id.py +0 -23
- mirascope/llm/providers/anthropic/model_info.py +0 -87
- mirascope/llm/providers/anthropic/provider.py +0 -479
- mirascope/llm/providers/google/__init__.py +0 -6
- mirascope/llm/providers/google/_utils/errors.py +0 -50
- mirascope/llm/providers/google/model_id.py +0 -22
- mirascope/llm/providers/google/model_info.py +0 -63
- mirascope/llm/providers/google/provider.py +0 -492
- mirascope/llm/providers/mirascope/__init__.py +0 -5
- mirascope/llm/providers/mirascope/_utils.py +0 -73
- mirascope/llm/providers/mirascope/provider.py +0 -349
- mirascope/llm/providers/mlx/__init__.py +0 -9
- mirascope/llm/providers/mlx/_utils.py +0 -141
- mirascope/llm/providers/mlx/encoding/__init__.py +0 -8
- mirascope/llm/providers/mlx/encoding/base.py +0 -72
- mirascope/llm/providers/mlx/encoding/transformers.py +0 -150
- mirascope/llm/providers/mlx/mlx.py +0 -254
- mirascope/llm/providers/mlx/model_id.py +0 -17
- mirascope/llm/providers/mlx/provider.py +0 -452
- mirascope/llm/providers/model_id.py +0 -16
- mirascope/llm/providers/ollama/__init__.py +0 -7
- mirascope/llm/providers/ollama/provider.py +0 -71
- mirascope/llm/providers/openai/__init__.py +0 -15
- mirascope/llm/providers/openai/_utils/__init__.py +0 -5
- mirascope/llm/providers/openai/_utils/errors.py +0 -46
- mirascope/llm/providers/openai/completions/__init__.py +0 -7
- mirascope/llm/providers/openai/completions/base_provider.py +0 -542
- mirascope/llm/providers/openai/completions/provider.py +0 -22
- mirascope/llm/providers/openai/model_id.py +0 -31
- mirascope/llm/providers/openai/model_info.py +0 -303
- mirascope/llm/providers/openai/provider.py +0 -441
- mirascope/llm/providers/openai/responses/__init__.py +0 -5
- mirascope/llm/providers/openai/responses/provider.py +0 -513
- mirascope/llm/providers/provider_id.py +0 -24
- mirascope/llm/providers/provider_registry.py +0 -299
- mirascope/llm/providers/together/__init__.py +0 -7
- mirascope/llm/providers/together/provider.py +0 -40
- mirascope/llm/responses/usage.py +0 -95
- mirascope/ops/__init__.py +0 -111
- mirascope/ops/_internal/__init__.py +0 -5
- mirascope/ops/_internal/closure.py +0 -1169
- mirascope/ops/_internal/configuration.py +0 -177
- mirascope/ops/_internal/context.py +0 -76
- mirascope/ops/_internal/exporters/__init__.py +0 -26
- mirascope/ops/_internal/exporters/exporters.py +0 -395
- mirascope/ops/_internal/exporters/processors.py +0 -104
- mirascope/ops/_internal/exporters/types.py +0 -165
- mirascope/ops/_internal/exporters/utils.py +0 -29
- mirascope/ops/_internal/instrumentation/__init__.py +0 -8
- mirascope/ops/_internal/instrumentation/llm/__init__.py +0 -8
- mirascope/ops/_internal/instrumentation/llm/common.py +0 -530
- mirascope/ops/_internal/instrumentation/llm/cost.py +0 -190
- mirascope/ops/_internal/instrumentation/llm/encode.py +0 -238
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +0 -38
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +0 -31
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +0 -38
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +0 -18
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +0 -100
- mirascope/ops/_internal/instrumentation/llm/llm.py +0 -161
- mirascope/ops/_internal/instrumentation/llm/model.py +0 -1798
- mirascope/ops/_internal/instrumentation/llm/response.py +0 -521
- mirascope/ops/_internal/instrumentation/llm/serialize.py +0 -300
- mirascope/ops/_internal/propagation.py +0 -198
- mirascope/ops/_internal/protocols.py +0 -133
- mirascope/ops/_internal/session.py +0 -139
- mirascope/ops/_internal/spans.py +0 -232
- mirascope/ops/_internal/traced_calls.py +0 -375
- mirascope/ops/_internal/traced_functions.py +0 -523
- mirascope/ops/_internal/tracing.py +0 -353
- mirascope/ops/_internal/types.py +0 -13
- mirascope/ops/_internal/utils.py +0 -123
- mirascope/ops/_internal/versioned_calls.py +0 -512
- mirascope/ops/_internal/versioned_functions.py +0 -357
- mirascope/ops/_internal/versioning.py +0 -303
- mirascope/ops/exceptions.py +0 -21
- mirascope-2.0.0.dist-info/RECORD +0 -423
- /mirascope/llm/{providers → clients}/base/kwargs.py +0 -0
- /mirascope/llm/{providers → clients}/google/message.py +0 -0
mirascope/llm/providers/openai/provider.py
@@ -1,441 +0,0 @@
-"""Unified OpenAI client implementation."""
-
-from __future__ import annotations
-
-from collections.abc import Sequence
-from typing import TYPE_CHECKING
-from typing_extensions import Unpack
-
-from openai import BadRequestError as OpenAIBadRequestError, OpenAI
-
-from ...context import Context, DepsT
-from ...exceptions import BadRequestError, NotFoundError
-from ...formatting import Format, FormattableT, OutputParser
-from ...messages import Message
-from ...responses import (
-    AsyncContextResponse,
-    AsyncContextStreamResponse,
-    AsyncResponse,
-    AsyncStreamResponse,
-    ContextResponse,
-    ContextStreamResponse,
-    Response,
-    StreamResponse,
-)
-from ...tools import (
-    AsyncContextTool,
-    AsyncContextToolkit,
-    AsyncTool,
-    AsyncToolkit,
-    ContextTool,
-    ContextToolkit,
-    Tool,
-    Toolkit,
-)
-from ..base import BaseProvider
-from . import _utils
-from .completions import OpenAICompletionsProvider
-from .model_id import OPENAI_KNOWN_MODELS, OpenAIModelId
-from .responses import OpenAIResponsesProvider
-
-if TYPE_CHECKING:
-    from ...models import Params
-
-
-def _has_audio_content(messages: Sequence[Message]) -> bool:
-    """Returns whether a sequence of messages contains any audio content."""
-    for message in messages:
-        if message.role == "system":
-            continue
-        for content in message.content:
-            if content.type == "audio":
-                return True
-    return False
-
-
-def choose_api_mode(model_id: OpenAIModelId, messages: Sequence[Message]) -> str:
-    """Choose between 'responses' or 'completions' API based on model_id and messages.
-
-    Args:
-        model_id: The model identifier.
-        messages: The messages to send to the LLM.
-
-    Returns:
-        Either "responses" or "completions" depending on the model and message content.
-
-    If the user manually specified an api mode (by appending it as a suffix to the model
-    id), then we use it.
-
-    Otherwise, we prefer the responses API where supported (because it has better
-    reasoning support and better prompt caching). However we will use the :completions api
-    if the messages contain any audio content, as audio content is not yet supported in
-    the responses API.
-    """
-    if model_id.endswith(":completions"):
-        return "completions"
-    elif model_id.endswith(":responses"):
-        return "responses"
-
-    if _has_audio_content(messages):
-        return "completions"
-
-    if f"{model_id}:responses" in OPENAI_KNOWN_MODELS:
-        # Prefer responses api when we know it is available
-        return "responses"
-    elif f"{model_id}:completions" in OPENAI_KNOWN_MODELS:
-        # If we know from testing that the completions api is available, and
-        # (implied by above) that responses wasn't, then we should use completions
-        return "completions"
-
-    # If we don't have either :responses or :completions in the known_models, it's
-    # likely that this is a new model we haven't tested. We default to responses api for
-    # openai/ models (on the assumption that they are new models and OpenAI prefers
-    # the responses API) but completions for other models (on the assumption that they
-    # are other models routing through the OpenAI completions API)
-    if model_id.startswith("openai/"):
-        return "responses"
-    else:
-        return "completions"
-
-
-class OpenAIRoutedCompletionsProvider(OpenAICompletionsProvider):
-    """OpenAI completions client that reports provider_id as 'openai'."""
-
-    id = "openai"
-
-
-class OpenAIRoutedResponsesProvider(OpenAIResponsesProvider):
-    """OpenAI responses client that reports provider_id as 'openai'."""
-
-    id = "openai"
-
-
-class OpenAIProvider(BaseProvider[OpenAI]):
-    """Unified provider for OpenAI that routes to Completions or Responses API based on model_id."""
-
-    id = "openai"
-    default_scope = "openai/"
-    # Include special handling for model_not_found from Responses API
-    error_map = {
-        **_utils.OPENAI_ERROR_MAP,
-        OpenAIBadRequestError: lambda e: NotFoundError
-        if hasattr(e, "code") and e.code == "model_not_found"  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
-        else BadRequestError,
-    }
-
-    def __init__(
-        self, *, api_key: str | None = None, base_url: str | None = None
-    ) -> None:
-        """Initialize the OpenAI provider with both subclients."""
-        self._completions_provider = OpenAIRoutedCompletionsProvider(
-            api_key=api_key, base_url=base_url
-        )
-        self._responses_provider = OpenAIRoutedResponsesProvider(
-            api_key=api_key, base_url=base_url
-        )
-        # Use completions client's underlying OpenAI client as the main one
-        self.client = self._completions_provider.client
-
-    def get_error_status(self, e: Exception) -> int | None:
-        """Extract HTTP status code from OpenAI exception."""
-        return getattr(e, "status_code", None)  # pragma: no cover
-
-    def _choose_subprovider(
-        self, model_id: OpenAIModelId, messages: Sequence[Message]
-    ) -> OpenAICompletionsProvider | OpenAIResponsesProvider:
-        """Choose the appropriate provider based on model_id and messages.
-
-        Args:
-            model_id: The model identifier.
-            messages: The messages to send to the LLM.
-
-        Returns:
-            The responses or completions subclient.
-        """
-        api_mode = choose_api_mode(model_id, messages)
-        if api_mode == "responses":
-            return self._responses_provider
-        return self._completions_provider
-
-    def _call(
-        self,
-        *,
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` by synchronously calling the OpenAI API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.Response` object containing the LLM-generated content.
-        """
-        client = self._choose_subprovider(model_id, messages)
-        return client.call(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    def _context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` by synchronously calling the OpenAI API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.ContextResponse` object containing the LLM-generated content.
-        """
-        client = self._choose_subprovider(model_id, messages)
-        return client.context_call(
-            ctx=ctx,
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    async def _call_async(
-        self,
-        *,
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncResponse` object containing the LLM-generated content.
-        """
-        return await self._choose_subprovider(model_id, messages).call_async(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    async def _context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncContextResponse` object containing the LLM-generated content.
-        """
-        return await self._choose_subprovider(model_id, messages).context_call_async(
-            ctx=ctx,
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    def _stream(
-        self,
-        *,
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.StreamResponse` object for iterating over the LLM-generated content.
-        """
-        client = self._choose_subprovider(model_id, messages)
-        return client.stream(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    def _context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
-        """
-        client = self._choose_subprovider(model_id, messages)
-        return client.context_stream(
-            ctx=ctx,
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    async def _stream_async(
-        self,
-        *,
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
-        """
-        return await self._choose_subprovider(model_id, messages).stream_async(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
-
-    async def _context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
-        **params: Unpack[Params],
-    ) -> (
-        AsyncContextStreamResponse[DepsT]
-        | AsyncContextStreamResponse[DepsT, FormattableT]
-    ):
-        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
-        """
-        return await self._choose_subprovider(model_id, messages).context_stream_async(
-            ctx=ctx,
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            **params,
-        )
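For quick reference, the routing heuristic documented in the removed `choose_api_mode` above can be restated as a self-contained sketch. This is illustrative only: `pick_api_mode`, `KNOWN_MODELS`, and `has_audio` are hypothetical stand-ins for the removed provider method, the `OPENAI_KNOWN_MODELS` registry, and the `_has_audio_content` check, and are not part of the mirascope API.

```python
# Standalone sketch of the API-mode routing rule from the removed provider.
# KNOWN_MODELS entries below are made-up examples, not real registry contents.
KNOWN_MODELS = {"openai/gpt-4o:responses", "openai/gpt-4o:completions"}


def pick_api_mode(model_id: str, has_audio: bool) -> str:
    """Return "responses" or "completions" for a given model id."""
    if model_id.endswith(":completions"):  # an explicit suffix always wins
        return "completions"
    if model_id.endswith(":responses"):
        return "responses"
    if has_audio:  # audio content is only supported via the completions API
        return "completions"
    if f"{model_id}:responses" in KNOWN_MODELS:  # prefer responses when known to work
        return "responses"
    if f"{model_id}:completions" in KNOWN_MODELS:
        return "completions"
    # Unknown model: assume responses for openai/ models, completions otherwise.
    return "responses" if model_id.startswith("openai/") else "completions"


assert pick_api_mode("openai/gpt-4o", has_audio=False) == "responses"
assert pick_api_mode("openai/gpt-4o", has_audio=True) == "completions"
assert pick_api_mode("openai/gpt-4o:completions", has_audio=False) == "completions"
```

In short, an explicit `:responses` or `:completions` suffix takes precedence; otherwise the responses API is preferred, falling back to completions when audio content is present or when only the completions variant is known to be available.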