mirascope 2.0.0a4__py3-none-any.whl → 2.0.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +10 -1
- mirascope/_stubs.py +363 -0
- mirascope/api/__init__.py +8 -0
- mirascope/api/_generated/__init__.py +119 -1
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +474 -0
- mirascope/api/_generated/annotations/raw_client.py +1095 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +7 -0
- mirascope/api/_generated/api_keys/client.py +429 -0
- mirascope/api/_generated/api_keys/raw_client.py +788 -0
- mirascope/api/_generated/api_keys/types/__init__.py +9 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
- mirascope/api/_generated/client.py +12 -0
- mirascope/api/_generated/core/client_wrapper.py +2 -14
- mirascope/api/_generated/core/datetime_utils.py +1 -3
- mirascope/api/_generated/core/file.py +2 -5
- mirascope/api/_generated/core/http_client.py +36 -112
- mirascope/api/_generated/core/jsonable_encoder.py +1 -3
- mirascope/api/_generated/core/pydantic_utilities.py +19 -74
- mirascope/api/_generated/core/query_encoder.py +1 -3
- mirascope/api/_generated/core/serialization.py +4 -10
- mirascope/api/_generated/docs/client.py +2 -6
- mirascope/api/_generated/docs/raw_client.py +4 -20
- mirascope/api/_generated/environments/__init__.py +17 -0
- mirascope/api/_generated/environments/client.py +500 -0
- mirascope/api/_generated/environments/raw_client.py +999 -0
- mirascope/api/_generated/environments/types/__init__.py +15 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
- mirascope/api/_generated/errors/__init__.py +2 -0
- mirascope/api/_generated/errors/bad_request_error.py +1 -5
- mirascope/api/_generated/errors/conflict_error.py +1 -5
- mirascope/api/_generated/errors/forbidden_error.py +1 -5
- mirascope/api/_generated/errors/internal_server_error.py +1 -6
- mirascope/api/_generated/errors/not_found_error.py +1 -5
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +29 -0
- mirascope/api/_generated/functions/client.py +433 -0
- mirascope/api/_generated/functions/raw_client.py +1049 -0
- mirascope/api/_generated/functions/types/__init__.py +29 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/client.py +2 -6
- mirascope/api/_generated/health/raw_client.py +5 -23
- mirascope/api/_generated/health/types/health_check_response.py +1 -3
- mirascope/api/_generated/organizations/__init__.py +2 -0
- mirascope/api/_generated/organizations/client.py +94 -27
- mirascope/api/_generated/organizations/raw_client.py +246 -128
- mirascope/api/_generated/organizations/types/__init__.py +2 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_credits_response.py +19 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_update_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +1 -3
- mirascope/api/_generated/projects/__init__.py +2 -12
- mirascope/api/_generated/projects/client.py +38 -68
- mirascope/api/_generated/projects/raw_client.py +92 -163
- mirascope/api/_generated/projects/types/__init__.py +1 -6
- mirascope/api/_generated/projects/types/projects_create_response.py +4 -9
- mirascope/api/_generated/projects/types/projects_get_response.py +4 -9
- mirascope/api/_generated/projects/types/projects_list_response_item.py +4 -9
- mirascope/api/_generated/projects/types/projects_update_response.py +4 -9
- mirascope/api/_generated/reference.md +1862 -70
- mirascope/api/_generated/traces/__init__.py +22 -0
- mirascope/api/_generated/traces/client.py +398 -0
- mirascope/api/_generated/traces/raw_client.py +902 -18
- mirascope/api/_generated/traces/types/__init__.py +32 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +4 -11
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +4 -8
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +6 -18
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_response.py +2 -5
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +3 -9
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +54 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +90 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +41 -0
- mirascope/api/_generated/types/__init__.py +18 -0
- mirascope/api/_generated/types/already_exists_error.py +1 -3
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +1 -3
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +1 -3
- mirascope/api/_generated/types/issue_tag.py +1 -8
- mirascope/api/_generated/types/not_found_error_body.py +1 -3
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +1 -3
- mirascope/api/_generated/types/permission_denied_error_tag.py +1 -3
- mirascope/api/_generated/types/property_key_key.py +1 -3
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/llm/__init__.py +6 -2
- mirascope/llm/content/tool_call.py +6 -0
- mirascope/llm/exceptions.py +28 -0
- mirascope/llm/formatting/__init__.py +2 -2
- mirascope/llm/formatting/format.py +120 -8
- mirascope/llm/formatting/types.py +1 -56
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/providers/__init__.py +26 -5
- mirascope/llm/providers/anthropic/__init__.py +3 -21
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +4 -2
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +13 -12
- mirascope/llm/providers/anthropic/_utils/decode.py +4 -2
- mirascope/llm/providers/anthropic/_utils/encode.py +57 -14
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +6 -0
- mirascope/llm/providers/anthropic/provider.py +5 -0
- mirascope/llm/providers/base/__init__.py +5 -2
- mirascope/llm/providers/base/_utils.py +2 -7
- mirascope/llm/providers/base/base_provider.py +173 -58
- mirascope/llm/providers/base/params.py +63 -34
- mirascope/llm/providers/google/__init__.py +2 -17
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +17 -8
- mirascope/llm/providers/google/_utils/encode.py +105 -16
- mirascope/llm/providers/google/_utils/errors.py +49 -0
- mirascope/llm/providers/google/model_info.py +1 -0
- mirascope/llm/providers/google/provider.py +9 -5
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +77 -0
- mirascope/llm/providers/mirascope/provider.py +318 -0
- mirascope/llm/providers/mlx/__init__.py +2 -17
- mirascope/llm/providers/mlx/_utils.py +9 -2
- mirascope/llm/providers/mlx/provider.py +8 -0
- mirascope/llm/providers/ollama/__init__.py +1 -13
- mirascope/llm/providers/openai/__init__.py +10 -1
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +2 -20
- mirascope/llm/providers/openai/completions/_utils/decode.py +14 -3
- mirascope/llm/providers/openai/completions/_utils/encode.py +15 -12
- mirascope/llm/providers/openai/completions/base_provider.py +6 -6
- mirascope/llm/providers/openai/provider.py +14 -1
- mirascope/llm/providers/openai/responses/__init__.py +1 -17
- mirascope/llm/providers/openai/responses/_utils/decode.py +2 -2
- mirascope/llm/providers/openai/responses/_utils/encode.py +43 -15
- mirascope/llm/providers/openai/responses/provider.py +13 -7
- mirascope/llm/providers/provider_id.py +1 -0
- mirascope/llm/providers/provider_registry.py +59 -3
- mirascope/llm/providers/together/__init__.py +1 -13
- mirascope/llm/responses/base_stream_response.py +24 -20
- mirascope/llm/tools/decorator.py +8 -4
- mirascope/llm/tools/tool_schema.py +33 -6
- mirascope/llm/tools/tools.py +84 -16
- mirascope/ops/__init__.py +60 -109
- mirascope/ops/_internal/closure.py +62 -11
- mirascope/ops/_internal/instrumentation/llm/llm.py +1 -2
- mirascope/ops/_internal/traced_functions.py +23 -4
- mirascope/ops/_internal/versioned_functions.py +54 -43
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/METADATA +7 -7
- mirascope-2.0.0a6.dist-info/RECORD +316 -0
- mirascope/llm/formatting/_utils.py +0 -78
- mirascope/llm/mcp/client.py +0 -118
- mirascope/llm/providers/_missing_import_stubs.py +0 -49
- mirascope/llm/providers/load_provider.py +0 -54
- mirascope-2.0.0a4.dist-info/RECORD +0 -247
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/WHEEL +0 -0
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/mlx/__init__.py
@@ -1,22 +1,7 @@
 """MLX client implementation."""
 
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from .model_id import MLXModelId
-    from .provider import MLXProvider
-else:
-    try:
-        from .model_id import MLXModelId
-        from .provider import MLXProvider
-    except ImportError:  # pragma: no cover
-        from .._missing_import_stubs import (
-            create_import_error_stub,
-            create_provider_stub,
-        )
-
-        MLXProvider = create_provider_stub("mlx", "MLXProvider")
-        MLXModelId = str
+from .model_id import MLXModelId
+from .provider import MLXProvider
 
 __all__ = [
     "MLXModelId",
mirascope/llm/providers/mlx/_utils.py
@@ -2,14 +2,21 @@ from collections.abc import Callable
 from typing import TypeAlias, TypedDict
 
 import mlx.core as mx
+from huggingface_hub.errors import LocalEntryNotFoundError
 from mlx_lm.generate import GenerationResponse
 from mlx_lm.sample_utils import make_sampler
 
+from ...exceptions import NotFoundError
 from ...responses import FinishReason, Usage
-from ..base import Params, _utils as _base_utils
+from ..base import Params, ProviderErrorMap, _utils as _base_utils
 
 Sampler: TypeAlias = Callable[[mx.array], mx.array]
 
+# Error mapping for MLX provider
+MLX_ERROR_MAP: ProviderErrorMap = {
+    LocalEntryNotFoundError: NotFoundError,
+}
+
 
 class MakeSamplerKwargs(TypedDict, total=False):
     """Keyword arguments to be used for `mlx_lm`-s `make_sampler` function.
@@ -69,7 +76,7 @@ def encode_params(params: Params) -> tuple[int | None, StreamGenerateKwargs]:
     with _base_utils.ensure_all_params_accessed(
         params=params,
         provider_id="mlx",
-        unsupported_params=["stop_sequences", "thinking"
+        unsupported_params=["stop_sequences", "thinking"],
     ) as param_accessor:
         if param_accessor.max_tokens is not None:
             kwargs["max_tokens"] = param_accessor.max_tokens
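The `ensure_all_params_accessed` helper used above is internal to `mirascope.llm.providers.base._utils` and its body is not part of this diff. As a rough, hypothetical sketch of the pattern (names and warning behavior are assumptions, not the actual implementation), such a guard might look like:

```python
# Hypothetical sketch of the ensure_all_params_accessed pattern; the real
# Mirascope helper is internal and its exact behavior is not shown in this diff.
import warnings
from collections.abc import Iterator
from contextlib import contextmanager
from typing import Any


@contextmanager
def ensure_all_params_accessed(
    params: dict[str, Any], provider_id: str, unsupported_params: list[str]
) -> Iterator[Any]:
    # Warn for params the provider cannot honor.
    for name in unsupported_params:
        if params.get(name) is not None:
            warnings.warn(f"{provider_id!r} does not support param {name!r}")

    accessed: set[str] = set()

    class ParamAccessor:
        def __getattr__(self, name: str) -> Any:
            accessed.add(name)
            return params.get(name)

    yield ParamAccessor()

    # Flag any caller-supplied param the encoder never read.
    unread = (
        {k for k, v in params.items() if v is not None}
        - accessed
        - set(unsupported_params)
    )
    if unread:
        warnings.warn(f"params not handled by {provider_id!r}: {sorted(unread)}")
```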
mirascope/llm/providers/mlx/provider.py
@@ -70,6 +70,14 @@ class MLXProvider(BaseProvider[None]):
 
     id = "mlx"
     default_scope = "mlx-community/"
+    error_map = _utils.MLX_ERROR_MAP
+
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from MLX exception.
+
+        MLX/HuggingFace Hub exceptions don't have status codes.
+        """
+        return None
 
     def _call(
         self,
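The new `error_map`/`get_error_status` hooks on `MLXProvider` suggest that `BaseProvider` (whose reworked hunks in `base/base_provider.py`, +173 −58, are not included here) translates provider-native exceptions into Mirascope's exception hierarchy. A minimal sketch of what a first-match translation might look like, with illustrative names only:

```python
# Illustrative sketch only: first-match translation of a provider-native
# exception through an error_map like MLX_ERROR_MAP above.
def translate_error(
    e: Exception, error_map: dict[type[Exception], type[Exception]]
) -> Exception:
    for provider_exc_type, mirascope_exc_type in error_map.items():
        if isinstance(e, provider_exc_type):
            return mirascope_exc_type(str(e))
    return e  # unmapped exceptions propagate unchanged
```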
mirascope/llm/providers/ollama/__init__.py
@@ -1,18 +1,6 @@
 """Ollama provider implementation."""
 
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from .provider import OllamaProvider
-else:
-    try:
-        from .provider import OllamaProvider
-    except ImportError:  # pragma: no cover
-        from .._missing_import_stubs import (
-            create_provider_stub,
-        )
-
-        OllamaProvider = create_provider_stub("openai", "OllamaProvider")
+from .provider import OllamaProvider
 
 __all__ = [
     "OllamaProvider",
mirascope/llm/providers/openai/__init__.py
@@ -1,6 +1,15 @@
 """OpenAI client implementation."""
 
+from .completions.base_provider import BaseOpenAICompletionsProvider
+from .completions.provider import OpenAICompletionsProvider
 from .model_id import OpenAIModelId
 from .provider import OpenAIProvider
+from .responses.provider import OpenAIResponsesProvider
 
-__all__ = [
+__all__ = [
+    "BaseOpenAICompletionsProvider",
+    "OpenAICompletionsProvider",
+    "OpenAIModelId",
+    "OpenAIProvider",
+    "OpenAIResponsesProvider",
+]
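With the `_missing_import_stubs` indirection gone (the module is deleted in this release; see the file list above), these `__init__.py` files are plain re-exports, so a missing optional dependency now presumably surfaces as an ordinary `ImportError` at import time rather than a deferred stub error:

```python
# Subproviders are now importable directly from the provider package root.
from mirascope.llm.providers.openai import (
    OpenAICompletionsProvider,
    OpenAIResponsesProvider,
)
```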
mirascope/llm/providers/openai/_utils/errors.py
@@ -0,0 +1,46 @@
+"""OpenAI error handling utilities."""
+
+from openai import (
+    APIConnectionError as OpenAIAPIConnectionError,
+    APIResponseValidationError as OpenAIAPIResponseValidationError,
+    APITimeoutError as OpenAIAPITimeoutError,
+    AuthenticationError as OpenAIAuthenticationError,
+    BadRequestError as OpenAIBadRequestError,
+    ConflictError as OpenAIConflictError,
+    InternalServerError as OpenAIInternalServerError,
+    NotFoundError as OpenAINotFoundError,
+    OpenAIError,
+    PermissionDeniedError as OpenAIPermissionDeniedError,
+    RateLimitError as OpenAIRateLimitError,
+    UnprocessableEntityError as OpenAIUnprocessableEntityError,
+)
+
+from ....exceptions import (
+    APIError,
+    AuthenticationError,
+    BadRequestError,
+    ConnectionError,
+    NotFoundError,
+    PermissionError,
+    RateLimitError,
+    ResponseValidationError,
+    ServerError,
+    TimeoutError,
+)
+from ...base import ProviderErrorMap
+
+# Shared error mapping used by OpenAI Responses and Completions providers
+OPENAI_ERROR_MAP: ProviderErrorMap = {
+    OpenAIAuthenticationError: AuthenticationError,
+    OpenAIPermissionDeniedError: PermissionError,
+    OpenAINotFoundError: NotFoundError,
+    OpenAIBadRequestError: BadRequestError,
+    OpenAIUnprocessableEntityError: BadRequestError,
+    OpenAIConflictError: BadRequestError,
+    OpenAIRateLimitError: RateLimitError,
+    OpenAIInternalServerError: ServerError,
+    OpenAIAPITimeoutError: TimeoutError,
+    OpenAIAPIConnectionError: ConnectionError,
+    OpenAIAPIResponseValidationError: ResponseValidationError,
+    OpenAIError: APIError,  # Catch-all for unknown OpenAI errors
+}
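Because `isinstance` matches subclasses, entry order matters if lookup is first-match (as sketched in the MLX section above): every specific OpenAI exception type is listed before the `OpenAIError` catch-all. A toy demonstration of why:

```python
# Toy demonstration of first-match ordering; Specific must precede Base
# in the map or it would always resolve to Base's target.
class Base(Exception): ...
class Specific(Base): ...

toy_map = {Specific: ValueError, Base: RuntimeError}

def first_match(e: Exception, mapping: dict) -> type[Exception]:
    return next(target for source, target in mapping.items() if isinstance(e, source))

assert first_match(Specific(), toy_map) is ValueError
assert first_match(Base(), toy_map) is RuntimeError
```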
mirascope/llm/providers/openai/completions/__init__.py
@@ -1,23 +1,5 @@
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from .base_provider import BaseOpenAICompletionsProvider
-    from .provider import OpenAICompletionsProvider
-else:
-    try:
-        from .base_provider import BaseOpenAICompletionsProvider
-        from .provider import OpenAICompletionsProvider
-    except ImportError:  # pragma: no cover
-        from ..._missing_import_stubs import (
-            create_provider_stub,
-        )
-
-        BaseOpenAICompletionsProvider = create_provider_stub(
-            "openai", "BaseOpenAICompletionsProvider"
-        )
-        OpenAICompletionsProvider = create_provider_stub(
-            "openai", "OpenAICompletionsProvider"
-        )
+from .base_provider import BaseOpenAICompletionsProvider
+from .provider import OpenAICompletionsProvider
 
 __all__ = [
     "BaseOpenAICompletionsProvider",
mirascope/llm/providers/openai/completions/_utils/decode.py
@@ -117,6 +117,7 @@ class _OpenAIChunkProcessor:
     def __init__(self) -> None:
         self.current_content_type: Literal["text", "tool_call"] | None = None
         self.current_tool_index: int | None = None
+        self.current_tool_id: str | None = None
         self.refusal_encountered = False
 
     def process_chunk(self, chunk: openai_types.ChatCompletionChunk) -> ChunkIterator:
@@ -180,7 +181,9 @@
             self.current_tool_index is not None
             and self.current_tool_index < index
         ):
-            yield ToolCallEndChunk()
+            if self.current_tool_id is None:  # pragma: no cover
+                raise RuntimeError("No current_tool_id for ToolCallChunk")
+            yield ToolCallEndChunk(id=self.current_tool_id)
             self.current_tool_index = None
 
         if self.current_tool_index is None:
@@ -201,15 +204,23 @@
                     id=tool_id,
                     name=name,
                 )
+                self.current_tool_id = tool_id
 
             if tool_call_delta.function and tool_call_delta.function.arguments:
-                yield ToolCallChunk(delta=tool_call_delta.function.arguments)
+                if self.current_tool_id is None:  # pragma: no cover
+                    raise RuntimeError("No current_tool_id for ToolCallChunk")
+                yield ToolCallChunk(
+                    id=self.current_tool_id,
+                    delta=tool_call_delta.function.arguments,
+                )
 
         if choice.finish_reason:
             if self.current_content_type == "text":
                 yield TextEndChunk()
             elif self.current_content_type == "tool_call":
-                yield ToolCallEndChunk()
+                if self.current_tool_id is None:  # pragma: no cover
+                    raise RuntimeError("No current_tool_id for ToolCallChunk")
+                yield ToolCallEndChunk(id=self.current_tool_id)
             elif self.current_content_type is not None:  # pragma: no cover
                 raise NotImplementedError()
 
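The chunk processor now tracks `current_tool_id` so that every streamed `ToolCallChunk` and `ToolCallEndChunk` carries the id of the call it belongs to (previously end chunks were emitted without one). That lets a consumer accumulate argument deltas per call id; a self-contained sketch with toy chunk objects (field names illustrative, not the actual chunk API):

```python
from types import SimpleNamespace as NS

# Toy stream mirroring the diff's chunk shapes: deltas and the end marker
# now share an id, so buffers can be keyed reliably.
stream = [
    NS(kind="delta", id="call_1", delta='{"city": '),
    NS(kind="delta", id="call_1", delta='"Tokyo"}'),
    NS(kind="end", id="call_1"),
]

buffers: dict[str, list[str]] = {}
for chunk in stream:
    if chunk.kind == "delta":
        buffers.setdefault(chunk.id, []).append(chunk.delta)
    elif chunk.kind == "end":
        print(chunk.id, "->", "".join(buffers.pop(chunk.id)))
```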
mirascope/llm/providers/openai/completions/_utils/encode.py
@@ -15,7 +15,6 @@ from .....exceptions import (
 from .....formatting import (
     Format,
     FormattableT,
-    _utils as _formatting_utils,
     resolve_format,
 )
 from .....messages import AssistantMessage, Message, UserMessage
@@ -144,7 +143,7 @@ def _encode_user_message(
 
 
 def _encode_assistant_message(
-    message: AssistantMessage, model_id: OpenAIModelId,
+    message: AssistantMessage, model_id: OpenAIModelId, encode_thoughts_as_text: bool
 ) -> openai_types.ChatCompletionAssistantMessageParam:
     """Convert Mirascope `AssistantMessage` to OpenAI `ChatCompletionAssistantMessageParam`."""
 
@@ -153,7 +152,7 @@ def _encode_assistant_message(
         and message.provider_model_name
         == model_name(model_id=model_id, api_mode="completions")
         and message.raw_message
-        and not
+        and not encode_thoughts_as_text
     ):
         return cast(
             openai_types.ChatCompletionAssistantMessageParam, message.raw_message
@@ -177,7 +176,7 @@
                 )
             )
         elif part.type == "thought":
-            if
+            if encode_thoughts_as_text:
                 text_params.append(
                     openai_types.ChatCompletionContentPartTextParam(
                         text="**Thinking:** " + part.thought, type="text"
@@ -203,7 +202,7 @@
 
 
 def _encode_message(
-    message: Message, model_id: OpenAIModelId,
+    message: Message, model_id: OpenAIModelId, encode_thoughts_as_text: bool
 ) -> list[openai_types.ChatCompletionMessageParam]:
     """Convert a Mirascope `Message` to OpenAI `ChatCompletionMessageParam` format.
 
@@ -224,7 +223,7 @@
     elif message.role == "user":
         return _encode_user_message(message, model_id)
     elif message.role == "assistant":
-        return [_encode_assistant_message(message, model_id)]
+        return [_encode_assistant_message(message, model_id, encode_thoughts_as_text)]
     else:
         raise ValueError(f"Unsupported role: {message.role}")  # pragma: no cover
 
@@ -299,12 +298,12 @@ def encode_request(
             "model": base_model_name,
         }
     )
-
+    encode_thoughts_as_text = False
 
     with _base_utils.ensure_all_params_accessed(
         params=params,
         provider_id="openai",
-        unsupported_params=["top_k"
+        unsupported_params=["top_k"],
    ) as param_accessor:
         if param_accessor.temperature is not None:
             kwargs["temperature"] = param_accessor.temperature
@@ -317,8 +316,10 @@
            kwargs["seed"] = param_accessor.seed
         if param_accessor.stop_sequences is not None:
             kwargs["stop"] = param_accessor.stop_sequences
-        if param_accessor.
-
+        if param_accessor.thinking is not None:
+            thinking = param_accessor.thinking
+            if thinking.get("encode_thoughts_as_text"):
+                encode_thoughts_as_text = True
 
     tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
 
@@ -345,7 +346,7 @@
             "function": {"name": FORMAT_TOOL_NAME},
         }
         kwargs["parallel_tool_calls"] = False
-        format_tool_schema =
+        format_tool_schema = format.create_tool_schema()
         openai_tools.append(_convert_tool_to_tool_param(format_tool_schema))
     elif (
         format.mode == "json"
@@ -363,7 +364,9 @@
 
     encoded_messages: list[openai_types.ChatCompletionMessageParam] = []
     for message in messages:
-        encoded_messages.extend(_encode_message(message, model_id))
+        encoded_messages.extend(
+            _encode_message(message, model_id, encode_thoughts_as_text)
+        )
     kwargs["messages"] = encoded_messages
 
     return messages, format, kwargs
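The `encode_thoughts_as_text` flag read from `params["thinking"]` is threaded down through `_encode_message` to `_encode_assistant_message`; when set, thought parts are serialized as ordinary text parts with a `**Thinking:** ` prefix (visible in the hunk above) instead of round-tripping the raw provider message. The conversion itself is essentially:

```python
# Sketch of the thought-to-text conversion performed when
# encode_thoughts_as_text is set; the prefix is taken from the diff,
# the dict shape is simplified from OpenAI's content-part params.
def thought_to_text_part(thought: str) -> dict[str, str]:
    return {"type": "text", "text": "**Thinking:** " + thought}

print(thought_to_text_part("The user wants the weather in Tokyo."))
```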
mirascope/llm/providers/openai/completions/base_provider.py
@@ -31,6 +31,7 @@ from ....tools import (
     Toolkit,
 )
 from ...base import BaseProvider, Params
+from .. import _utils as _shared_utils
 from ..model_id import model_name as openai_model_name
 from . import _utils
 
@@ -44,6 +45,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
     api_key_env_var: ClassVar[str]
     api_key_required: ClassVar[bool] = True
     provider_name: ClassVar[str | None] = None
+    error_map = _shared_utils.OPENAI_ERROR_MAP
 
     def __init__(
         self,
@@ -77,6 +79,10 @@
             base_url=resolved_base_url,
         )
 
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from OpenAI exception."""
+        return getattr(e, "status_code", None)
+
     def _model_name(self, model_id: str) -> str:
         """Extract the model name to send to the API."""
         return openai_model_name(model_id, None)
@@ -114,7 +120,6 @@
             params=params,
         )
         kwargs["model"] = self._model_name(model_id)
-
         openai_response = self.client.chat.completions.create(**kwargs)
 
         assistant_message, finish_reason, usage = _utils.decode_response(
@@ -171,7 +176,6 @@
             params=params,
         )
         kwargs["model"] = self._model_name(model_id)
-
         openai_response = self.client.chat.completions.create(**kwargs)
 
         assistant_message, finish_reason, usage = _utils.decode_response(
@@ -224,7 +228,6 @@
             format=format,
         )
         kwargs["model"] = self._model_name(model_id)
-
         openai_response = await self.async_client.chat.completions.create(**kwargs)
 
         assistant_message, finish_reason, usage = _utils.decode_response(
@@ -281,7 +284,6 @@
             format=format,
         )
         kwargs["model"] = self._model_name(model_id)
-
         openai_response = await self.async_client.chat.completions.create(**kwargs)
 
         assistant_message, finish_reason, usage = _utils.decode_response(
@@ -334,7 +336,6 @@
             params=params,
         )
         kwargs["model"] = self._model_name(model_id)
-
         openai_stream = self.client.chat.completions.create(
             **kwargs,
             stream=True,
@@ -436,7 +437,6 @@
             params=params,
        )
         kwargs["model"] = self._model_name(model_id)
-
         openai_stream = await self.async_client.chat.completions.create(
             **kwargs,
             stream=True,
mirascope/llm/providers/openai/provider.py
@@ -3,9 +3,10 @@
 from collections.abc import Sequence
 from typing_extensions import Unpack
 
-from openai import OpenAI
+from openai import BadRequestError as OpenAIBadRequestError, OpenAI
 
 from ...context import Context, DepsT
+from ...exceptions import BadRequestError, NotFoundError
 from ...formatting import Format, FormattableT
 from ...messages import Message
 from ...responses import (
@@ -29,6 +30,7 @@ from ...tools import (
     Toolkit,
 )
 from ..base import BaseProvider, Params
+from . import _utils
 from .completions import OpenAICompletionsProvider
 from .model_id import OPENAI_KNOWN_MODELS, OpenAIModelId
 from .responses import OpenAIResponsesProvider
@@ -107,6 +109,13 @@ class OpenAIProvider(BaseProvider[OpenAI]):
 
     id = "openai"
     default_scope = "openai/"
+    # Include special handling for model_not_found from Responses API
+    error_map = {
+        **_utils.OPENAI_ERROR_MAP,
+        OpenAIBadRequestError: lambda e: NotFoundError
+        if hasattr(e, "code") and e.code == "model_not_found"  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
+        else BadRequestError,
+    }
 
     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
@@ -121,6 +130,10 @@
         # Use completions client's underlying OpenAI client as the main one
         self.client = self._completions_provider.client
 
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from OpenAI exception."""
+        return getattr(e, "status_code", None)  # pragma: no cover
+
     def _choose_subprovider(
         self, model_id: OpenAIModelId, messages: Sequence[Message]
     ) -> OpenAICompletionsProvider | OpenAIResponsesProvider:
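Unlike the flat maps above, `OpenAIProvider.error_map` mixes class values with a callable: an `OpenAIBadRequestError` resolves to `NotFoundError` only when its `code` is `"model_not_found"` (the Responses API apparently reports unknown models as 400s). Resolution presumably calls the value when it is not a class; a self-contained sketch with stand-in exception types:

```python
import inspect

class BadRequestError(Exception): ...  # stand-ins for mirascope.llm.exceptions
class NotFoundError(Exception): ...

def pick(e: Exception) -> type[Exception]:
    # mirrors the lambda entry in OpenAIProvider.error_map
    return (
        NotFoundError
        if getattr(e, "code", None) == "model_not_found"
        else BadRequestError
    )

def resolve(e: Exception, value):
    # a map value may be a class or a callable (Exception) -> class (assumed)
    return value if inspect.isclass(value) else value(e)

fake = type("FakeOpenAIBadRequest", (Exception,), {"code": "model_not_found"})()
assert resolve(fake, pick) is NotFoundError
```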
mirascope/llm/providers/openai/responses/__init__.py
@@ -1,20 +1,4 @@
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from .provider import OpenAIResponsesProvider
-else:
-    try:
-        from .provider import OpenAIResponsesProvider
-    except ImportError:  # pragma: no cover
-        from ..._missing_import_stubs import (
-            create_import_error_stub,
-            create_provider_stub,
-        )
-
-        OpenAIResponsesProvider = create_provider_stub(
-            "openai", "OpenAIResponsesProvider"
-        )
-
+from .provider import OpenAIResponsesProvider
 
 __all__ = [
     "OpenAIResponsesProvider",
mirascope/llm/providers/openai/responses/_utils/decode.py
@@ -173,9 +173,9 @@ class _OpenAIResponsesChunkProcessor:
             )
             self.current_content_type = "tool_call"
         elif event.type == "response.function_call_arguments.delta":
-            yield ToolCallChunk(delta=event.delta)
+            yield ToolCallChunk(id=self.current_tool_call_id, delta=event.delta)
         elif event.type == "response.function_call_arguments.done":
-            yield ToolCallEndChunk()
+            yield ToolCallEndChunk(id=self.current_tool_call_id)
             self.current_content_type = None
         elif (
             event.type == "response.reasoning_text.delta"
mirascope/llm/providers/openai/responses/_utils/encode.py
@@ -23,7 +23,7 @@ from openai.types.responses.response_input_param import (
     FunctionCallOutput,
     Message as ResponseInputMessageParam,
 )
-from openai.types.shared_params import Reasoning
+from openai.types.shared_params import Reasoning, ReasoningEffort
 from openai.types.shared_params.response_format_json_object import (
     ResponseFormatJSONObject,
 )
@@ -33,12 +33,11 @@ from .....exceptions import FeatureNotSupportedError
 from .....formatting import (
     Format,
     FormattableT,
-    _utils as _formatting_utils,
     resolve_format,
 )
 from .....messages import AssistantMessage, Message, UserMessage
 from .....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
-from ....base import Params, _utils as _base_utils
+from ....base import Params, ThinkingLevel, _utils as _base_utils
 from ...model_id import OpenAIModelId, model_name
 from ...model_info import (
     MODELS_WITHOUT_JSON_OBJECT_SUPPORT,
@@ -46,6 +45,17 @@ from ...model_info import (
     NON_REASONING_MODELS,
 )
 
+# Thinking level to a float multiplier % of max tokens
+THINKING_LEVEL_TO_EFFORT: dict[ThinkingLevel, ReasoningEffort] = {
+    "default": "medium",
+    "none": "none",
+    "minimal": "minimal",
+    "low": "low",
+    "medium": "medium",
+    "high": "high",
+    "max": "xhigh",
+}
+
 
 class ResponseCreateKwargs(TypedDict, total=False):
     """Kwargs to the OpenAI `client.responses.create` method."""
@@ -237,12 +247,25 @@ def _create_strict_response_format(
     return response_format
 
 
-def _compute_reasoning(
-
-
-
-
-
+def _compute_reasoning(
+    level: ThinkingLevel,
+    include_summaries: bool,
+) -> Reasoning:
+    """Compute the OpenAI `Reasoning` config based on ThinkingConfig.
+
+    Args:
+        level: The thinking level
+        include_summaries: Whether to include summary (True/False for auto)
+
+    Returns:
+        OpenAI Reasoning configuration
+    """
+    reasoning: Reasoning = {"effort": THINKING_LEVEL_TO_EFFORT.get(level) or "medium"}
+
+    if include_summaries:
+        reasoning["summary"] = "auto"
+
+    return reasoning
 
 
 def encode_request(
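Given the table and helper above, the `ThinkingLevel`-to-`ReasoningEffort` mapping is direct ("default" → "medium", "max" → "xhigh"), with an unrecognized level falling back to "medium" and summaries requested as "auto" when enabled. Expected behavior, per the reconstructed code:

```python
# Expected outputs of _compute_reasoning as reconstructed above.
assert _compute_reasoning("high", include_summaries=True) == {
    "effort": "high",
    "summary": "auto",
}
assert _compute_reasoning("max", include_summaries=False) == {"effort": "xhigh"}
```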
mirascope/llm/providers/openai/responses/_utils/encode.py (continued)
@@ -283,16 +306,21 @@ def encode_request(
         if param_accessor.top_p is not None:
             kwargs["top_p"] = param_accessor.top_p
         if param_accessor.thinking is not None:
+            thinking_config = param_accessor.thinking
             if base_model_name in NON_REASONING_MODELS:
                 param_accessor.emit_warning_for_unused_param(
-                    "thinking",
+                    "thinking", thinking_config, "openai", model_id
                 )
             else:
                 # Assume model supports reasoning unless explicitly listed as non-reasoning
                 # This ensures new reasoning models work immediately without code updates
-
-
-
+                level = thinking_config.get("level")
+                include_summaries = thinking_config.get("include_summaries", True)
+                kwargs["reasoning"] = _compute_reasoning(level, include_summaries)
+
+                # Handle encode_thoughts_as_text from ThinkingConfig
+                if thinking_config.get("encode_thoughts_as_text"):
+                    encode_thoughts = True
 
     tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
     openai_tools = [_convert_tool_to_function_tool_param(tool) for tool in tools]
@@ -305,9 +333,9 @@
         if format.mode == "strict":
             kwargs["text"] = {"format": _create_strict_response_format(format)}
         elif format.mode == "tool":
-
+            format_tool_schema = format.create_tool_schema()
             openai_tools.append(
-                _convert_tool_to_function_tool_param(
+                _convert_tool_to_function_tool_param(format_tool_schema)
             )
             if tools:
                 kwargs["tool_choice"] = ToolChoiceAllowedParam(