mirascope 2.0.0a3__py3-none-any.whl → 2.0.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/api/_generated/__init__.py +62 -6
- mirascope/api/_generated/client.py +8 -0
- mirascope/api/_generated/errors/__init__.py +11 -1
- mirascope/api/_generated/errors/conflict_error.py +15 -0
- mirascope/api/_generated/errors/forbidden_error.py +15 -0
- mirascope/api/_generated/errors/internal_server_error.py +15 -0
- mirascope/api/_generated/errors/not_found_error.py +15 -0
- mirascope/api/_generated/organizations/__init__.py +25 -0
- mirascope/api/_generated/organizations/client.py +380 -0
- mirascope/api/_generated/organizations/raw_client.py +876 -0
- mirascope/api/_generated/organizations/types/__init__.py +23 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +17 -0
- mirascope/api/_generated/projects/client.py +458 -0
- mirascope/api/_generated/projects/raw_client.py +1016 -0
- mirascope/api/_generated/projects/types/__init__.py +15 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
- mirascope/api/_generated/reference.md +586 -0
- mirascope/api/_generated/types/__init__.py +20 -4
- mirascope/api/_generated/types/already_exists_error.py +24 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/database_error.py +24 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/issue.py +1 -5
- mirascope/api/_generated/types/not_found_error_body.py +24 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/permission_denied_error.py +24 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
- mirascope/api/_generated/types/property_key.py +2 -2
- mirascope/api/_generated/types/{property_key_tag.py → property_key_key.py} +3 -5
- mirascope/api/_generated/types/{property_key_tag_tag.py → property_key_key_tag.py} +1 -1
- mirascope/llm/__init__.py +4 -0
- mirascope/llm/providers/__init__.py +6 -0
- mirascope/llm/providers/anthropic/__init__.py +6 -1
- mirascope/llm/providers/anthropic/_utils/__init__.py +15 -5
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
- mirascope/llm/providers/anthropic/_utils/decode.py +39 -7
- mirascope/llm/providers/anthropic/_utils/encode.py +156 -64
- mirascope/llm/providers/anthropic/beta_provider.py +322 -0
- mirascope/llm/providers/anthropic/model_id.py +10 -27
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +127 -145
- mirascope/llm/providers/base/_utils.py +15 -1
- mirascope/llm/providers/google/_utils/decode.py +55 -3
- mirascope/llm/providers/google/_utils/encode.py +14 -6
- mirascope/llm/providers/google/model_id.py +7 -13
- mirascope/llm/providers/google/model_info.py +62 -0
- mirascope/llm/providers/google/provider.py +8 -4
- mirascope/llm/providers/load_provider.py +8 -2
- mirascope/llm/providers/mlx/_utils.py +23 -1
- mirascope/llm/providers/mlx/encoding/transformers.py +17 -1
- mirascope/llm/providers/mlx/provider.py +4 -0
- mirascope/llm/providers/ollama/__init__.py +19 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/completions/__init__.py +6 -1
- mirascope/llm/providers/openai/completions/_utils/decode.py +57 -5
- mirascope/llm/providers/openai/completions/_utils/encode.py +9 -8
- mirascope/llm/providers/openai/completions/base_provider.py +513 -0
- mirascope/llm/providers/openai/completions/provider.py +13 -447
- mirascope/llm/providers/openai/model_info.py +57 -0
- mirascope/llm/providers/openai/provider.py +16 -4
- mirascope/llm/providers/openai/responses/_utils/decode.py +55 -4
- mirascope/llm/providers/openai/responses/_utils/encode.py +9 -9
- mirascope/llm/providers/openai/responses/provider.py +20 -21
- mirascope/llm/providers/provider_id.py +11 -1
- mirascope/llm/providers/provider_registry.py +3 -1
- mirascope/llm/providers/together/__init__.py +19 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +3 -0
- mirascope/llm/responses/base_response.py +4 -0
- mirascope/llm/responses/base_stream_response.py +25 -1
- mirascope/llm/responses/finish_reason.py +1 -0
- mirascope/llm/responses/response.py +9 -0
- mirascope/llm/responses/root_response.py +5 -1
- mirascope/llm/responses/usage.py +95 -0
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +3 -3
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a4.dist-info}/RECORD +91 -50
- mirascope/llm/providers/openai/shared/__init__.py +0 -7
- mirascope/llm/providers/openai/shared/_utils.py +0 -59
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +0 -0
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0

mirascope/api/_generated/types/__init__.py
CHANGED

@@ -2,20 +2,36 @@

 # isort: skip_file

+from .already_exists_error import AlreadyExistsError
+from .already_exists_error_tag import AlreadyExistsErrorTag
+from .database_error import DatabaseError
+from .database_error_tag import DatabaseErrorTag
 from .http_api_decode_error import HttpApiDecodeError
 from .http_api_decode_error_tag import HttpApiDecodeErrorTag
 from .issue import Issue
 from .issue_tag import IssueTag
+from .not_found_error_body import NotFoundErrorBody
+from .not_found_error_tag import NotFoundErrorTag
+from .permission_denied_error import PermissionDeniedError
+from .permission_denied_error_tag import PermissionDeniedErrorTag
 from .property_key import PropertyKey
-from .property_key_tag import PropertyKeyTag
-from .property_key_tag_tag import PropertyKeyTagTag
+from .property_key_key import PropertyKeyKey
+from .property_key_key_tag import PropertyKeyKeyTag

 __all__ = [
+    "AlreadyExistsError",
+    "AlreadyExistsErrorTag",
+    "DatabaseError",
+    "DatabaseErrorTag",
     "HttpApiDecodeError",
     "HttpApiDecodeErrorTag",
     "Issue",
     "IssueTag",
+    "NotFoundErrorBody",
+    "NotFoundErrorTag",
+    "PermissionDeniedError",
+    "PermissionDeniedErrorTag",
     "PropertyKey",
-    "PropertyKeyTag",
-    "PropertyKeyTagTag",
+    "PropertyKeyKey",
+    "PropertyKeyKeyTag",
 ]

mirascope/api/_generated/errors/../types/already_exists_error.py
ADDED

@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .already_exists_error_tag import AlreadyExistsErrorTag
+
+
+class AlreadyExistsError(UniversalBaseModel):
+    message: str
+    resource: typing.Optional[str] = None
+    tag: AlreadyExistsErrorTag
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            extra="allow", frozen=True
+        )  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow

mirascope/api/_generated/types/database_error.py
ADDED

@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .database_error_tag import DatabaseErrorTag
+
+
+class DatabaseError(UniversalBaseModel):
+    message: str
+    cause: typing.Optional[typing.Optional[typing.Any]] = None
+    tag: DatabaseErrorTag
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            extra="allow", frozen=True
+        )  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow

mirascope/api/_generated/types/http_api_decode_error.py
CHANGED

@@ -3,9 +3,7 @@
 import typing

 import pydantic
-import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from ..core.serialization import FieldMetadata
 from .http_api_decode_error_tag import HttpApiDecodeErrorTag
 from .issue import Issue

@@ -17,7 +15,7 @@ class HttpApiDecodeError(UniversalBaseModel):

     issues: typing.List[Issue]
     message: str
-    tag:
+    tag: HttpApiDecodeErrorTag

     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(

mirascope/api/_generated/types/issue.py
CHANGED

@@ -3,9 +3,7 @@
 import typing

 import pydantic
-import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from ..core.serialization import FieldMetadata
 from .issue_tag import IssueTag
 from .property_key import PropertyKey

@@ -15,9 +13,7 @@ class Issue(UniversalBaseModel):
     Represents an error encountered while parsing a value to match the schema
     """

-    tag:
-        pydantic.Field()
-    )
+    tag: IssueTag = pydantic.Field()
     """
     The tag identifying the type of parse issue
     """

mirascope/api/_generated/types/not_found_error_body.py
ADDED

@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .not_found_error_tag import NotFoundErrorTag
+
+
+class NotFoundErrorBody(UniversalBaseModel):
+    message: str
+    resource: typing.Optional[str] = None
+    tag: NotFoundErrorTag
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            extra="allow", frozen=True
+        )  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow

mirascope/api/_generated/types/permission_denied_error.py
ADDED

@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .permission_denied_error_tag import PermissionDeniedErrorTag
+
+
+class PermissionDeniedError(UniversalBaseModel):
+    message: str
+    resource: typing.Optional[str] = None
+    tag: PermissionDeniedErrorTag
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            extra="allow", frozen=True
+        )  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow

mirascope/api/_generated/types/{property_key_tag.py → property_key_key.py}
CHANGED

@@ -3,18 +3,16 @@
 import typing

 import pydantic
-import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from ..core.serialization import FieldMetadata
-from .property_key_tag_tag import PropertyKeyTagTag
+from .property_key_key_tag import PropertyKeyKeyTag


-class PropertyKeyTag(UniversalBaseModel):
+class PropertyKeyKey(UniversalBaseModel):
     """
     an object to be decoded into a globally shared symbol
     """

-    tag:
+    tag: PropertyKeyKeyTag
     key: str

     if IS_PYDANTIC_V2:

mirascope/llm/__init__.py
CHANGED

@@ -117,6 +117,8 @@ from .responses import (
     TextStream,
     ThoughtStream,
     ToolCallStream,
+    Usage,
+    UsageDeltaChunk,
 )
 from .tools import (
     AsyncContextTool,
@@ -219,6 +221,8 @@ __all__ = [
     "ToolOutput",
     "Toolkit",
     "URLImageSource",
+    "Usage",
+    "UsageDeltaChunk",
     "UserContent",
     "UserContentPart",
     "UserMessage",
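The hunk above only re-exports Usage and UsageDeltaChunk from mirascope.llm.responses; their definitions live in the new responses/usage.py (+95 lines, not shown in this section). As a rough illustration of how streamed usage deltas relate to a final total, here is a minimal, hypothetical accumulator over the same field names that the Anthropic beta decoder emits later in this diff (input_tokens, output_tokens, cache_read_tokens, cache_write_tokens, reasoning_tokens). It is a sketch, not mirascope's actual Usage implementation.

from dataclasses import dataclass


@dataclass
class UsageTotals:
    """Hypothetical running total built from streamed usage deltas."""

    input_tokens: int = 0
    output_tokens: int = 0
    cache_read_tokens: int = 0
    cache_write_tokens: int = 0
    reasoning_tokens: int = 0

    def add_delta(self, delta: "UsageTotals") -> None:
        # Each UsageDeltaChunk-style delta is summed field by field.
        self.input_tokens += delta.input_tokens
        self.output_tokens += delta.output_tokens
        self.cache_read_tokens += delta.cache_read_tokens
        self.cache_write_tokens += delta.cache_write_tokens
        self.reasoning_tokens += delta.reasoning_tokens
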

mirascope/llm/providers/__init__.py
CHANGED

@@ -9,28 +9,34 @@ from .google import GoogleModelId, GoogleProvider
 from .load_provider import load, load_provider
 from .mlx import MLXModelId, MLXProvider
 from .model_id import ModelId
+from .ollama import OllamaProvider
 from .openai import (
     OpenAIModelId,
     OpenAIProvider,
 )
+from .openai.completions import BaseOpenAICompletionsProvider
 from .provider_id import KNOWN_PROVIDER_IDS, ProviderId
 from .provider_registry import get_provider_for_model, register_provider
+from .together import TogetherProvider

 __all__ = [
     "KNOWN_PROVIDER_IDS",
     "AnthropicModelId",
     "AnthropicProvider",
+    "BaseOpenAICompletionsProvider",
     "BaseProvider",
     "GoogleModelId",
     "GoogleProvider",
     "MLXModelId",
     "MLXProvider",
     "ModelId",
+    "OllamaProvider",
     "OpenAIModelId",
     "OpenAIProvider",
     "Params",
     "Provider",
     "ProviderId",
+    "TogetherProvider",
     "get_provider_for_model",
     "load",
     "load_provider",

mirascope/llm/providers/anthropic/__init__.py
CHANGED

@@ -3,22 +3,27 @@
 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
+    from .beta_provider import AnthropicBetaProvider
     from .model_id import AnthropicModelId
     from .provider import AnthropicProvider
 else:
     try:
+        from .beta_provider import AnthropicBetaProvider
         from .model_id import AnthropicModelId
         from .provider import AnthropicProvider
     except ImportError:  # pragma: no cover
         from .._missing_import_stubs import (
-            create_import_error_stub,
             create_provider_stub,
         )

+        AnthropicBetaProvider = create_provider_stub(
+            "anthropic", "AnthropicBetaProvider"
+        )
         AnthropicProvider = create_provider_stub("anthropic", "AnthropicProvider")
         AnthropicModelId = str

 __all__ = [
+    "AnthropicBetaProvider",
     "AnthropicModelId",
     "AnthropicProvider",
 ]

mirascope/llm/providers/anthropic/_utils/__init__.py
CHANGED

@@ -1,13 +1,23 @@
-from .decode import (
-    decode_async_stream,
-    decode_response,
-    decode_stream,
+"""Shared Anthropic utilities."""
+
+from .decode import decode_async_stream, decode_response, decode_stream
+from .encode import (
+    DEFAULT_FORMAT_MODE,
+    DEFAULT_MAX_TOKENS,
+    AnthropicImageMimeType,
+    encode_image_mime_type,
+    encode_request,
+    process_params,
 )
-from .encode import encode_request

 __all__ = [
+    "DEFAULT_FORMAT_MODE",
+    "DEFAULT_MAX_TOKENS",
+    "AnthropicImageMimeType",
     "decode_async_stream",
     "decode_response",
     "decode_stream",
+    "encode_image_mime_type",
     "encode_request",
+    "process_params",
 ]

mirascope/llm/providers/anthropic/_utils/beta_decode.py
ADDED

@@ -0,0 +1,271 @@
+"""Beta Anthropic response decoding."""
+
+import json
+from typing import Any, TypeAlias, cast
+
+from anthropic.lib.streaming._beta_messages import (
+    BetaAsyncMessageStreamManager,
+    BetaMessageStreamManager,
+)
+from anthropic.types.beta import (
+    BetaContentBlock,
+    BetaRawMessageStreamEvent,
+    BetaRedactedThinkingBlockParam,
+    BetaTextBlockParam,
+    BetaThinkingBlockParam,
+    BetaToolUseBlockParam,
+)
+from anthropic.types.beta.parsed_beta_message import ParsedBetaMessage
+
+from ....content import (
+    AssistantContentPart,
+    Text,
+    TextChunk,
+    TextEndChunk,
+    TextStartChunk,
+    Thought,
+    ThoughtChunk,
+    ThoughtEndChunk,
+    ThoughtStartChunk,
+    ToolCall,
+    ToolCallChunk,
+    ToolCallEndChunk,
+    ToolCallStartChunk,
+)
+from ....messages import AssistantMessage
+from ....responses import (
+    AsyncChunkIterator,
+    ChunkIterator,
+    FinishReason,
+    FinishReasonChunk,
+    RawMessageChunk,
+    RawStreamEventChunk,
+    Usage,
+    UsageDeltaChunk,
+)
+from ..model_id import model_name
+from .decode import decode_usage
+
+BETA_FINISH_REASON_MAP = {
+    "max_tokens": FinishReason.MAX_TOKENS,
+    "refusal": FinishReason.REFUSAL,
+    "model_context_window_exceeded": FinishReason.CONTEXT_LENGTH_EXCEEDED,
+}
+
+
+def _decode_beta_assistant_content(content: BetaContentBlock) -> AssistantContentPart:
+    """Convert Beta content block to mirascope AssistantContentPart."""
+    if content.type == "text":
+        return Text(text=content.text)
+    elif content.type == "tool_use":
+        return ToolCall(
+            id=content.id,
+            name=content.name,
+            args=json.dumps(content.input),
+        )
+    elif content.type == "thinking":
+        return Thought(thought=content.thinking)
+    else:
+        raise NotImplementedError(
+            f"Support for beta content type `{content.type}` is not yet implemented."
+        )
+
+
+def beta_decode_response(
+    response: ParsedBetaMessage[Any],
+    model_id: str,
+) -> tuple[AssistantMessage, FinishReason | None, Usage]:
+    """Convert Beta message to mirascope AssistantMessage and usage."""
+    assistant_message = AssistantMessage(
+        content=[_decode_beta_assistant_content(part) for part in response.content],
+        provider_id="anthropic",
+        model_id=model_id,
+        provider_model_name=model_name(model_id),
+        raw_message={
+            "role": response.role,
+            "content": [
+                part.model_dump(exclude_none=True) for part in response.content
+            ],
+        },
+    )
+    finish_reason = (
+        BETA_FINISH_REASON_MAP.get(response.stop_reason)
+        if response.stop_reason
+        else None
+    )
+    usage = decode_usage(response.usage)
+    return assistant_message, finish_reason, usage
+
+
+BetaContentBlockParam: TypeAlias = (
+    BetaTextBlockParam
+    | BetaThinkingBlockParam
+    | BetaToolUseBlockParam
+    | BetaRedactedThinkingBlockParam
+)
+
+
+class _BetaChunkProcessor:
+    """Processes Beta stream events and maintains state across events."""
+
+    def __init__(self) -> None:
+        self.current_block_param: BetaContentBlockParam | None = None
+        self.accumulated_tool_json: str = ""
+        self.accumulated_blocks: list[BetaContentBlockParam] = []
+
+    def process_event(self, event: BetaRawMessageStreamEvent) -> ChunkIterator:
+        """Process a single Beta event and yield the appropriate content chunks."""
+        yield RawStreamEventChunk(raw_stream_event=event)
+
+        if event.type == "content_block_start":
+            content_block = event.content_block
+
+            if content_block.type == "text":
+                self.current_block_param = {
+                    "type": "text",
+                    "text": content_block.text,
+                }
+                yield TextStartChunk()
+            elif content_block.type == "tool_use":
+                self.current_block_param = {
+                    "type": "tool_use",
+                    "id": content_block.id,
+                    "name": content_block.name,
+                    "input": {},
+                }
+                self.accumulated_tool_json = ""
+                yield ToolCallStartChunk(
+                    id=content_block.id,
+                    name=content_block.name,
+                )
+            elif content_block.type == "thinking":
+                self.current_block_param = {
+                    "type": "thinking",
+                    "thinking": "",
+                    "signature": "",
+                }
+                yield ThoughtStartChunk()
+            elif content_block.type == "redacted_thinking":  # pragma: no cover
+                self.current_block_param = {
+                    "type": "redacted_thinking",
+                    "data": content_block.data,
+                }
+            else:
+                raise NotImplementedError(
+                    f"Support for beta content block type `{content_block.type}` "
+                    "is not yet implemented."
+                )
+
+        elif event.type == "content_block_delta":
+            if self.current_block_param is None:  # pragma: no cover
+                raise RuntimeError("Received delta without a current block")
+
+            delta = event.delta
+            if delta.type == "text_delta":
+                if self.current_block_param["type"] != "text":  # pragma: no cover
+                    raise RuntimeError(
+                        f"Received text_delta for {self.current_block_param['type']} block"
+                    )
+                self.current_block_param["text"] += delta.text
+                yield TextChunk(delta=delta.text)
+            elif delta.type == "input_json_delta":
+                if self.current_block_param["type"] != "tool_use":  # pragma: no cover
+                    raise RuntimeError(
+                        f"Received input_json_delta for {self.current_block_param['type']} block"
+                    )
+                self.accumulated_tool_json += delta.partial_json
+                yield ToolCallChunk(delta=delta.partial_json)
+            elif delta.type == "thinking_delta":
+                if self.current_block_param["type"] != "thinking":  # pragma: no cover
+                    raise RuntimeError(
+                        f"Received thinking_delta for {self.current_block_param['type']} block"
+                    )
+                self.current_block_param["thinking"] += delta.thinking
+                yield ThoughtChunk(delta=delta.thinking)
+            elif delta.type == "signature_delta":
+                if self.current_block_param["type"] != "thinking":  # pragma: no cover
+                    raise RuntimeError(
+                        f"Received signature_delta for {self.current_block_param['type']} block"
+                    )
+                self.current_block_param["signature"] += delta.signature
+            else:
+                raise RuntimeError(
+                    f"Received unsupported delta type: {delta.type}"
+                )  # pragma: no cover
+
+        elif event.type == "content_block_stop":
+            if self.current_block_param is None:  # pragma: no cover
+                raise RuntimeError("Received stop without a current block")
+
+            block_type = self.current_block_param["type"]
+
+            if block_type == "text":
+                yield TextEndChunk()
+            elif block_type == "tool_use":
+                if self.current_block_param["type"] != "tool_use":  # pragma: no cover
+                    raise RuntimeError(
+                        f"Block type mismatch: stored {self.current_block_param['type']}, expected tool_use"
+                    )
+                self.current_block_param["input"] = (
+                    json.loads(self.accumulated_tool_json)
+                    if self.accumulated_tool_json
+                    else {}
+                )
+                yield ToolCallEndChunk()
+            elif block_type == "thinking":
+                yield ThoughtEndChunk()
+            else:
+                raise NotImplementedError
+
+            self.accumulated_blocks.append(self.current_block_param)
+            self.current_block_param = None
+
+        elif event.type == "message_delta":
+            if event.delta.stop_reason:
+                finish_reason = BETA_FINISH_REASON_MAP.get(event.delta.stop_reason)
+                if finish_reason is not None:
+                    yield FinishReasonChunk(finish_reason=finish_reason)
+
+            # Emit usage delta
+            usage = event.usage
+            yield UsageDeltaChunk(
+                input_tokens=usage.input_tokens or 0,
+                output_tokens=usage.output_tokens,
+                cache_read_tokens=usage.cache_read_input_tokens or 0,
+                cache_write_tokens=usage.cache_creation_input_tokens or 0,
+                reasoning_tokens=0,
+            )
+
+    def raw_message_chunk(self) -> RawMessageChunk:
+        return RawMessageChunk(
+            raw_message=cast(
+                dict[str, Any],
+                {
+                    "role": "assistant",
+                    "content": self.accumulated_blocks,
+                },
+            )
+        )
+
+
+def beta_decode_stream(
+    beta_stream_manager: BetaMessageStreamManager[Any],
+) -> ChunkIterator:
+    """Returns a ChunkIterator converted from a Beta MessageStreamManager."""
+    processor = _BetaChunkProcessor()
+    with beta_stream_manager as stream:
+        for event in stream._raw_stream:  # pyright: ignore[reportPrivateUsage]
+            yield from processor.process_event(event)
+    yield processor.raw_message_chunk()
+
+
+async def beta_decode_async_stream(
+    beta_stream_manager: BetaAsyncMessageStreamManager[Any],
+) -> AsyncChunkIterator:
+    """Returns an AsyncChunkIterator converted from a Beta MessageStreamManager."""
+    processor = _BetaChunkProcessor()
+    async with beta_stream_manager as stream:
+        async for event in stream._raw_stream:  # pyright: ignore[reportPrivateUsage]
+            for item in processor.process_event(event):
+                yield item
+    yield processor.raw_message_chunk()
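To make the role of beta_decode_stream concrete, here is a small usage sketch that drives it with the Anthropic SDK's beta streaming entry point and prints text deltas as they arrive. It assumes client.beta.messages.stream(...) returns the BetaMessageStreamManager referenced in the hunk and that TextChunk is importable from mirascope.llm.content (the ....content package used above); treat it as an illustration of the decoding flow, not documented mirascope usage.

import anthropic

from mirascope.llm.content import TextChunk
from mirascope.llm.providers.anthropic._utils.beta_decode import beta_decode_stream

client = anthropic.Anthropic()

# The beta streaming manager wraps the raw event stream; beta_decode_stream
# consumes it and converts the events into mirascope content chunks.
manager = client.beta.messages.stream(
    model="claude-3-5-sonnet-latest",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Say hello."}],
)

for chunk in beta_decode_stream(manager):
    if isinstance(chunk, TextChunk):
        print(chunk.delta, end="", flush=True)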