mirascope 1.18.3__py3-none-any.whl → 1.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +20 -2
- mirascope/beta/openai/__init__.py +1 -1
- mirascope/beta/openai/realtime/__init__.py +1 -1
- mirascope/beta/openai/realtime/tool.py +1 -1
- mirascope/beta/rag/__init__.py +2 -2
- mirascope/beta/rag/base/__init__.py +2 -2
- mirascope/beta/rag/weaviate/__init__.py +1 -1
- mirascope/core/__init__.py +26 -8
- mirascope/core/anthropic/__init__.py +3 -3
- mirascope/core/anthropic/_utils/_calculate_cost.py +114 -47
- mirascope/core/anthropic/call_response.py +9 -1
- mirascope/core/anthropic/call_response_chunk.py +7 -0
- mirascope/core/anthropic/stream.py +3 -1
- mirascope/core/azure/__init__.py +2 -2
- mirascope/core/azure/_utils/_calculate_cost.py +4 -1
- mirascope/core/azure/call_response.py +9 -1
- mirascope/core/azure/call_response_chunk.py +5 -0
- mirascope/core/azure/stream.py +3 -1
- mirascope/core/base/__init__.py +11 -9
- mirascope/core/base/_utils/__init__.py +10 -10
- mirascope/core/base/_utils/_get_common_usage.py +8 -4
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +2 -2
- mirascope/core/base/_utils/_protocols.py +9 -8
- mirascope/core/base/call_response.py +22 -20
- mirascope/core/base/call_response_chunk.py +12 -1
- mirascope/core/base/stream.py +24 -21
- mirascope/core/base/tool.py +7 -5
- mirascope/core/base/types.py +22 -5
- mirascope/core/bedrock/__init__.py +3 -3
- mirascope/core/bedrock/_utils/_calculate_cost.py +4 -1
- mirascope/core/bedrock/call_response.py +8 -1
- mirascope/core/bedrock/call_response_chunk.py +5 -0
- mirascope/core/bedrock/stream.py +3 -1
- mirascope/core/cohere/__init__.py +2 -2
- mirascope/core/cohere/_utils/_calculate_cost.py +4 -3
- mirascope/core/cohere/call_response.py +9 -1
- mirascope/core/cohere/call_response_chunk.py +5 -0
- mirascope/core/cohere/stream.py +3 -1
- mirascope/core/gemini/__init__.py +2 -2
- mirascope/core/gemini/_utils/_calculate_cost.py +4 -1
- mirascope/core/gemini/_utils/_convert_message_params.py +1 -1
- mirascope/core/gemini/call_response.py +9 -1
- mirascope/core/gemini/call_response_chunk.py +5 -0
- mirascope/core/gemini/stream.py +3 -1
- mirascope/core/google/__init__.py +2 -2
- mirascope/core/google/_utils/_calculate_cost.py +141 -14
- mirascope/core/google/_utils/_convert_message_params.py +23 -51
- mirascope/core/google/_utils/_message_param_converter.py +34 -33
- mirascope/core/google/_utils/_validate_media_type.py +34 -0
- mirascope/core/google/call_response.py +26 -4
- mirascope/core/google/call_response_chunk.py +17 -9
- mirascope/core/google/stream.py +20 -2
- mirascope/core/groq/__init__.py +2 -2
- mirascope/core/groq/_utils/_calculate_cost.py +12 -11
- mirascope/core/groq/call_response.py +9 -1
- mirascope/core/groq/call_response_chunk.py +5 -0
- mirascope/core/groq/stream.py +3 -1
- mirascope/core/litellm/__init__.py +1 -1
- mirascope/core/litellm/_utils/_setup_call.py +7 -3
- mirascope/core/mistral/__init__.py +2 -2
- mirascope/core/mistral/_utils/_calculate_cost.py +10 -9
- mirascope/core/mistral/call_response.py +9 -1
- mirascope/core/mistral/call_response_chunk.py +5 -0
- mirascope/core/mistral/stream.py +3 -1
- mirascope/core/openai/__init__.py +2 -2
- mirascope/core/openai/_utils/_calculate_cost.py +78 -37
- mirascope/core/openai/call_params.py +13 -0
- mirascope/core/openai/call_response.py +14 -1
- mirascope/core/openai/call_response_chunk.py +12 -0
- mirascope/core/openai/stream.py +6 -4
- mirascope/core/vertex/__init__.py +1 -1
- mirascope/core/vertex/_utils/_calculate_cost.py +1 -0
- mirascope/core/vertex/_utils/_convert_message_params.py +1 -1
- mirascope/core/vertex/call_response.py +9 -1
- mirascope/core/vertex/call_response_chunk.py +5 -0
- mirascope/core/vertex/stream.py +3 -1
- mirascope/core/xai/__init__.py +28 -0
- mirascope/core/xai/_call.py +67 -0
- mirascope/core/xai/_utils/__init__.py +6 -0
- mirascope/core/xai/_utils/_calculate_cost.py +104 -0
- mirascope/core/xai/_utils/_setup_call.py +113 -0
- mirascope/core/xai/call_params.py +10 -0
- mirascope/core/xai/call_response.py +27 -0
- mirascope/core/xai/call_response_chunk.py +14 -0
- mirascope/core/xai/dynamic_config.py +8 -0
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +57 -0
- mirascope/core/xai/tool.py +13 -0
- mirascope/integrations/_middleware_factory.py +6 -6
- mirascope/integrations/logfire/_utils.py +1 -1
- mirascope/llm/__init__.py +2 -2
- mirascope/llm/_protocols.py +34 -28
- mirascope/llm/call_response.py +16 -7
- mirascope/llm/llm_call.py +50 -46
- mirascope/llm/stream.py +43 -31
- mirascope/retries/__init__.py +1 -1
- mirascope/tools/__init__.py +2 -2
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/METADATA +3 -1
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/RECORD +101 -88
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/WHEEL +0 -0
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/licenses/LICENSE +0 -0
mirascope/__init__.py
CHANGED
@@ -7,11 +7,20 @@ with suppress(ImportError):
 from . import core as core
 
 from .core import (
+    AudioPart,
+    AudioURLPart,
     BaseDynamicConfig,
     BaseMessageParam,
     BaseTool,
     BaseToolKit,
+    CacheControlPart,
+    DocumentPart,
+    ImagePart,
+    ImageURLPart,
     Messages,
+    TextPart,
+    ToolCallPart,
+    ToolResultPart,
     prompt_template,
 )
 
@@ -24,14 +33,23 @@ with suppress(ImportError):
 __version__ = importlib.metadata.version("mirascope")
 
 __all__ = [
+    "AudioPart",
+    "AudioURLPart",
     "BaseDynamicConfig",
     "BaseMessageParam",
     "BaseTool",
     "BaseToolKit",
+    "CacheControlPart",
+    "DocumentPart",
+    "ImagePart",
+    "ImageURLPart",
+    "Messages",
+    "TextPart",
+    "ToolCallPart",
+    "ToolResultPart",
+    "__version__",
     "core",
     "integrations",
     "prompt_template",
     "retries",
-    "Messages",
-    "__version__",
 ]
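The practical effect of this change is that the multimodal content part types are re-exported from the package root, so they can be imported without reaching into mirascope.core. A minimal sketch, assuming the 1.19.0 exports listed above; the TextPart/BaseMessageParam field names mirror the existing mirascope.core models and should be treated as an assumption if your version differs:

```python
# Minimal sketch: TextPart (and the other part types added to __all__ above)
# can now be imported from the top-level `mirascope` package.
from mirascope import BaseMessageParam, TextPart

# The `type`/`text` fields follow the existing mirascope.core.TextPart model;
# adjust the constructor if your installed version differs.
message = BaseMessageParam(
    role="user",
    content=[TextPart(type="text", text="Hello from the new top-level import!")],
)
print(message.role, message.content)
```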
mirascope/beta/rag/__init__.py
CHANGED
@@ -13,12 +13,12 @@ from .base import (
 
 __all__ = [
     "BaseChunker",
-    "TextChunker",
     "BaseEmbedder",
     "BaseEmbeddingParams",
     "BaseEmbeddingResponse",
     "BaseQueryResults",
-    "BaseVectorStoreParams",
     "BaseVectorStore",
+    "BaseVectorStoreParams",
     "Document",
+    "TextChunker",
 ]

mirascope/beta/rag/base/__init__.py
CHANGED
@@ -11,12 +11,12 @@ from .vectorstores import BaseVectorStore
 
 __all__ = [
     "BaseChunker",
-    "TextChunker",
     "BaseEmbedder",
     "BaseEmbeddingParams",
     "BaseEmbeddingResponse",
     "BaseQueryResults",
-    "BaseVectorStoreParams",
     "BaseVectorStore",
+    "BaseVectorStoreParams",
     "Document",
+    "TextChunker",
 ]
mirascope/core/__init__.py
CHANGED
@@ -4,6 +4,8 @@ from contextlib import suppress
 
 from . import base
 from .base import (
+    AudioPart,
+    AudioURLPart,
     BaseCallResponse,
     BaseDynamicConfig,
     BaseMessageParam,
@@ -11,9 +13,16 @@ from .base import (
     BaseStream,
     BaseTool,
     BaseToolKit,
+    CacheControlPart,
+    DocumentPart,
     FromCallArgs,
+    ImagePart,
+    ImageURLPart,
     Messages,
     ResponseModelConfigDict,
+    TextPart,
+    ToolCallPart,
+    ToolResultPart,
     merge_decorators,
     metadata,
     prompt_template,
@@ -51,30 +60,39 @@ with suppress(ImportError):
     from . import azure as azure
 
 __all__ = [
-    "
-    "
-    "
+    "AudioPart",
+    "AudioURLPart",
+    "BaseCallResponse",
     "BaseCallResponse",
     "BaseDynamicConfig",
     "BaseMessageParam",
     "BasePrompt",
+    "BaseStream",
     "BaseTool",
     "BaseToolKit",
-    "
-    "
-    "cohere",
+    "CacheControlPart",
+    "DocumentPart",
     "FromCallArgs",
+    "ImagePart",
+    "ImageURLPart",
+    "Messages",
+    "ResponseModelConfigDict",
+    "TextPart",
+    "ToolCallPart",
+    "ToolResultPart",
+    "anthropic",
+    "azure",
+    "base",
+    "cohere",
     "gemini",
     "google",
     "groq",
     "litellm",
     "merge_decorators",
-    "Messages",
     "metadata",
     "mistral",
     "openai",
     "prompt_template",
-    "ResponseModelConfigDict",
     "toolkit_tool",
     "vertex",
 ]

mirascope/core/anthropic/__init__.py
CHANGED
@@ -17,15 +17,15 @@ from .tool import AnthropicTool, AnthropicToolConfig
 AnthropicMessageParam: TypeAlias = MessageParam | BaseMessageParam
 
 __all__ = [
-    "call",
-    "AsyncAnthropicDynamicConfig",
-    "AnthropicDynamicConfig",
     "AnthropicCallParams",
     "AnthropicCallResponse",
     "AnthropicCallResponseChunk",
+    "AnthropicDynamicConfig",
     "AnthropicMessageParam",
     "AnthropicStream",
     "AnthropicTool",
     "AnthropicToolConfig",
+    "AsyncAnthropicDynamicConfig",
     "anthropic_call",
+    "call",
 ]

mirascope/core/anthropic/_utils/_calculate_cost.py
CHANGED
@@ -3,6 +3,7 @@
 
 def calculate_cost(
     input_tokens: int | float | None,
+    cached_tokens: int | float | None,
     output_tokens: int | float | None,
     model: str = "claude-3-haiku-20240229",
 ) -> float | None:
@@ -10,112 +11,178 @@ def calculate_cost(
 
     https://www.anthropic.com/api
 
-
-    claude-
-    claude-
-    claude-3-
-    claude-3-sonnet
-    claude-3-
+    Model                                       Input               Cached              Output
+    claude-3-5-haiku                            $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    claude-3-5-haiku-20241022                   $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    claude-3-5-sonnet                           $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-5-sonnet-20241022                  $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-5-sonnet-20240620                  $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-haiku                              $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    claude-3-haiku-20240307                     $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    claude-3-sonnet                             $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-sonnet-20240620                    $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-opus                               $15.00 / 1M tokens  $1.50 / 1M tokens   $75.00 / 1M tokens
+    claude-3-opus-20240229                      $15.00 / 1M tokens  $1.50 / 1M tokens   $75.00 / 1M tokens
+    claude-2.1                                  $8.00 / 1M tokens                       $24.00 / 1M tokens
+    claude-2.0                                  $8.00 / 1M tokens                       $24.00 / 1M tokens
+    claude-instant-1.2                          $0.80 / 1M tokens                       $2.40 / 1M tokens
+    anthropic.claude-3-5-sonnet-20241022-v2:0   $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    anthropic.claude-3-5-sonnet-20241022-v1:0   $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    anthropic.claude-3-5-haiku-20241022-v1:0    $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    anthropic.claude-3-sonnet-20240620-v1:0     $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    anthropic.claude-3-haiku-20240307-v1:0      $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    anthropic.claude-3-opus-20240229-v1:0       $15.00 / 1M tokens  $1.50 / 1M tokens   $75.00 / 1M tokens
+    claude-3-5-sonnet@20241022                  $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-5-haiku@20241022                   $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    claude-3-sonnet@20240620                    $3.00 / 1M tokens   $0.30 / 1M tokens   $15.00 / 1M tokens
+    claude-3-haiku@20240307                     $0.80 / 1M tokens   $0.08 / 1M tokens   $4.00 / 1M tokens
+    claude-3-opus@20240229                      $15.00 / 1M tokens  $1.50 / 1M tokens   $75.00 / 1M tokens
     """
     pricing = {
-
+        # Anthropic models
+        "claude-3-5-haiku": {
             "prompt": 0.000_000_8,
-            "completion": 0.
-
-        "claude-2.0": {
-            "prompt": 0.000_008,
-            "completion": 0.000_024,
-        },
-        "claude-2.1": {
-            "prompt": 0.000_008,
-            "completion": 0.000_024,
-        },
-        "claude-3-haiku-20240307": {
-            "prompt": 0.000_002_5,
-            "completion": 0.000_012_5,
+            "completion": 0.000_004,
+            "cached": 0.000_000_08,
         },
         "claude-3-5-haiku-20241022": {
-            "prompt": 0.
+            "prompt": 0.000_000_8,
             "completion": 0.000_004,
+            "cached": 0.000_000_08,
         },
-        "claude-3-sonnet
+        "claude-3-5-sonnet": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
-
-        "claude-3-opus-20240229": {
-            "prompt": 0.000_015,
-            "completion": 0.000_075,
+            "cached": 0.000_000_3,
         },
         "claude-3-5-sonnet-20241022": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
         "claude-3-5-sonnet-20240620": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
-
-
-        "
-        "
+        "claude-3-haiku": {
+            "prompt": 0.000_000_8,
+            "completion": 0.000_004,
+            "cached": 0.000_000_08,
         },
-        "
-            "prompt": 0.
+        "claude-3-haiku-20240307": {
+            "prompt": 0.000_000_8,
             "completion": 0.000_004,
+            "cached": 0.000_000_08,
         },
-        "
+        "claude-3-sonnet": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
-        "
+        "claude-3-sonnet-20240620": {
+            "prompt": 0.000_003,
+            "completion": 0.000_015,
+            "cached": 0.000_000_3,
+        },
+        "claude-3-opus": {
+            "prompt": 0.000_015,
+            "completion": 0.000_075,
+            "cached": 0.000_001_5,
+        },
+        "claude-3-opus-20240229": {
             "prompt": 0.000_015,
             "completion": 0.000_075,
+            "cached": 0.000_001_5,
+        },
+        "claude-2.1": {
+            "prompt": 0.000_008,
+            "completion": 0.000_024,
+            "cached": 0,
         },
-        "
+        "claude-2.0": {
+            "prompt": 0.000_008,
+            "completion": 0.000_024,
+            "cached": 0,
+        },
+        "claude-instant-1.2": {
+            "prompt": 0.000_000_8,
+            "completion": 0.000_002_4,
+            "cached": 0,
+        },
+        # Bedrock models
+        "anthropic.claude-3-5-sonnet-20241022-v2:0": {
            "prompt": 0.000_003,
            "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
         "anthropic.claude-3-5-sonnet-20241022-v1:0": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
-
-
-            "prompt": 0.000_002_5,
-            "completion": 0.000_012_5,
-        },
-        "claude-3-5-haiku@20241022": {
-            "prompt": 0.000_008,
+        "anthropic.claude-3-5-haiku-20241022-v1:0": {
+            "prompt": 0.000_000_8,
             "completion": 0.000_004,
+            "cached": 0.000_000_08,
         },
-        "claude-3-sonnet
+        "anthropic.claude-3-sonnet-20240620-v1:0": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
-        "claude-3-
+        "anthropic.claude-3-haiku-20240307-v1:0": {
+            "prompt": 0.000_000_8,
+            "completion": 0.000_004,
+            "cached": 0.000_000_08,
+        },
+        "anthropic.claude-3-opus-20240229-v1:0": {
             "prompt": 0.000_015,
             "completion": 0.000_075,
+            "cached": 0.000_001_5,
         },
-
+        # Vertex AI models
+        "claude-3-5-sonnet@20241022": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
         },
-        "claude-3-5-
+        "claude-3-5-haiku@20241022": {
+            "prompt": 0.000_000_8,
+            "completion": 0.000_004,
+            "cached": 0.000_000_08,
+        },
+        "claude-3-sonnet@20240620": {
             "prompt": 0.000_003,
             "completion": 0.000_015,
+            "cached": 0.000_000_3,
+        },
+        "claude-3-haiku@20240307": {
+            "prompt": 0.000_000_8,
+            "completion": 0.000_004,
+            "cached": 0.000_000_08,
+        },
+        "claude-3-opus@20240229": {
+            "prompt": 0.000_015,
+            "completion": 0.000_075,
+            "cached": 0.000_001_5,
         },
     }
 
     if input_tokens is None or output_tokens is None:
         return None
 
+    if cached_tokens is None:
+        cached_tokens = 0
+
     try:
         model_pricing = pricing[model]
     except KeyError:
         return None
 
     prompt_cost = input_tokens * model_pricing["prompt"]
+    cached_cost = cached_tokens * model_pricing["cached"]
     completion_cost = output_tokens * model_pricing["completion"]
-    total_cost = prompt_cost + completion_cost
+    total_cost = prompt_cost + cached_cost + completion_cost
 
     return total_cost
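The updated function now prices cache reads separately: uncached prompt, cached, and completion tokens are each multiplied by their per-token rate and summed. A quick arithmetic sanity check using the claude-3-5-sonnet rates from the docstring table above ($3.00 input, $0.30 cached, $15.00 output per 1M tokens); the token counts are made up for illustration:

```python
# Hypothetical token counts for illustration; the per-token rates below come
# from the pricing table in the diff above (per-1M price divided by 1e6).
input_tokens, cached_tokens, output_tokens = 1_000, 9_000, 500

prompt_cost = input_tokens * 0.000_003       # uncached prompt tokens
cached_cost = cached_tokens * 0.000_000_3    # cache-read prompt tokens
completion_cost = output_tokens * 0.000_015  # output tokens

total_cost = prompt_cost + cached_cost + completion_cost
print(f"${total_cost:.4f}")  # $0.0132
```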

mirascope/core/anthropic/call_response.py
CHANGED
@@ -100,6 +100,12 @@ class AnthropicCallResponse(
         """Returns the number of input tokens."""
         return self.usage.input_tokens
 
+    @computed_field
+    @property
+    def cached_tokens(self) -> int:
+        """Returns the number of cached tokens."""
+        return getattr(self.usage, "cache_read_input_tokens", 0)
+
     @computed_field
     @property
     def output_tokens(self) -> int:
@@ -110,7 +116,9 @@ class AnthropicCallResponse(
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
-        return calculate_cost(
+        return calculate_cost(
+            self.input_tokens, self.cached_tokens, self.output_tokens, self.model
+        )
 
     @computed_field
     @cached_property

mirascope/core/anthropic/call_response_chunk.py
CHANGED
@@ -100,6 +100,13 @@ class AnthropicCallResponseChunk(
             return usage.input_tokens
         return None
 
+    @property
+    def cached_tokens(self) -> int | None:
+        """Returns the number of cached tokens."""
+        if (usage := self.usage) and isinstance(usage, Usage):
+            return getattr(usage, "cache_read_input_tokens", 0)
+        return None
+
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""

mirascope/core/anthropic/stream.py
CHANGED
@@ -67,7 +67,9 @@ class AnthropicStream(
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
-        return calculate_cost(
+        return calculate_cost(
+            self.input_tokens, self.cached_tokens, self.output_tokens, self.model
+        )
 
     def _construct_message_param(
         self, tool_calls: list[ToolUseBlock] | None = None, content: str | None = None
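With cached_tokens exposed on the response, chunk, and stream classes, cache reads reported in Anthropic's usage block (cache_read_input_tokens) flow into the cost calculation automatically. A minimal sketch, assuming the standard mirascope call decorator and a configured ANTHROPIC_API_KEY; the prompt function below is illustrative, not taken from the diff:

```python
from mirascope.core import anthropic


# Illustrative prompt: any @anthropic.call-decorated function would do.
@anthropic.call("claude-3-5-sonnet-20241022")
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book."


response = recommend_book("fantasy")
# cached_tokens reads usage.cache_read_input_tokens (0 when nothing was cached),
# and cost now includes it via the updated calculate_cost signature.
print(response.input_tokens, response.cached_tokens, response.output_tokens)
print(response.cost)
```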
mirascope/core/azure/__init__.py
CHANGED
@@ -18,14 +18,14 @@ AzureMessageParam: TypeAlias = ChatRequestMessage | BaseMessageParam
 
 __all__ = [
     "AsyncAzureDynamicConfig",
-    "call",
-    "AzureDynamicConfig",
     "AzureCallParams",
     "AzureCallResponse",
     "AzureCallResponseChunk",
+    "AzureDynamicConfig",
     "AzureMessageParam",
     "AzureStream",
     "AzureTool",
     "AzureToolConfig",
     "azure_call",
+    "call",
 ]

mirascope/core/azure/_utils/_calculate_cost.py
CHANGED
@@ -2,7 +2,10 @@
 
 
 def calculate_cost(
-    input_tokens: int | float | None,
+    input_tokens: int | float | None,
+    cached_tokens: int | float | None,
+    output_tokens: int | float | None,
+    model: str,
 ) -> float | None:
     """Calculate the cost of a completion using the Azure API."""
     return None

mirascope/core/azure/call_response.py
CHANGED
@@ -104,6 +104,12 @@ class AzureCallResponse(
         """Returns the number of input tokens."""
         return self.usage.prompt_tokens if self.usage else None
 
+    @computed_field
+    @property
+    def cached_tokens(self) -> int | None:
+        """Returns the number of cached tokens."""
+        return None
+
     @computed_field
     @property
     def output_tokens(self) -> int | None:
@@ -114,7 +120,9 @@ class AzureCallResponse(
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
-        return calculate_cost(
+        return calculate_cost(
+            self.input_tokens, self.cached_tokens, self.output_tokens, self.model
+        )
 
     @computed_field
     @cached_property

mirascope/core/azure/call_response_chunk.py
CHANGED
@@ -84,6 +84,11 @@ class AzureCallResponseChunk(
         """Returns the number of input tokens."""
         return self.usage.prompt_tokens
 
+    @property
+    def cached_tokens(self) -> int:
+        """Returns the number of cached tokens."""
+        return 0
+
     @property
     def output_tokens(self) -> int:
         """Returns the number of output tokens."""
mirascope/core/azure/stream.py
CHANGED
@@ -69,7 +69,9 @@ class AzureStream(
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
-        return calculate_cost(
+        return calculate_cost(
+            self.input_tokens, self.cached_tokens, self.output_tokens, self.model
+        )
 
     def _construct_message_param(
         self,
mirascope/core/base/__init__.py
CHANGED
@@ -30,12 +30,12 @@ from .stream import BaseStream
 from .structured_stream import BaseStructuredStream
 from .tool import BaseTool, GenerateJsonSchemaNoTitles, ToolConfig
 from .toolkit import BaseToolKit, toolkit_tool
-from .types import AudioSegment
+from .types import AudioSegment, JsonableType, Usage
 
 __all__ = [
     "AudioPart",
-    "AudioURLPart",
     "AudioSegment",
+    "AudioURLPart",
     "BaseCallKwargs",
     "BaseCallParams",
     "BaseCallResponse",
@@ -49,25 +49,27 @@ __all__ = [
     "BaseToolKit",
     "BaseType",
     "CacheControlPart",
-    "call_factory",
     "CommonCallParams",
     "DocumentPart",
     "FromCallArgs",
     "GenerateJsonSchemaNoTitles",
     "ImagePart",
     "ImageURLPart",
-    "
-    "metadata",
+    "JsonableType",
     "Messages",
     "Metadata",
-    "prompt_template",
     "ResponseModelConfigDict",
     "TextPart",
-    "ToolConfig",
     "ToolCallPart",
+    "ToolConfig",
     "ToolResultPart",
-    "
-    "transform_tool_outputs",
+    "Usage",
     "_partial",
     "_utils",
+    "call_factory",
+    "merge_decorators",
+    "metadata",
+    "prompt_template",
+    "toolkit_tool",
+    "transform_tool_outputs",
 ]