mirascope 1.16.5__py3-none-any.whl → 1.16.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/core/anthropic/_utils/_calculate_cost.py +24 -0
- mirascope/core/anthropic/call_response.py +7 -0
- mirascope/core/azure/call_response.py +7 -0
- mirascope/core/base/call_response.py +8 -0
- mirascope/core/bedrock/call_response.py +7 -0
- mirascope/core/cohere/call_response.py +7 -0
- mirascope/core/gemini/_utils/_calculate_cost.py +58 -2
- mirascope/core/gemini/call_response.py +7 -0
- mirascope/core/groq/call_response.py +7 -0
- mirascope/core/litellm/call_response.py +2 -0
- mirascope/core/mistral/call_response.py +7 -0
- mirascope/core/openai/_utils/_calculate_cost.py +113 -23
- mirascope/core/openai/call_response.py +7 -0
- mirascope/core/vertex/call_response.py +7 -0
- mirascope/llm/call_response.py +2 -0
- {mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/METADATA +1 -1
- {mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/RECORD +19 -19
- {mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/WHEEL +0 -0
- {mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/licenses/LICENSE +0 -0
mirascope/core/anthropic/_utils/_calculate_cost.py CHANGED

@@ -34,6 +34,10 @@ def calculate_cost(
             "prompt": 0.000_002_5,
             "completion": 0.000_012_5,
         },
+        "claude-3-5-haiku-20241022": {
+            "prompt": 0.000_008,
+            "completion": 0.000_004,
+        },
         "claude-3-sonnet-20240229": {
             "prompt": 0.000_003,
             "completion": 0.000_015,

@@ -42,6 +46,10 @@ def calculate_cost(
             "prompt": 0.000_015,
             "completion": 0.000_075,
         },
+        "claude-3-5-sonnet-20241022": {
+            "prompt": 0.000_003,
+            "completion": 0.000_015,
+        },
         "claude-3-5-sonnet-20240620": {
             "prompt": 0.000_003,
             "completion": 0.000_015,

@@ -51,6 +59,10 @@ def calculate_cost(
             "prompt": 0.000_002_5,
             "completion": 0.000_012_5,
         },
+        "anthropic.claude-3-5-haiku-20241022-v1:0": {
+            "prompt": 0.000_008,
+            "completion": 0.000_004,
+        },
         "anthropic.claude-3-sonnet-20240229-v1:0": {
             "prompt": 0.000_003,
             "completion": 0.000_015,

@@ -63,11 +75,19 @@ def calculate_cost(
             "prompt": 0.000_003,
             "completion": 0.000_015,
         },
+        "anthropic.claude-3-5-sonnet-20241022-v1:0": {
+            "prompt": 0.000_003,
+            "completion": 0.000_015,
+        },
         # Vertex AI models
         "claude-3-haiku@20240307": {
             "prompt": 0.000_002_5,
             "completion": 0.000_012_5,
         },
+        "claude-3-5-haiku@20241022": {
+            "prompt": 0.000_008,
+            "completion": 0.000_004,
+        },
         "claude-3-sonnet@20240229": {
             "prompt": 0.000_003,
             "completion": 0.000_015,

@@ -80,6 +100,10 @@ def calculate_cost(
             "prompt": 0.000_003,
             "completion": 0.000_015,
         },
+        "claude-3-5-sonnet@20241022": {
+            "prompt": 0.000_003,
+            "completion": 0.000_015,
+        },
     }

     if input_tokens is None or output_tokens is None:
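For orientation, the values in this pricing dict are USD per token, so "prompt": 0.000_003 is $3.00 per 1M input tokens and "completion": 0.000_015 is $15.00 per 1M output tokens. A minimal sketch of the arithmetic the helper performs, using the newly added claude-3-5-sonnet-20241022 entry (the token counts are made up for illustration):

    # Prices taken from the dict above; both are USD per token.
    prompt_price = 0.000_003      # $3.00 per 1M input tokens
    completion_price = 0.000_015  # $15.00 per 1M output tokens

    input_tokens, output_tokens = 2_000, 500  # hypothetical usage
    cost = input_tokens * prompt_price + output_tokens * completion_price
    print(round(cost, 6))  # 0.0135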
mirascope/core/anthropic/call_response.py CHANGED

@@ -64,22 +64,26 @@ class AnthropicCallResponse(

     _provider = "anthropic"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the string text of the 0th text block."""
         block = self.response.content[0]
         return block.text if block.type == "text" else ""

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""
         return [str(self.response.stop_reason)]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model."""
         return self.response.model

+    @computed_field
     @property
     def id(self) -> str:
         """Returns the id of the response."""

@@ -90,16 +94,19 @@ class AnthropicCallResponse(
         """Returns the usage of the message."""
         return self.response.usage

+    @computed_field
     @property
     def input_tokens(self) -> int:
         """Returns the number of input tokens."""
         return self.usage.input_tokens

+    @computed_field
     @property
     def output_tokens(self) -> int:
         """Returns the number of output tokens."""
         return self.usage.output_tokens

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/azure/call_response.py CHANGED

@@ -68,22 +68,26 @@ class AzureCallResponse(

     _provider = "azure"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the content of the chat completion for the 0th choice."""
         message = self.response.choices[0].message
         return message.content if message.content is not None else ""

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""
         return [str(choice.finish_reason) for choice in self.response.choices]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model."""
         return self.response.model

+    @computed_field
     @property
     def id(self) -> str:
         """Returns the id of the response."""

@@ -94,16 +98,19 @@ class AzureCallResponse(
         """Returns the usage of the chat completion."""
         return self.response.usage

+    @computed_field
     @property
     def input_tokens(self) -> int | None:
         """Returns the number of input tokens."""
         return self.usage.prompt_tokens if self.usage else None

+    @computed_field
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""
         return self.usage.completion_tokens if self.usage else None

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/base/call_response.py CHANGED

@@ -155,6 +155,7 @@ class BaseCallResponse(
         """Returns the string content of the response."""
         return self.content

+    @computed_field
     @property
     @abstractmethod
     def content(self) -> str:

@@ -168,6 +169,7 @@ class BaseCallResponse(
         """
         ...

+    @computed_field
     @property
     @abstractmethod
     def finish_reasons(self) -> list[str] | None:

@@ -177,18 +179,21 @@ class BaseCallResponse(
         """
         ...

+    @computed_field
     @property
     @abstractmethod
     def model(self) -> str | None:
         """Should return the name of the response model."""
         ...

+    @computed_field
     @property
     @abstractmethod
     def id(self) -> str | None:
         """Should return the id of the response."""
         ...

+    @computed_field
     @property
     @abstractmethod
     def usage(self) -> Any:  # noqa: ANN401

@@ -198,6 +203,7 @@ class BaseCallResponse(
         """
         ...

+    @computed_field
     @property
     @abstractmethod
     def input_tokens(self) -> int | float | None:

@@ -207,6 +213,7 @@ class BaseCallResponse(
         """
         ...

+    @computed_field
     @property
     @abstractmethod
     def output_tokens(self) -> int | float | None:

@@ -216,6 +223,7 @@ class BaseCallResponse(
         """
         ...

+    @computed_field
     @property
     @abstractmethod
     def cost(self) -> float | None:
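For context on the pattern repeated across this release: pydantic's computed_field decorator, stacked on top of @property, makes the property part of the model's serialized output (model_dump, model_dump_json), which is why it is being added to content, finish_reasons, model, id, token counts, and cost on every provider's call response class. A minimal standalone sketch of that behavior; the Example model below is hypothetical and not part of mirascope:

    from pydantic import BaseModel, computed_field

    class Example(BaseModel):
        prompt_tokens: int
        completion_tokens: int

        @computed_field
        @property
        def total_tokens(self) -> int:
            # Derived value; computed_field includes it when serializing.
            return self.prompt_tokens + self.completion_tokens

    print(Example(prompt_tokens=3, completion_tokens=5).model_dump())
    # {'prompt_tokens': 3, 'completion_tokens': 5, 'total_tokens': 8}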
mirascope/core/bedrock/call_response.py CHANGED

@@ -90,6 +90,7 @@ class BedrockCallResponse(
             return cast(SyncMessageTypeDef | AsyncMessageTypeDef, message)
         return None

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the content of the chat completion for the 0th choice."""

@@ -99,16 +100,19 @@ class BedrockCallResponse(
             return content[0].get("text", "")
         return ""

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""
         return [self.response["stopReason"]]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model."""
         return cast(BedrockCallKwargs, self.call_kwargs)["modelId"]

+    @computed_field
     @property
     def id(self) -> str:
         """Returns the id of the response."""

@@ -119,16 +123,19 @@ class BedrockCallResponse(
         """Returns the usage of the chat completion."""
         return self.response["usage"]

+    @computed_field
     @property
     def input_tokens(self) -> int | None:
         """Returns the number of input tokens."""
         return self.usage["inputTokens"] if self.usage else None

+    @computed_field
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""
         return self.usage["outputTokens"] if self.usage else None

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/cohere/call_response.py CHANGED

@@ -63,16 +63,19 @@ class CohereCallResponse(

     _provider = "cohere"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the content of the chat completion for the 0th choice."""
         return self.response.text

+    @computed_field
     @property
     def finish_reasons(self) -> list[str] | None:
         """Returns the finish reasons of the response."""
         return [str(self.response.finish_reason)]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model.

@@ -81,6 +84,7 @@ class CohereCallResponse(
         """
         return self._model

+    @computed_field
     @property
     def id(self) -> str | None:
         """Returns the id of the response."""

@@ -93,6 +97,7 @@ class CohereCallResponse(
             return self.response.meta.billed_units
         return None

+    @computed_field
     @property
     def input_tokens(self) -> float | None:
         """Returns the number of input tokens."""

@@ -100,6 +105,7 @@ class CohereCallResponse(
             return self.usage.input_tokens
         return None

+    @computed_field
     @property
     def output_tokens(self) -> float | None:
         """Returns the number of output tokens."""

@@ -107,6 +113,7 @@ class CohereCallResponse(
             return self.usage.output_tokens
         return None

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the response."""
mirascope/core/gemini/_utils/_calculate_cost.py CHANGED

@@ -4,5 +4,61 @@
 def calculate_cost(
     input_tokens: int | float | None, output_tokens: int | float | None, model: str
 ) -> float | None:
-    """Calculate the cost of a Gemini API call.
-
+    """Calculate the cost of a Gemini API call.
+
+    https://ai.google.dev/pricing#1_5flash
+
+    Model                 Input (<=128K)   Output (<=128K)   Input (>128K)   Output (>128K)
+    gemini-1.5-flash      $0.075 / 1M      $0.3 / 1M         $0.15 / 1M      $0.6 / 1M
+    gemini-1.5-flash-8b   $0.0375 / 1M     $0.15 / 1M        $0.075 / 1M     $0.3 / 1M
+    gemini-1.5-pro        $1.25 / 1M       $5.0 / 1M         $2.5 / 1M       $10.0 / 1M
+    gemini-1.0-pro        $0.50 / 1M       $1.5 / 1M         $0.5 / 1M       $1.5 / 1M
+    """
+    pricing = {
+        "gemini-1.5-flash": {
+            "prompt_short": 0.000_000_075,
+            "completion_short": 0.000_000_3,
+            "prompt_long": 0.000_000_15,
+            "completion_long": 0.000_000_6,
+        },
+        "gemini-1.5-flash-8b": {
+            "prompt_short": 0.000_000_037_5,
+            "completion_short": 0.000_000_15,
+            "prompt_long": 0.000_000_075,
+            "completion_long": 0.000_000_3,
+        },
+        "gemini-1.5-pro": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+        },
+        "gemini-1.0-pro": {
+            "prompt_short": 0.000_000_5,
+            "completion_short": 0.000_001_5,
+            "prompt_long": 0.000_000_5,
+            "completion_long": 0.000_001_5,
+        },
+    }
+
+    if input_tokens is None or output_tokens is None:
+        return None
+
+    try:
+        model_pricing = pricing[model]
+    except KeyError:
+        return None
+
+    # Determine if we're using long context pricing
+    use_long_context = input_tokens > 128_000
+
+    prompt_price = model_pricing["prompt_long" if use_long_context else "prompt_short"]
+    completion_price = model_pricing[
+        "completion_long" if use_long_context else "completion_short"
+    ]
+
+    prompt_cost = input_tokens * prompt_price
+    completion_cost = output_tokens * completion_price
+    total_cost = prompt_cost + completion_cost
+
+    return total_cost
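The new Gemini helper picks the <=128K or >128K pricing tier based solely on the prompt token count. A short sketch of how that plays out; the helper is private to mirascope (mirascope/core/gemini/_utils/_calculate_cost.py), so calling it directly is only for illustration:

    from mirascope.core.gemini._utils._calculate_cost import calculate_cost

    # Short-context tier (input <= 128K): 10K * $0.075/1M + 1K * $0.30/1M = 0.00105
    print(calculate_cost(10_000, 1_000, "gemini-1.5-flash"))

    # Long-context tier (input > 128K): 200K * $0.15/1M + 1K * $0.60/1M = 0.0306
    print(calculate_cost(200_000, 1_000, "gemini-1.5-flash"))

    # Missing token counts or an unlisted model name fall back to None
    print(calculate_cost(None, 1_000, "gemini-1.5-flash"))  # None
    print(calculate_cost(10_000, 1_000, "unknown-model"))   # None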
mirascope/core/gemini/call_response.py CHANGED

@@ -65,11 +65,13 @@ class GeminiCallResponse(

     _provider = "gemini"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the contained string content for the 0th choice."""
         return self.response.candidates[0].content.parts[0].text

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""

@@ -87,6 +89,7 @@ class GeminiCallResponse(
             for candidate in self.response.candidates
         ]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the model name.

@@ -96,6 +99,7 @@ class GeminiCallResponse(
         """
         return self._model

+    @computed_field
     @property
     def id(self) -> str | None:
         """Returns the id of the response.

@@ -112,16 +116,19 @@ class GeminiCallResponse(
         """
         return None

+    @computed_field
     @property
     def input_tokens(self) -> None:
         """Returns the number of input tokens."""
         return None

+    @computed_field
     @property
     def output_tokens(self) -> None:
         """Returns the number of output tokens."""
         return None

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/groq/call_response.py CHANGED

@@ -62,22 +62,26 @@ class GroqCallResponse(

     _provider = "groq"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the content of the chat completion for the 0th choice."""
         message = self.response.choices[0].message
         return message.content if message.content is not None else ""

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""
         return [str(choice.finish_reason) for choice in self.response.choices]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model."""
         return self.response.model

+    @computed_field
     @property
     def id(self) -> str:
         """Returns the id of the response."""

@@ -88,16 +92,19 @@ class GroqCallResponse(
         """Returns the usage of the chat completion."""
         return self.response.usage

+    @computed_field
     @property
     def input_tokens(self) -> int | None:
         """Returns the number of input tokens."""
         return self.usage.prompt_tokens if self.usage else None

+    @computed_field
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""
         return self.usage.completion_tokens if self.usage else None

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/litellm/call_response.py CHANGED

@@ -4,6 +4,7 @@ usage docs: learn/calls.md#handling-responses
 """

 from litellm.cost_calculator import completion_cost
+from pydantic import computed_field

 from ..openai import OpenAICallResponse

@@ -17,6 +18,7 @@ class LiteLLMCallResponse(OpenAICallResponse):

     _provider = "litellm"

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/mistral/call_response.py CHANGED

@@ -69,11 +69,13 @@ class MistralCallResponse(
     def _response_choices(self) -> list[ChatCompletionChoice]:
         return self.response.choices or []

+    @computed_field
     @property
     def content(self) -> str:
         """The content of the chat completion for the 0th choice."""
         return cast(str, self._response_choices[0].message.content) or ""

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""

@@ -82,11 +84,13 @@ class MistralCallResponse(
             for choice in self._response_choices
         ]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model."""
         return self.response.model

+    @computed_field
     @property
     def id(self) -> str:
         """Returns the id of the response."""

@@ -97,16 +101,19 @@ class MistralCallResponse(
         """Returns the usage of the chat completion."""
         return self.response.usage

+    @computed_field
     @property
     def input_tokens(self) -> int:
         """Returns the number of input tokens."""
         return self.usage.prompt_tokens

+    @computed_field
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""
         return self.usage.completion_tokens

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/openai/_utils/_calculate_cost.py CHANGED

@@ -10,35 +10,49 @@ def calculate_cost(

     https://openai.com/pricing

-    Model
-    gpt-4o
-    gpt-4o-
-    gpt-4o
-    gpt-4o-2024-
-    gpt-4o-
-    gpt-
-    gpt-
-    gpt-
-    gpt-
-    gpt-
-    gpt-
-
-
-
+    Model                                     Input                Output
+    gpt-4o                                    $2.50 / 1M tokens    $10.00 / 1M tokens
+    gpt-4o-2024-11-20                         $2.50 / 1M tokens    $10.00 / 1M tokens
+    gpt-4o-2024-08-06                         $2.50 / 1M tokens    $10.00 / 1M tokens
+    gpt-4o-2024-05-13                         $5.00 / 1M tokens    $15.00 / 1M tokens
+    gpt-4o-audio-preview                      $2.50 / 1M tokens    $10.00 / 1M tokens
+    gpt-4o-audio-preview-2024-12-17           $2.50 / 1M tokens    $10.00 / 1M tokens
+    gpt-4o-audio-preview-2024-10-01           $2.50 / 1M tokens    $10.00 / 1M tokens
+    gpt-4o-realtime-preview                   $5.00 / 1M tokens    $20.00 / 1M tokens
+    gpt-4o-realtime-preview-2024-12-17        $5.00 / 1M tokens    $20.00 / 1M tokens
+    gpt-4o-realtime-preview-2024-10-01        $5.00 / 1M tokens    $20.00 / 1M tokens
+    gpt-4o-mini                               $0.15 / 1M tokens    $0.60 / 1M tokens
+    gpt-4o-mini-2024-07-18                    $0.15 / 1M tokens    $0.60 / 1M tokens
+    gpt-4o-mini-audio-preview                 $0.15 / 1M tokens    $0.60 / 1M tokens
+    gpt-4o-mini-audio-preview-2024-12-17      $0.15 / 1M tokens    $0.60 / 1M tokens
+    gpt-4o-mini-realtime-preview              $0.60 / 1M tokens    $2.40 / 1M tokens
+    gpt-4o-mini-realtime-preview-2024-12-17   $0.60 / 1M tokens    $2.40 / 1M tokens
+    o1                                        $15.00 / 1M tokens   $60.00 / 1M tokens
+    o1-2024-12-17                             $15.00 / 1M tokens   $60.00 / 1M tokens
+    o1-preview-2024-09-12                     $15.00 / 1M tokens   $60.00 / 1M tokens
+    o3-mini                                   $1.10 / 1M tokens    $4.40 / 1M tokens
+    o3-mini-2025-01-31                        $1.10 / 1M tokens    $4.40 / 1M tokens
+    o1-mini                                   $1.10 / 1M tokens    $4.40 / 1M tokens
+    o1-mini-2024-09-12                        $1.10 / 1M tokens    $4.40 / 1M tokens
+    gpt-4-turbo                               $10.00 / 1M tokens   $30.00 / 1M tokens
+    gpt-4-turbo-2024-04-09                    $10.00 / 1M tokens   $30.00 / 1M tokens
+    gpt-3.5-turbo-0125                        $0.50 / 1M tokens    $1.50 / 1M tokens
+    gpt-3.5-turbo-1106                        $1.00 / 1M tokens    $2.00 / 1M tokens
+    gpt-4-1106-preview                        $10.00 / 1M tokens   $30.00 / 1M tokens
+    gpt-4                                     $30.00 / 1M tokens   $60.00 / 1M tokens
+    text-embedding-3-small                    $0.02 / 1M tokens
+    text-embedding-3-large                    $0.13 / 1M tokens
+    text-embedding-ada-0002                   $0.10 / 1M tokens
     """
     pricing = {
-        "gpt-4o-mini": {
-            "prompt": 0.000_000_15,
-            "completion": 0.000_000_6,
-        },
-        "gpt-4o-mini-2024-07-18": {
-            "prompt": 0.000_000_15,
-            "completion": 0.000_000_6,
-        },
         "gpt-4o": {
             "prompt": 0.000_002_5,
             "completion": 0.000_01,
         },
+        "gpt-4o-2024-11-20": {
+            "prompt": 0.000_002_5,
+            "completion": 0.000_01,
+        },
         "gpt-4o-2024-08-06": {
             "prompt": 0.000_002_5,
             "completion": 0.000_01,

@@ -47,6 +61,82 @@ def calculate_cost(
             "prompt": 0.000_005,
             "completion": 0.000_015,
         },
+        "gpt-4o-audio-preview": {
+            "prompt": 0.000_002_5,
+            "completion": 0.000_01,
+        },
+        "gpt-4o-audio-preview-2024-12-17": {
+            "prompt": 0.000_002_5,
+            "completion": 0.000_01,
+        },
+        "gpt-4o-audio-preview-2024-10-01": {
+            "prompt": 0.000_002_5,
+            "completion": 0.000_01,
+        },
+        "gpt-4o-realtime-preview": {
+            "prompt": 0.000_005,
+            "completion": 0.000_02,
+        },
+        "gpt-4o-realtime-preview-2024-12-17": {
+            "prompt": 0.000_005,
+            "completion": 0.000_02,
+        },
+        "gpt-4o-realtime-preview-2024-10-01": {
+            "prompt": 0.000_005,
+            "completion": 0.000_02,
+        },
+        "gpt-4o-mini": {
+            "prompt": 0.000_000_15,
+            "completion": 0.000_000_6,
+        },
+        "gpt-4o-mini-2024-07-18": {
+            "prompt": 0.000_000_15,
+            "completion": 0.000_000_6,
+        },
+        "gpt-4o-mini-audio-preview": {
+            "prompt": 0.000_000_15,
+            "completion": 0.000_000_6,
+        },
+        "gpt-4o-mini-audio-preview-2024-12-17": {
+            "prompt": 0.000_000_15,
+            "completion": 0.000_000_6,
+        },
+        "gpt-4o-mini-realtime-preview": {
+            "prompt": 0.000_000_6,
+            "completion": 0.000_002_4,
+        },
+        "gpt-4o-mini-realtime-preview-2024-12-17": {
+            "prompt": 0.000_000_6,
+            "completion": 0.000_002_4,
+        },
+        "o1": {
+            "prompt": 0.000_015,
+            "completion": 0.000_06,
+        },
+        "o1-2024-12-17": {
+            "prompt": 0.000_015,
+            "completion": 0.000_06,
+        },
+        "o1-preview-2024-09-12": {
+            "prompt": 0.000_015,
+            "completion": 0.000_06,
+        },
+        "o3-mini": {
+            "prompt": 0.000_001_1,
+            "completion": 0.000_004_4,
+        },
+        "o3-mini-2025-01-31": {
+            "prompt": 0.000_001_1,
+            "completion": 0.000_004_4,
+        },
+        "o1-mini": {
+            "prompt": 0.000_001_1,
+            "completion": 0.000_004_4,
+        },
+        "o1-mini-2024-09-12": {
+            "prompt": 0.000_001_1,
+            "completion": 0.000_004_4,
+        },
         "gpt-4-turbo": {
             "prompt": 0.000_01,
             "completion": 0.000_03,
mirascope/core/openai/call_response.py CHANGED

@@ -82,22 +82,26 @@ class OpenAICallResponse(

     _provider = "openai"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the content of the chat completion for the 0th choice."""
         message = self.response.choices[0].message
         return message.content if message.content is not None else ""

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""
         return [str(choice.finish_reason) for choice in self.response.choices]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the name of the response model."""
         return self.response.model

+    @computed_field
     @property
     def id(self) -> str:
         """Returns the id of the response."""

@@ -108,16 +112,19 @@ class OpenAICallResponse(
         """Returns the usage of the chat completion."""
         return self.response.usage

+    @computed_field
     @property
     def input_tokens(self) -> int | None:
         """Returns the number of input tokens."""
         return self.usage.prompt_tokens if self.usage else None

+    @computed_field
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""
         return self.usage.completion_tokens if self.usage else None

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/core/vertex/call_response.py CHANGED

@@ -58,11 +58,13 @@ class VertexCallResponse(

     _provider = "vertex"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the contained string content for the 0th choice."""
         return self.response.candidates[0].content.parts[0].text

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""

@@ -80,6 +82,7 @@ class VertexCallResponse(
             for candidate in self.response.candidates
         ]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the model name.

@@ -89,6 +92,7 @@ class VertexCallResponse(
         """
         return self._model

+    @computed_field
     @property
     def id(self) -> str | None:
         """Returns the id of the response.

@@ -102,16 +106,19 @@ class VertexCallResponse(
         """Returns the usage of the chat completion."""
         return self.response.usage_metadata

+    @computed_field
     @property
     def input_tokens(self) -> int:
         """Returns the number of input tokens."""
         return self.usage.prompt_token_count

+    @computed_field
     @property
     def output_tokens(self) -> int:
         """Returns the number of output tokens."""
         return self.usage.candidates_token_count

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
mirascope/llm/call_response.py CHANGED

@@ -84,6 +84,8 @@ class CallResponse(
     def __str__(self) -> str:
         return str(self._response)

+    @computed_field
+    @computed_field
     @property
     def finish_reasons(self) -> list[FinishReason] | None:  # pyright: ignore [reportIncompatibleMethodOverride]
         return self._response.common_finish_reasons
{mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/RECORD CHANGED

@@ -47,14 +47,14 @@ mirascope/core/anthropic/__init__.py,sha256=0ObxoxWzpsyf3tm5SldosVDxVWiIu1jxuGmc
 mirascope/core/anthropic/_call.py,sha256=LXUR__AyexD-hsPMPKpA7IFuh8Cfc0uAg1GrJSxiWnU,2358
 mirascope/core/anthropic/_call_kwargs.py,sha256=EoXSl2B5FoLD_Nv03-ttXjiKlpBihZGXu6U-Ol3qwZ8,389
 mirascope/core/anthropic/call_params.py,sha256=K51kCyIf6us3Tl2SPgkqrZoacZTNwaMuVj23hFJcVBk,1238
-mirascope/core/anthropic/call_response.py,sha256=
+mirascope/core/anthropic/call_response.py,sha256=tjc4KmS_gV-F4fBdDRmhsAI7brvl1e4esCjdevnSkto,5816
 mirascope/core/anthropic/call_response_chunk.py,sha256=GZgvJRkVUUED69Mq5TyEe4OIH8AXq3hCqqU6eHTuqWc,3543
 mirascope/core/anthropic/dynamic_config.py,sha256=kZV4ApAnm3P1X5gKPJ3hbr45K6tgaNX8L6Ca8NjTkxU,1192
 mirascope/core/anthropic/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mirascope/core/anthropic/stream.py,sha256=HxHoF3NC4X8jy4OQZsQ-yoVO3hrNse8EyOQUHw8Ezv8,4870
 mirascope/core/anthropic/tool.py,sha256=HtbYV5j4itV8v6lTyLDY72NMX2kxRaXVgpZ_m89HqIk,2891
 mirascope/core/anthropic/_utils/__init__.py,sha256=xHjaWpLBcUOW_tuBrOBQ2MewFr5Kga-LBYuqw1ZTP_U,559
-mirascope/core/anthropic/_utils/_calculate_cost.py,sha256=
+mirascope/core/anthropic/_utils/_calculate_cost.py,sha256=dHM8t__tMmDdkzaR5PVUInoXh4r2HqDoM0q8wUEmd3U,3762
 mirascope/core/anthropic/_utils/_convert_common_call_params.py,sha256=ILd7AH_atmPUPj7I74EsmxG3rmWC7b5tgjnlR24jKUs,765
 mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py,sha256=UqqiDEaw20_nDbQUvRJC-ZneCd35f_2GEUpiUNMibr0,704
 mirascope/core/anthropic/_utils/_convert_message_params.py,sha256=G2VZ_xZEs4HE3KFMgTeEpd2BOK8CNz8ac_KofifN8ws,3843

@@ -66,7 +66,7 @@ mirascope/core/azure/__init__.py,sha256=ozfFhyCC0bFLDUA7m2v1POywSFpLJi6E7xZ2bhBI
 mirascope/core/azure/_call.py,sha256=SHqSJe6_4zgn4Y9PkpDl4vXvLuT4QmVnWUcws9e_RR8,2237
 mirascope/core/azure/_call_kwargs.py,sha256=q38xKSgCBWi8DLScepG-KnUfgi67AU6xr2uOHwCZ2mI,435
 mirascope/core/azure/call_params.py,sha256=o5xhlWlyUB8bTewp9fj3l0jvbCpoOsZFnaGwhkEWTD0,1366
-mirascope/core/azure/call_response.py,sha256=
+mirascope/core/azure/call_response.py,sha256=XbIE03zY9qbyCtx6lEixnfavj1FSe5dSa-j5DaIBxyU,6604
 mirascope/core/azure/call_response_chunk.py,sha256=tcLgURISaGONGDvWjWDfDPs2c0hQJT_tVELiDqL33SQ,2884
 mirascope/core/azure/dynamic_config.py,sha256=6SBMGFce7tuXdwHrlKNISpZxVxUnnumbIQB9lGR6nbs,1066
 mirascope/core/azure/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -90,7 +90,7 @@ mirascope/core/base/_extract_with_tools.py,sha256=MW4v8D1xty7LqLb5RwMFkX-peQqA73
 mirascope/core/base/_partial.py,sha256=w_ACCgsDKNLtMyAP-lNmfRdrFEPmzh2BT4aninajxyY,3240
 mirascope/core/base/call_kwargs.py,sha256=0mznCsrj1dYxvdwYNF0RKbc9CiU5G6WvvcjPqOMsOE4,351
 mirascope/core/base/call_params.py,sha256=wtuuOY-SwIZYCDBKfn_xRC0Kf1cUuI4eSQaXu6VrtaE,1331
-mirascope/core/base/call_response.py,sha256=
+mirascope/core/base/call_response.py,sha256=PfNtUN8BuKxN_6BIU8tVZJhM-D0HPqUft94aVddhEOc,9095
 mirascope/core/base/call_response_chunk.py,sha256=pvy6K2bM_wDiurfZ7M98SxEY--X6YrLjwCAWHwkFieA,2897
 mirascope/core/base/dynamic_config.py,sha256=V5IG2X5gPFpfQ47uO8JU1zoC2eNdRftsRZEmwhRPaYI,2859
 mirascope/core/base/from_call_args.py,sha256=8ijMX7PN6a4o6uLdmXJlSRnE-rEVJU5NLxUmNrS8dvU,909

@@ -146,7 +146,7 @@ mirascope/core/bedrock/_call.py,sha256=8Z8sdzpTdJsMHBev35B1KH3O16_eMLbtTkOmPB7bz
 mirascope/core/bedrock/_call_kwargs.py,sha256=N1d_iglnwZW3JrcaT8WTOeuLT5MYcVLU5vS8u8uyEL4,408
 mirascope/core/bedrock/_types.py,sha256=ntmzYsgT6wuigv1GavkdqCvJnAYRsFvVuIwxafE4DFY,3229
 mirascope/core/bedrock/call_params.py,sha256=3eKNYTteCTaPLqvAcy1vHU5aY9nMVNhmApL45ugPbrQ,1716
-mirascope/core/bedrock/call_response.py,sha256=
+mirascope/core/bedrock/call_response.py,sha256=a_cWVsamahS_0mWvMLVcHKJdc1iDRXqHPd0Njl0EvQs,7905
 mirascope/core/bedrock/call_response_chunk.py,sha256=m_It9rKXv4jtrXJh_BuEcb2807SJi80hA2iejPLmYSs,3219
 mirascope/core/bedrock/dynamic_config.py,sha256=X6v93X9g14mfvkGLL08yX-xTFGgX8y8bVngNmExdUhQ,1166
 mirascope/core/bedrock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -166,7 +166,7 @@ mirascope/core/cohere/_call.py,sha256=y0nB_7h7FWCNxHRPywtAVCYXyeYX3uzTyYBPWnuLwU
 mirascope/core/cohere/_call_kwargs.py,sha256=YmHwiofs0QADGp0wXUtOr_Z5Pt849zaCtIZmVyjw2OM,292
 mirascope/core/cohere/_types.py,sha256=dMcep2mhuUUUmKvFUmdoxkq4Zg5AtB2xquROiBbwRvo,1017
 mirascope/core/cohere/call_params.py,sha256=xtmELsLkjfyfUoNbZpn3JET-gJxo1EIvlcwxgMw3gcw,1860
-mirascope/core/cohere/call_response.py,sha256=
+mirascope/core/cohere/call_response.py,sha256=1ASEG27hTWtsD95Cf2J6VVsh9KHFgZF3vGQHLeKHj4w,5895
 mirascope/core/cohere/call_response_chunk.py,sha256=SVJrSulaQQiXIUptLqDzslRHTOQ8xc8UWtnp69n73Wg,3499
 mirascope/core/cohere/dynamic_config.py,sha256=noH36l6qGGnClVz0EtMqeW_0e4-oTCviU5SLIl8YS64,941
 mirascope/core/cohere/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -185,13 +185,13 @@ mirascope/core/gemini/__init__.py,sha256=mJvRB06dJ2qTqmyPp3-RgZl4WInVxdkFglzapvB
 mirascope/core/gemini/_call.py,sha256=g47rUaE4V_onORvRUP9GlgnQKda28dV1Ge2YACvrD-c,2344
 mirascope/core/gemini/_call_kwargs.py,sha256=4f34gl1BPM14wkd0fGJw_58jYzxgGgNvZkjVI5d1hgU,360
 mirascope/core/gemini/call_params.py,sha256=aEXhgZVB0npcT6wL_p7GVGIE3vi_JOiMKdgWtpXTezQ,1723
-mirascope/core/gemini/call_response.py,sha256=
+mirascope/core/gemini/call_response.py,sha256=cB7b1bfdctfrJ-lXC6autG0i2711iU4qEiKygFXMgYg,5928
 mirascope/core/gemini/call_response_chunk.py,sha256=AqKWWaRGEOgenxHzWLsNdbZDH-H0M5DI9CTJiwnS9Tw,2640
 mirascope/core/gemini/dynamic_config.py,sha256=_bmJUVHFyrr3zKea96lES20q4GPOelK3W7K1DcX0mZ8,836
 mirascope/core/gemini/stream.py,sha256=TPK4zKE_A0pTUKvoPktoq6BdFwxbE0S1yAeY2f9iSSg,3697
 mirascope/core/gemini/tool.py,sha256=ohO2kJPuAnYmO-t5WdavRbeSMgSfn66-A-6PEYraDPA,3073
 mirascope/core/gemini/_utils/__init__.py,sha256=rRJHluu810Jel3Bu3ok_8uyfPWnXYC0r1K5QuKPOAUo,454
-mirascope/core/gemini/_utils/_calculate_cost.py,sha256=
+mirascope/core/gemini/_utils/_calculate_cost.py,sha256=vF1XWvNnp2cTv-JnM3x_htIJ4WMUthTqpH0Sb2lGmso,2254
 mirascope/core/gemini/_utils/_convert_common_call_params.py,sha256=1ZTpwqain90Va70xC9r9-_1YEIyvyZdjMiejN7E6yY4,1072
 mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py,sha256=jkZM8hpkZjR1izwSyKTVwkkN_nfLROwx0V_yQsVDiB8,761
 mirascope/core/gemini/_utils/_convert_message_params.py,sha256=a5WaerYG2INBtxGg3qkLsAwVtJVAvK79O_83zJmHH3A,4621

@@ -203,7 +203,7 @@ mirascope/core/groq/__init__.py,sha256=wo-_txqiLC3iswnXmPX4C6IgsU-_wv1DbBlNDY4rE
 mirascope/core/groq/_call.py,sha256=gR8VN5IaYWIFXc0csn995q59FM0nBs-xVFjkVycPjMM,2223
 mirascope/core/groq/_call_kwargs.py,sha256=trT8AdQ-jdQPYKlGngIMRwwQuvKuvAbvI1yyozftOuI,425
 mirascope/core/groq/call_params.py,sha256=FchtsaeohTzYKzY9f2fUIzjgG2y4OtsnRWiHsUBLdi0,1619
-mirascope/core/groq/call_response.py,sha256=
+mirascope/core/groq/call_response.py,sha256=ovz3ZggjZq1s-k77i03KeEhZ2P_4OiYx_ZobjzuSa8s,6019
 mirascope/core/groq/call_response_chunk.py,sha256=5gKDAzncgQ8m-HKR38PJ1G3aFX1KoyabNxsy1UZ7koI,2792
 mirascope/core/groq/dynamic_config.py,sha256=AjcXBVeBdMiI6ObHanX3TVMKYxm4iWhXju3m6d-ZWMY,937
 mirascope/core/groq/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -220,7 +220,7 @@ mirascope/core/groq/_utils/_setup_call.py,sha256=fsXbP1NpzpJ3rq3oMvNEvgN4TJzudYb
 mirascope/core/litellm/__init__.py,sha256=eBLmGsbY2SNEf3DPLYS-WgpskwaWbBeonpcBc3Zxh94,779
 mirascope/core/litellm/_call.py,sha256=mSCU9nT0ZQTru6BppGJgtudAWqWFs0a6m5q-VYbM-ow,2391
 mirascope/core/litellm/call_params.py,sha256=6bnAHDkHaltwMzaF-REE80kZgZxLldL6QD341a1m-PI,270
-mirascope/core/litellm/call_response.py,sha256=
+mirascope/core/litellm/call_response.py,sha256=cPhGUKmRWboiwXr-qofgRhFNzKFhvv7i9n1u9wqVi40,706
 mirascope/core/litellm/call_response_chunk.py,sha256=cd43hZunl0VFtwInjMIJPIOl3mjomAvbG2Bzg2KZsoY,460
 mirascope/core/litellm/dynamic_config.py,sha256=ZKyVTht2qfJ2ams3HrlRierq2sE01SqiBimh51rE_6A,296
 mirascope/core/litellm/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -232,7 +232,7 @@ mirascope/core/mistral/__init__.py,sha256=6Jz9mYmijycfsCXYKgxhxMEwmQEqOwZXmJt0F7
 mirascope/core/mistral/_call.py,sha256=p9aSLYVSNgaIGA5SqCgGuT7iWN5WLfwmXubk4IF-w_I,2274
 mirascope/core/mistral/_call_kwargs.py,sha256=vZxlADPx4muIePARGdfKOVQpxpIoaXT9tCG6kY5oxSQ,513
 mirascope/core/mistral/call_params.py,sha256=wWHWI9hRnfloGhQurMwCcka9c1u_TwgcN84Ih6qVBXs,1054
-mirascope/core/mistral/call_response.py,sha256=
+mirascope/core/mistral/call_response.py,sha256=kK7EIf0nVeUHuR-Aq24K92YdU5QJo4G1RAF6X0YxRiI,5815
 mirascope/core/mistral/call_response_chunk.py,sha256=4TC3F5h_Ii3WrbDDunCOudl9wIlXMVCOigIPnJ5FWGE,2835
 mirascope/core/mistral/dynamic_config.py,sha256=-pzTvXf870NxEhjpgjqPahFWqqifzMhSbvM0kXs2G_s,937
 mirascope/core/mistral/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -251,14 +251,14 @@ mirascope/core/openai/__init__.py,sha256=1-iKWt3nEk2GjB9UuH2WcAiPajsp9B3J6G-v5Ly
 mirascope/core/openai/_call.py,sha256=ExXdY3rjBbil0ija2HlGMRvcOE2zOOj13rgliw8nmFc,2260
 mirascope/core/openai/_call_kwargs.py,sha256=x53EZmxqroNewR194M_JkRP1Ejuh4BTtDL-b7XNSo2Q,435
 mirascope/core/openai/call_params.py,sha256=hexjEPRuPpq7dkyMgdL48jjY-J5zvHHvaHMKWGnWYHI,2494
-mirascope/core/openai/call_response.py,sha256=
+mirascope/core/openai/call_response.py,sha256=AWAf-uYpbj_oggbxKpM-eGKndlIIOVT8X-PVQkTxYq4,7573
 mirascope/core/openai/call_response_chunk.py,sha256=yMjzGQa1sMDbFBn_tZPIuR6FkxyrHqxaxoHwrEQHV80,3722
 mirascope/core/openai/dynamic_config.py,sha256=D36E3CMpXSaj5I8FEmtzMJz9gtTsNz1pVW_iM3dOCcw,1045
 mirascope/core/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mirascope/core/openai/stream.py,sha256=1MttfMbYcOxmdUx9HLmP0uBruEvmHHxMidhgUTFptp4,6252
 mirascope/core/openai/tool.py,sha256=iJWJQrY3-1Rq5OywzKFO9JUAcglneGD0UtkS3pcA0pg,3154
 mirascope/core/openai/_utils/__init__.py,sha256=J4ZMAuU4X0PN-nbYj2ikX2EgYRq-T00GbontdmkTcH0,454
-mirascope/core/openai/_utils/_calculate_cost.py,sha256=
+mirascope/core/openai/_utils/_calculate_cost.py,sha256=jm7TlGdnDlcWIHPlPo1TzJW70WIFo8PjuHurnroUsB4,7587
 mirascope/core/openai/_utils/_convert_common_call_params.py,sha256=gvxsRdULxiC2137M9l53hUmF0ZkBxFQFurhWBcl_5Cg,739
 mirascope/core/openai/_utils/_convert_message_params.py,sha256=xQNntjHh91jiRI_lQgJ6hGwwF5wcltBjEgZHIkQcDtw,4594
 mirascope/core/openai/_utils/_get_json_output.py,sha256=Q_5R6NFFDvmLoz9BQiymC5AEyYvxKPH2_XnOQZ8hIkU,1215

@@ -269,7 +269,7 @@ mirascope/core/vertex/__init__.py,sha256=rhvMVCoN29wuryxGSD9JUKKSlLsWeOnw6Dkk2Cq
 mirascope/core/vertex/_call.py,sha256=ebQmWoQLnxScyxhnGKU3MmHkXXzzs_Sw2Yf-d3nZFwU,2323
 mirascope/core/vertex/_call_kwargs.py,sha256=6JxQt1bAscbhPWTGESG1TiskB-i5imDHqLMgbMHmyfI,353
 mirascope/core/vertex/call_params.py,sha256=ISBnMITxAtvuGmpLF9UdkqcDS43RwtuuVakk01YIHDs,706
-mirascope/core/vertex/call_response.py,sha256=
+mirascope/core/vertex/call_response.py,sha256=ulZrs9ZJnw204eWyKOIlEk2mdA7bLMmpUzGc_dxNb9Y,5877
 mirascope/core/vertex/call_response_chunk.py,sha256=yzVY9A18eZQyd5YnksKaJaXZ4s2yAK214wJEXPoQVHI,2627
 mirascope/core/vertex/dynamic_config.py,sha256=KISQf7c2Rf1EpaS_2Ik6beA1w9uz_dAvMBk4nQcrdaM,809
 mirascope/core/vertex/stream.py,sha256=81p04LZ47V6usjf1eQ91csLc4ZVOWSc0BAP2Vc9dCbQ,3620

@@ -299,7 +299,7 @@ mirascope/integrations/otel/_with_otel.py,sha256=tbjd6BEbcSfnsm5CWHBoHwbRNrHt6-t
 mirascope/llm/__init__.py,sha256=6JWQFeluDzPC4naQY2WneSwsS-LOTeP0NpmoJ2g8zps,94
 mirascope/llm/_protocols.py,sha256=adcuSqKmi7M-N8Yy6GWFBlE9wIgtdLbnTq8S6IdAt7g,16380
 mirascope/llm/_response_metaclass.py,sha256=6DLQb5IrqMldyEXHT_pAsr2DlUVc9CmZuZiBXG37WK8,851
-mirascope/llm/call_response.py,sha256=
+mirascope/llm/call_response.py,sha256=Pi1RRUBlJGaz8jgSw3wv2PF8XWWm1Cod2jXsJpBGPkU,4435
 mirascope/llm/call_response_chunk.py,sha256=9Vyi5_hpgill5CB8BwfSj33VR8sirY2ceTRbru0G3Sw,1820
 mirascope/llm/llm_call.py,sha256=6ErSt8mtT0GQUF92snNewy8TAYgo-gVu7Dd1KC-ob5o,8398
 mirascope/llm/llm_override.py,sha256=7L222CGbJjQPB-lCoGB29XYHyzCvqEyDtcPV-L4Ao7I,6163

@@ -331,7 +331,7 @@ mirascope/v0/base/ops_utils.py,sha256=1Qq-VIwgHBaYutiZsS2MUQ4OgPC3APyywI5bTiTAmA
 mirascope/v0/base/prompts.py,sha256=FM2Yz98cSnDceYogiwPrp4BALf3_F3d4fIOCGAkd-SE,1298
 mirascope/v0/base/types.py,sha256=ZfatJoX0Yl0e3jhv0D_MhiSVHLYUeJsdN3um3iE10zY,352
 mirascope/v0/base/utils.py,sha256=XREPENRQTu8gpMhHU8RC8qH_am3FfGUvY-dJ6x8i-mw,681
-mirascope-1.16.
-mirascope-1.16.
-mirascope-1.16.
-mirascope-1.16.
+mirascope-1.16.6.dist-info/METADATA,sha256=pv4MnpoLCGEBql-jZRyXAtCighS8l9bejXTnRQhTkdU,8545
+mirascope-1.16.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mirascope-1.16.6.dist-info/licenses/LICENSE,sha256=LAs5Q8mdawTsVdONpDGukwsoc4KEUBmmonDEL39b23Y,1072
+mirascope-1.16.6.dist-info/RECORD,,

{mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/WHEEL: File without changes
{mirascope-1.16.5.dist-info → mirascope-1.16.6.dist-info}/licenses/LICENSE: File without changes