mirascope 1.19.0__py3-none-any.whl → 1.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. mirascope/__init__.py +4 -0
  2. mirascope/beta/openai/realtime/realtime.py +7 -8
  3. mirascope/beta/openai/realtime/tool.py +2 -2
  4. mirascope/core/__init__.py +8 -1
  5. mirascope/core/anthropic/_utils/__init__.py +0 -2
  6. mirascope/core/anthropic/_utils/_convert_message_params.py +1 -7
  7. mirascope/core/anthropic/_utils/_message_param_converter.py +48 -31
  8. mirascope/core/anthropic/call_response.py +7 -9
  9. mirascope/core/anthropic/call_response_chunk.py +10 -0
  10. mirascope/core/anthropic/stream.py +6 -8
  11. mirascope/core/azure/_utils/__init__.py +0 -2
  12. mirascope/core/azure/call_response.py +7 -10
  13. mirascope/core/azure/call_response_chunk.py +6 -1
  14. mirascope/core/azure/stream.py +6 -8
  15. mirascope/core/base/__init__.py +2 -1
  16. mirascope/core/base/_utils/__init__.py +2 -0
  17. mirascope/core/base/_utils/_get_image_dimensions.py +39 -0
  18. mirascope/core/base/call_response.py +36 -6
  19. mirascope/core/base/call_response_chunk.py +15 -1
  20. mirascope/core/base/stream.py +25 -3
  21. mirascope/core/base/types.py +276 -2
  22. mirascope/core/bedrock/_utils/__init__.py +0 -2
  23. mirascope/core/bedrock/call_response.py +7 -10
  24. mirascope/core/bedrock/call_response_chunk.py +6 -0
  25. mirascope/core/bedrock/stream.py +6 -10
  26. mirascope/core/cohere/_utils/__init__.py +0 -2
  27. mirascope/core/cohere/call_response.py +7 -10
  28. mirascope/core/cohere/call_response_chunk.py +6 -0
  29. mirascope/core/cohere/stream.py +5 -8
  30. mirascope/core/costs/__init__.py +5 -0
  31. mirascope/core/{anthropic/_utils/_calculate_cost.py → costs/_anthropic_calculate_cost.py} +45 -14
  32. mirascope/core/{azure/_utils/_calculate_cost.py → costs/_azure_calculate_cost.py} +3 -3
  33. mirascope/core/{bedrock/_utils/_calculate_cost.py → costs/_bedrock_calculate_cost.py} +3 -3
  34. mirascope/core/{cohere/_utils/_calculate_cost.py → costs/_cohere_calculate_cost.py} +12 -8
  35. mirascope/core/{gemini/_utils/_calculate_cost.py → costs/_gemini_calculate_cost.py} +7 -7
  36. mirascope/core/costs/_google_calculate_cost.py +427 -0
  37. mirascope/core/costs/_groq_calculate_cost.py +156 -0
  38. mirascope/core/costs/_litellm_calculate_cost.py +11 -0
  39. mirascope/core/costs/_mistral_calculate_cost.py +64 -0
  40. mirascope/core/costs/_openai_calculate_cost.py +416 -0
  41. mirascope/core/{vertex/_utils/_calculate_cost.py → costs/_vertex_calculate_cost.py} +8 -7
  42. mirascope/core/{xai/_utils/_calculate_cost.py → costs/_xai_calculate_cost.py} +9 -9
  43. mirascope/core/costs/calculate_cost.py +86 -0
  44. mirascope/core/gemini/_utils/__init__.py +0 -2
  45. mirascope/core/gemini/call_response.py +7 -10
  46. mirascope/core/gemini/call_response_chunk.py +6 -1
  47. mirascope/core/gemini/stream.py +5 -8
  48. mirascope/core/google/_utils/__init__.py +0 -2
  49. mirascope/core/google/_utils/_setup_call.py +21 -2
  50. mirascope/core/google/call_response.py +9 -10
  51. mirascope/core/google/call_response_chunk.py +6 -1
  52. mirascope/core/google/stream.py +5 -8
  53. mirascope/core/groq/_utils/__init__.py +0 -2
  54. mirascope/core/groq/call_response.py +22 -10
  55. mirascope/core/groq/call_response_chunk.py +6 -0
  56. mirascope/core/groq/stream.py +5 -8
  57. mirascope/core/litellm/call_response.py +3 -4
  58. mirascope/core/litellm/stream.py +30 -22
  59. mirascope/core/mistral/_utils/__init__.py +0 -2
  60. mirascope/core/mistral/call_response.py +7 -10
  61. mirascope/core/mistral/call_response_chunk.py +6 -0
  62. mirascope/core/mistral/stream.py +5 -8
  63. mirascope/core/openai/_utils/__init__.py +0 -2
  64. mirascope/core/openai/_utils/_convert_message_params.py +4 -4
  65. mirascope/core/openai/call_response.py +30 -10
  66. mirascope/core/openai/call_response_chunk.py +6 -0
  67. mirascope/core/openai/stream.py +5 -8
  68. mirascope/core/vertex/_utils/__init__.py +0 -2
  69. mirascope/core/vertex/call_response.py +5 -10
  70. mirascope/core/vertex/call_response_chunk.py +6 -0
  71. mirascope/core/vertex/stream.py +5 -8
  72. mirascope/core/xai/_utils/__init__.py +1 -2
  73. mirascope/core/xai/call_response.py +0 -11
  74. mirascope/llm/__init__.py +9 -2
  75. mirascope/llm/_protocols.py +8 -28
  76. mirascope/llm/call_response.py +6 -6
  77. mirascope/llm/call_response_chunk.py +12 -3
  78. mirascope/llm/llm_call.py +21 -23
  79. mirascope/llm/llm_override.py +56 -27
  80. mirascope/llm/stream.py +7 -7
  81. mirascope/llm/tool.py +1 -1
  82. mirascope/retries/fallback.py +1 -1
  83. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/METADATA +1 -1
  84. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/RECORD +86 -82
  85. mirascope/core/google/_utils/_calculate_cost.py +0 -215
  86. mirascope/core/groq/_utils/_calculate_cost.py +0 -69
  87. mirascope/core/mistral/_utils/_calculate_cost.py +0 -48
  88. mirascope/core/openai/_utils/_calculate_cost.py +0 -246
  89. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/WHEEL +0 -0
  90. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,69 +0,0 @@
1
- """Calculate the cost of a completion using the Groq API."""
2
-
3
-
4
- def calculate_cost(
5
- input_tokens: int | float | None,
6
- cached_tokens: int | float | None,
7
- output_tokens: int | float | None,
8
- model: str = "mixtral-8x7b-32768",
9
- ) -> float | None:
10
- """Calculate the cost of a completion using the Groq API.
11
-
12
- https://wow.groq.com/
13
-
14
- Model Input Cached Output
15
- llama-3.1-405b-reasoning N/A N/A
16
- llama-3.1-70b-versatile N/A N/A
17
- llama-3.1-8b-instant N/A N/A
18
- llama3-groq-70b-8192-tool-use-preview $0.89 / 1M tokens $0.89 / 1M tokens
19
- llama3-groq-8b-8192-tool-use-preview $0.19 / 1M tokens $0.19 / 1M tokens
20
- llama3-70b-8192 $0.59 / 1M tokens $0.79 / 1M tokens
21
- llama3-8b-8192 $0.05 / 1M tokens $0.08 / 1M tokens
22
- mixtral-8x7b-32768 $0.27 / 1M tokens $0.27 / 1M tokens
23
- gemma-7b-it $0.07 / 1M tokens $0.07 / 1M tokens
24
- gemma2-9b-it $0.20 / 1M tokens $0.20 / 1M tokens
25
- """
26
- pricing = {
27
- "llama3-groq-70b-8192-tool-use-preview": {
28
- "prompt": 0.000_000_89,
29
- "completion": 0.000_000_89,
30
- },
31
- "llama3-groq-8b-8192-tool-use-preview": {
32
- "prompt": 0.000_000_19,
33
- "completion": 0.000_000_19,
34
- },
35
- "llama3-70b-8192": {
36
- "prompt": 0.000_000_59,
37
- "completion": 0.000_000_79,
38
- },
39
- "llama3-8b-8192": {
40
- "prompt": 0.000_000_05,
41
- "completion": 0.000_000_08,
42
- },
43
- "mixtral-8x7b-32768": {
44
- "prompt": 0.000_000_24,
45
- "completion": 0.000_000_24,
46
- },
47
- "gemma-7b-it": {
48
- "prompt": 0.000_000_07,
49
- "completion": 0.000_000_07,
50
- },
51
- "gemma2-9b-it": {
52
- "prompt": 0.000_000_2,
53
- "completion": 0.000_000_2,
54
- },
55
- }
56
-
57
- if input_tokens is None or output_tokens is None:
58
- return None
59
-
60
- try:
61
- model_pricing = pricing[model]
62
- except KeyError:
63
- return None
64
-
65
- prompt_cost = input_tokens * model_pricing["prompt"]
66
- completion_cost = output_tokens * model_pricing["completion"]
67
- total_cost = prompt_cost + completion_cost
68
-
69
- return total_cost
@@ -1,48 +0,0 @@
1
- """Calculate the cost of a completion using the Mistral API."""
2
-
3
-
4
- def calculate_cost(
5
- input_tokens: int | float | None,
6
- cached_tokens: int | float | None,
7
- output_tokens: int | float | None,
8
- model: str = "open-mistral-7b",
9
- ) -> float | None:
10
- """Calculate the cost of a completion using the Mistral API.
11
-
12
- https://mistral.ai/technology/#pricing
13
-
14
- Model Input Cached Output
15
- open-mistral-nemo $0.3/1M tokens $0.3/1M tokens
16
- mistral-large-latest $3/1M tokens $9/1M tokens
17
- codestral-2405 $1/1M tokens $3/1M tokens
18
- open-mistral-7b $0.25/1M tokens $0.25/1M tokens
19
- open-mixtral-8x7b $0.7/1M tokens $0.7/1M tokens
20
- open-mixtral-8x22b $2/1M tokens $6/1M tokens
21
- mistral-small-latest $2/1M tokens $6/1M tokens
22
- mistral-medium-latest $2.75/1M tokens $8.1/1M tokens
23
- """
24
- pricing = {
25
- "open-mistral-nemo": {"prompt": 0.000_000_3, "completion": 0.000_000_3},
26
- "open-mistral-nemo-2407": {"prompt": 0.000_000_3, "completion": 0.000_000_3},
27
- "mistral-large-latest": {"prompt": 0.000_003, "completion": 0.000_009},
28
- "mistral-large-2407": {"prompt": 0.000_003, "completion": 0.000_009},
29
- "open-mistral-7b": {"prompt": 0.000_000_25, "completion": 0.000_000_25},
30
- "open-mixtral-8x7b": {"prompt": 0.000_000_7, "completion": 0.000_000_7},
31
- "open-mixtral-8x22b": {"prompt": 0.000_002, "completion": 0.000_006},
32
- "mistral-small-latest": {"prompt": 0.000_002, "completion": 0.000_006},
33
- "mistral-medium-latest": {"prompt": 0.000_002_75, "completion": 0.000_008_1},
34
- }
35
-
36
- if input_tokens is None or output_tokens is None:
37
- return None
38
-
39
- try:
40
- model_pricing = pricing[model]
41
- except KeyError:
42
- return None
43
-
44
- prompt_cost = input_tokens * model_pricing["prompt"]
45
- completion_cost = output_tokens * model_pricing["completion"]
46
- total_cost = prompt_cost + completion_cost
47
-
48
- return total_cost
@@ -1,246 +0,0 @@
1
- """Calculate the cost of a completion using the OpenAI API."""
2
-
3
-
4
- def calculate_cost(
5
- input_tokens: int | float | None,
6
- cached_tokens: int | float | None,
7
- output_tokens: int | float | None,
8
- model: str = "gpt-3.5-turbo-16k",
9
- ) -> float | None:
10
- """Calculate the cost of a completion using the OpenAI API.
11
-
12
- https://openai.com/pricing
13
-
14
- Model Input Cached Output
15
- gpt-4o $2.50 / 1M tokens $1.25 / 1M tokens $10.00 / 1M tokens
16
- gpt-4o-2024-11-20 $2.50 / 1M tokens $1.25 / 1M tokens $10.00 / 1M tokens
17
- gpt-4o-2024-08-06 $2.50 / 1M tokens $1.25 / 1M tokens $10.00 / 1M tokens
18
- gpt-4o-2024-05-13 $5.00 / 1M tokens $2.50 / 1M tokens $15.00 / 1M tokens
19
- gpt-4o-audio-preview $2.50 / 1M tokens $1.25 / 1M tokens $10.00 / 1M tokens
20
- gpt-4o-audio-preview-2024-12-17 $2.50 / 1M tokens $1.25 / 1M tokens $10.00 / 1M tokens
21
- gpt-4o-audio-preview-2024-10-01 $2.50 / 1M tokens $1.25 / 1M tokens $10.00 / 1M tokens
22
- gpt-4o-realtime-preview $5.00 / 1M tokens $2.50 / 1M tokens $20.00 / 1M tokens
23
- gpt-4o-realtime-preview-2024-12-17 $5.00 / 1M tokens $2.50 / 1M tokens $20.00 / 1M tokens
24
- gpt-4o-realtime-preview-2024-10-01 $5.00 / 1M tokens $2.50 / 1M tokens $20.00 / 1M tokens
25
- gpt-4o-mini $0.15 / 1M tokens $0.08 / 1M tokens $0.60 / 1M tokens
26
- gpt-4o-mini-2024-07-18 $0.15 / 1M tokens $0.08 / 1M tokens $0.60 / 1M tokens
27
- gpt-4o-mini-audio-preview $0.15 / 1M tokens $0.08 / 1M tokens $0.60 / 1M tokens
28
- gpt-4o-mini-audio-preview-2024-12-17 $0.15 / 1M tokens $0.08 / 1M tokens $0.60 / 1M tokens
29
- gpt-4o-mini-realtime-preview $0.60 / 1M tokens $0.30 / 1M tokens $2.40 / 1M tokens
30
- gpt-4o-mini-realtime-preview-2024-12-17 $0.60 / 1M tokens $0.30 / 1M tokens $2.40 / 1M tokens
31
- o1 $15.00 / 1M tokens $7.50 / 1M tokens $60.00 / 1M tokens
32
- o1-2024-12-17 $15.00 / 1M tokens $7.50 / 1M tokens $60.00 / 1M tokens
33
- o1-preview-2024-09-12 $15.00 / 1M tokens $7.50 / 1M tokens $60.00 / 1M tokens
34
- o3-mini $1.10 / 1M tokens $0.55 / 1M tokens $4.40 / 1M tokens
35
- o3-mini-2025-01-31 $1.10 / 1M tokens $0.55 / 1M tokens $4.40 / 1M tokens
36
- o1-mini $1.10 / 1M tokens $0.55 / 1M tokens $4.40 / 1M tokens
37
- o1-mini-2024-09-12 $1.10 / 1M tokens $0.55 / 1M tokens $4.40 / 1M tokens
38
- gpt-4-turbo $10.00 / 1M tokens $30.00 / 1M tokens
39
- gpt-4-turbo-2024-04-09 $10.00 / 1M tokens $30.00 / 1M tokens
40
- gpt-3.5-turbo-0125 $0.50 / 1M tokens $1.50 / 1M tokens
41
- gpt-3.5-turbo-1106 $1.00 / 1M tokens $2.00 / 1M tokens
42
- gpt-4-1106-preview $10.00 / 1M tokens $30.00 / 1M tokens
43
- gpt-4 $30.00 / 1M tokens $60.00 / 1M tokens
44
- text-embedding-3-small $0.02 / 1M tokens
45
- text-embedding-3-large $0.13 / 1M tokens
46
- text-embedding-ada-0002 $0.10 / 1M tokens
47
- """
48
- pricing = {
49
- "gpt-4o": {
50
- "prompt": 0.000_002_5,
51
- "cached": 0.000_001_25,
52
- "completion": 0.000_01,
53
- },
54
- "gpt-4o-2024-11-20": {
55
- "prompt": 0.000_002_5,
56
- "cached": 0.000_001_25,
57
- "completion": 0.000_01,
58
- },
59
- "gpt-4o-2024-08-06": {
60
- "prompt": 0.000_002_5,
61
- "cached": 0.000_001_25,
62
- "completion": 0.000_01,
63
- },
64
- "gpt-4o-2024-05-13": {
65
- "prompt": 0.000_005,
66
- "cached": 0.000_002_5,
67
- "completion": 0.000_015,
68
- },
69
- "gpt-4o-audio-preview": {
70
- "prompt": 0.000_002_5,
71
- "cached": 0.000_001_25,
72
- "completion": 0.000_01,
73
- },
74
- "gpt-4o-audio-preview-2024-12-17": {
75
- "prompt": 0.000_002_5,
76
- "cached": 0.000_001_25,
77
- "completion": 0.000_01,
78
- },
79
- "gpt-4o-audio-preview-2024-10-01": {
80
- "prompt": 0.000_002_5,
81
- "cached": 0.000_001_25,
82
- "completion": 0.000_01,
83
- },
84
- "gpt-4o-realtime-preview": {
85
- "prompt": 0.000_005,
86
- "cached": 0.000_002_5,
87
- "completion": 0.000_02,
88
- },
89
- "gpt-4o-realtime-preview-2024-12-17": {
90
- "prompt": 0.000_005,
91
- "cached": 0.000_002_5,
92
- "completion": 0.000_02,
93
- },
94
- "gpt-4o-realtime-preview-2024-10-01": {
95
- "prompt": 0.000_005,
96
- "cached": 0.000_002_5,
97
- "completion": 0.000_02,
98
- },
99
- "gpt-4o-mini": {
100
- "prompt": 0.000_000_15,
101
- "cached": 0.000_000_08,
102
- "completion": 0.000_000_6,
103
- },
104
- "gpt-4o-mini-2024-07-18": {
105
- "prompt": 0.000_000_15,
106
- "cached": 0.000_000_08,
107
- "completion": 0.000_000_6,
108
- },
109
- "gpt-4o-mini-audio-preview": {
110
- "prompt": 0.000_000_15,
111
- "cached": 0.000_000_08,
112
- "completion": 0.000_000_6,
113
- },
114
- "gpt-4o-mini-audio-preview-2024-12-17": {
115
- "prompt": 0.000_000_15,
116
- "cached": 0.000_000_08,
117
- "completion": 0.000_000_6,
118
- },
119
- "gpt-4o-mini-realtime-preview": {
120
- "prompt": 0.000_000_6,
121
- "cached": 0.000_000_3,
122
- "completion": 0.000_002_4,
123
- },
124
- "gpt-4o-mini-realtime-preview-2024-12-17": {
125
- "prompt": 0.000_000_6,
126
- "cached": 0.000_000_3,
127
- "completion": 0.000_002_4,
128
- },
129
- "o1": {
130
- "prompt": 0.000_015,
131
- "cached": 0.000_007_5,
132
- "completion": 0.000_06,
133
- },
134
- "o1-2024-12-17": {
135
- "prompt": 0.000_015,
136
- "cached": 0.000_007_5,
137
- "completion": 0.000_06,
138
- },
139
- "o1-preview-2024-09-12": {
140
- "prompt": 0.000_015,
141
- "cached": 0.000_007_5,
142
- "completion": 0.000_06,
143
- },
144
- "o3-mini": {
145
- "prompt": 0.000_001_1,
146
- "cached": 0.000_000_55,
147
- "completion": 0.000_004_4,
148
- },
149
- "o3-mini-2025-01-31": {
150
- "prompt": 0.000_001_1,
151
- "cached": 0.000_000_55,
152
- "completion": 0.000_004_4,
153
- },
154
- "o1-mini": {
155
- "prompt": 0.000_001_1,
156
- "cached": 0.000_000_55,
157
- "completion": 0.000_004_4,
158
- },
159
- "o1-mini-2024-09-12": {
160
- "prompt": 0.000_001_1,
161
- "cached": 0.000_000_55,
162
- "completion": 0.000_004_4,
163
- },
164
- "gpt-4-turbo": {
165
- "prompt": 0.000_01,
166
- "cached": 0,
167
- "completion": 0.000_03,
168
- },
169
- "gpt-4-turbo-2024-04-09": {
170
- "prompt": 0.000_01,
171
- "cached": 0,
172
- "completion": 0.000_03,
173
- },
174
- "gpt-3.5-turbo-0125": {
175
- "prompt": 0.000_000_5,
176
- "cached": 0,
177
- "completion": 0.000_001_5,
178
- },
179
- "gpt-3.5-turbo-1106": {
180
- "prompt": 0.000_001,
181
- "cached": 0,
182
- "completion": 0.000_002,
183
- },
184
- "gpt-4-1106-preview": {
185
- "prompt": 0.000_01,
186
- "cached": 0,
187
- "completion": 0.000_03,
188
- },
189
- "gpt-4": {
190
- "prompt": 0.000_003,
191
- "cached": 0,
192
- "completion": 0.000_006,
193
- },
194
- "gpt-3.5-turbo-4k": {
195
- "prompt": 0.000_015,
196
- "cached": 0,
197
- "completion": 0.000_02,
198
- },
199
- "gpt-3.5-turbo-16k": {
200
- "prompt": 0.000_003,
201
- "cached": 0,
202
- "completion": 0.000_004,
203
- },
204
- "gpt-4-8k": {
205
- "prompt": 0.000_003,
206
- "cached": 0,
207
- "completion": 0.000_006,
208
- },
209
- "gpt-4-32k": {
210
- "prompt": 0.000_006,
211
- "cached": 0,
212
- "completion": 0.000_012,
213
- },
214
- "text-embedding-3-small": {
215
- "prompt": 0.000_000_02,
216
- "cached": 0,
217
- "completion": 0,
218
- },
219
- "text-embedding-ada-002": {
220
- "prompt": 0.000_000_1,
221
- "cached": 0,
222
- "completion": 0,
223
- },
224
- "text-embedding-3-large": {
225
- "prompt": 0.000_000_13,
226
- "cached": 0,
227
- "completion": 0,
228
- },
229
- }
230
- if input_tokens is None or output_tokens is None:
231
- return None
232
-
233
- if cached_tokens is None:
234
- cached_tokens = 0
235
-
236
- try:
237
- model_pricing = pricing[model]
238
- except KeyError:
239
- return None
240
-
241
- prompt_cost = input_tokens * model_pricing["prompt"]
242
- cached_cost = cached_tokens * model_pricing["cached"]
243
- completion_cost = output_tokens * model_pricing["completion"]
244
- total_cost = prompt_cost + cached_cost + completion_cost
245
-
246
- return total_cost