mirascope 1.19.0__py3-none-any.whl → 1.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. mirascope/__init__.py +4 -0
  2. mirascope/beta/openai/realtime/realtime.py +7 -8
  3. mirascope/beta/openai/realtime/tool.py +2 -2
  4. mirascope/core/__init__.py +8 -1
  5. mirascope/core/anthropic/_utils/__init__.py +0 -2
  6. mirascope/core/anthropic/_utils/_convert_message_params.py +1 -7
  7. mirascope/core/anthropic/_utils/_message_param_converter.py +48 -31
  8. mirascope/core/anthropic/call_response.py +7 -9
  9. mirascope/core/anthropic/call_response_chunk.py +10 -0
  10. mirascope/core/anthropic/stream.py +6 -8
  11. mirascope/core/azure/_utils/__init__.py +0 -2
  12. mirascope/core/azure/call_response.py +7 -10
  13. mirascope/core/azure/call_response_chunk.py +6 -1
  14. mirascope/core/azure/stream.py +6 -8
  15. mirascope/core/base/__init__.py +2 -1
  16. mirascope/core/base/_utils/__init__.py +2 -0
  17. mirascope/core/base/_utils/_get_image_dimensions.py +39 -0
  18. mirascope/core/base/call_response.py +36 -6
  19. mirascope/core/base/call_response_chunk.py +15 -1
  20. mirascope/core/base/stream.py +25 -3
  21. mirascope/core/base/types.py +276 -2
  22. mirascope/core/bedrock/_utils/__init__.py +0 -2
  23. mirascope/core/bedrock/call_response.py +7 -10
  24. mirascope/core/bedrock/call_response_chunk.py +6 -0
  25. mirascope/core/bedrock/stream.py +6 -10
  26. mirascope/core/cohere/_utils/__init__.py +0 -2
  27. mirascope/core/cohere/call_response.py +7 -10
  28. mirascope/core/cohere/call_response_chunk.py +6 -0
  29. mirascope/core/cohere/stream.py +5 -8
  30. mirascope/core/costs/__init__.py +5 -0
  31. mirascope/core/{anthropic/_utils/_calculate_cost.py → costs/_anthropic_calculate_cost.py} +45 -14
  32. mirascope/core/{azure/_utils/_calculate_cost.py → costs/_azure_calculate_cost.py} +3 -3
  33. mirascope/core/{bedrock/_utils/_calculate_cost.py → costs/_bedrock_calculate_cost.py} +3 -3
  34. mirascope/core/{cohere/_utils/_calculate_cost.py → costs/_cohere_calculate_cost.py} +12 -8
  35. mirascope/core/{gemini/_utils/_calculate_cost.py → costs/_gemini_calculate_cost.py} +7 -7
  36. mirascope/core/costs/_google_calculate_cost.py +427 -0
  37. mirascope/core/costs/_groq_calculate_cost.py +156 -0
  38. mirascope/core/costs/_litellm_calculate_cost.py +11 -0
  39. mirascope/core/costs/_mistral_calculate_cost.py +64 -0
  40. mirascope/core/costs/_openai_calculate_cost.py +416 -0
  41. mirascope/core/{vertex/_utils/_calculate_cost.py → costs/_vertex_calculate_cost.py} +8 -7
  42. mirascope/core/{xai/_utils/_calculate_cost.py → costs/_xai_calculate_cost.py} +9 -9
  43. mirascope/core/costs/calculate_cost.py +86 -0
  44. mirascope/core/gemini/_utils/__init__.py +0 -2
  45. mirascope/core/gemini/call_response.py +7 -10
  46. mirascope/core/gemini/call_response_chunk.py +6 -1
  47. mirascope/core/gemini/stream.py +5 -8
  48. mirascope/core/google/_utils/__init__.py +0 -2
  49. mirascope/core/google/_utils/_setup_call.py +21 -2
  50. mirascope/core/google/call_response.py +9 -10
  51. mirascope/core/google/call_response_chunk.py +6 -1
  52. mirascope/core/google/stream.py +5 -8
  53. mirascope/core/groq/_utils/__init__.py +0 -2
  54. mirascope/core/groq/call_response.py +22 -10
  55. mirascope/core/groq/call_response_chunk.py +6 -0
  56. mirascope/core/groq/stream.py +5 -8
  57. mirascope/core/litellm/call_response.py +3 -4
  58. mirascope/core/litellm/stream.py +30 -22
  59. mirascope/core/mistral/_utils/__init__.py +0 -2
  60. mirascope/core/mistral/call_response.py +7 -10
  61. mirascope/core/mistral/call_response_chunk.py +6 -0
  62. mirascope/core/mistral/stream.py +5 -8
  63. mirascope/core/openai/_utils/__init__.py +0 -2
  64. mirascope/core/openai/_utils/_convert_message_params.py +4 -4
  65. mirascope/core/openai/call_response.py +30 -10
  66. mirascope/core/openai/call_response_chunk.py +6 -0
  67. mirascope/core/openai/stream.py +5 -8
  68. mirascope/core/vertex/_utils/__init__.py +0 -2
  69. mirascope/core/vertex/call_response.py +5 -10
  70. mirascope/core/vertex/call_response_chunk.py +6 -0
  71. mirascope/core/vertex/stream.py +5 -8
  72. mirascope/core/xai/_utils/__init__.py +1 -2
  73. mirascope/core/xai/call_response.py +0 -11
  74. mirascope/llm/__init__.py +9 -2
  75. mirascope/llm/_protocols.py +8 -28
  76. mirascope/llm/call_response.py +6 -6
  77. mirascope/llm/call_response_chunk.py +12 -3
  78. mirascope/llm/llm_call.py +21 -23
  79. mirascope/llm/llm_override.py +56 -27
  80. mirascope/llm/stream.py +7 -7
  81. mirascope/llm/tool.py +1 -1
  82. mirascope/retries/fallback.py +1 -1
  83. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/METADATA +1 -1
  84. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/RECORD +86 -82
  85. mirascope/core/google/_utils/_calculate_cost.py +0 -215
  86. mirascope/core/groq/_utils/_calculate_cost.py +0 -69
  87. mirascope/core/mistral/_utils/_calculate_cost.py +0 -48
  88. mirascope/core/openai/_utils/_calculate_cost.py +0 -246
  89. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/WHEEL +0 -0
  90. {mirascope-1.19.0.dist-info → mirascope-1.20.0.dist-info}/licenses/LICENSE +0 -0
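
The dominant change visible in the file list above is a consolidation of cost calculation: the per-provider _utils/_calculate_cost.py helpers move into a shared mirascope/core/costs package, and the provider-specific cost computed properties are replaced by cost_metadata properties that feed the new central calculate_cost logic. A minimal usage sketch follows, assuming the standard mirascope call decorator API and that cost is now derived in the base response class from this metadata (the base call_response.py changes are not shown in the hunks below):

    from mirascope.core import openai

    @openai.call("gpt-4o-mini")
    def recommend_book(genre: str) -> str:
        return f"Recommend a {genre} book"

    response = recommend_book("fantasy")

    # New in 1.20.0 per this diff: every provider response exposes cost_metadata,
    # the provider-agnostic inputs (tokens, images, flags) for central cost math.
    print(response.cost_metadata)
    # Assumption: response.cost is still available, now derived centrally by
    # mirascope/core/costs/calculate_cost.py rather than per-provider helpers.
    print(response.cost)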

mirascope/core/gemini/call_response.py
@@ -17,8 +17,7 @@ from pydantic import computed_field

from .. import BaseMessageParam
from ..base import BaseCallResponse, transform_tool_outputs
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata, FinishReason
from ._utils._convert_finish_reason_to_common_finish_reasons import (
_convert_finish_reasons_to_common_finish_reasons,
)
@@ -134,14 +133,6 @@ class GeminiCallResponse(
"""Returns the number of output tokens."""
return None

- @computed_field
- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
@computed_field
@cached_property
def message_param(self) -> ContentDict:
@@ -213,3 +204,9 @@ class GeminiCallResponse(
if not self.user_message_param:
return None
return GeminiMessageParamConverter.from_provider([self.user_message_param])[0]
+
+ @computed_field
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Get metadata required for cost calculation."""
+ return super().cost_metadata

mirascope/core/gemini/call_response_chunk.py
@@ -7,7 +7,7 @@ from google.ai.generativelanguage import Candidate
from google.generativeai.types import GenerateContentResponse

from ..base import BaseCallResponseChunk
- from ..base.types import FinishReason
+ from ..base.types import CostMetadata, FinishReason
from ._utils._convert_finish_reason_to_common_finish_reasons import (
_convert_finish_reasons_to_common_finish_reasons,
)
@@ -88,6 +88,11 @@ class GeminiCallResponseChunk(
"""Returns the number of output tokens."""
return None

+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Returns the cost metadata."""
+ return super().cost_metadata
+
@property
def common_finish_reasons(self) -> list[FinishReason] | None:
return _convert_finish_reasons_to_common_finish_reasons(

mirascope/core/gemini/stream.py
@@ -22,7 +22,7 @@ from google.generativeai.types import (
from google.generativeai.types.content_types import PartType

from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
from .call_params import GeminiCallParams
from .call_response import GeminiCallResponse
from .call_response_chunk import GeminiCallResponseChunk
@@ -66,13 +66,6 @@ class GeminiStream(

_provider = "gemini"

- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
def _construct_message_param(
self, tool_calls: list[FunctionCall] | None = None, content: str | None = None
) -> ContentDict:
@@ -121,3 +114,7 @@ class GeminiStream(
start_time=self.start_time,
end_time=self.end_time,
)
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ return super().cost_metadata

mirascope/core/google/_utils/__init__.py
@@ -1,13 +1,11 @@
"""Google utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
from ._convert_message_params import convert_message_params
from ._get_json_output import get_json_output
from ._handle_stream import handle_stream, handle_stream_async
from ._setup_call import setup_call

__all__ = [
- "calculate_cost",
"convert_message_params",
"get_json_output",
"handle_stream",

mirascope/core/google/_utils/_setup_call.py
@@ -176,5 +176,24 @@ def setup_call(
client.models.generate_content, client.models.generate_content_stream
)
)
-
- return create, prompt_template, messages, tool_types, call_kwargs
+ if client.vertexai:
+ if isinstance(dynamic_config, dict):
+ metadata = dynamic_config.get("metadata", {})
+ tags = metadata.get("tags", set())
+ tags.add("use_vertex_ai")
+ metadata["tags"] = tags
+ dynamic_config["metadata"] = metadata
+ else:
+ metadata = getattr(fn, "_metadata", {})
+ tags = metadata.get("tags", set())
+ tags.add("use_vertex_ai")
+ metadata["tags"] = tags
+ fn._metadata = metadata
+
+ return (
+ create,
+ prompt_template,
+ messages,
+ tool_types,
+ call_kwargs,
+ )

mirascope/core/google/call_response.py
@@ -21,8 +21,7 @@ from pydantic import computed_field

from .. import BaseMessageParam
from ..base import BaseCallResponse, transform_tool_outputs
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata, FinishReason, GoogleMetadata
from ._utils._convert_finish_reason_to_common_finish_reasons import (
_convert_finish_reasons_to_common_finish_reasons,
)
@@ -145,14 +144,6 @@ class GoogleCallResponse(
else None
)

- @computed_field
- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
@computed_field
@cached_property
def message_param(self) -> ContentDict:
@@ -228,3 +219,11 @@ class GoogleCallResponse(
if not self.user_message_param:
return None
return GoogleMessageParamConverter.from_provider([self.user_message_param])[0]
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ cost_metadata = super().cost_metadata
+ cost_metadata.google = GoogleMetadata(
+ use_vertex_ai="use_vertex_ai" in self.metadata.get("tags", [])
+ )
+ return cost_metadata

mirascope/core/google/call_response_chunk.py
@@ -11,7 +11,7 @@ from google.genai.types import (
GenerateContentResponseUsageMetadata,
)

- from mirascope.core.base.types import FinishReason
+ from mirascope.core.base.types import CostMetadata, FinishReason

from ..base import BaseCallResponseChunk
from ._utils._convert_finish_reason_to_common_finish_reasons import (
@@ -98,6 +98,11 @@ class GoogleCallResponseChunk(
"""Returns the number of output tokens."""
return self.usage.candidates_token_count if self.usage else None

+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Returns the cost metadata."""
+ return super().cost_metadata
+
@property
def common_finish_reasons(self) -> list[FinishReason] | None:
return _convert_finish_reasons_to_common_finish_reasons(

mirascope/core/google/stream.py
@@ -20,7 +20,7 @@ from google.genai.types import (
)

from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
from .call_params import GoogleCallParams
from .call_response import GoogleCallResponse
from .call_response_chunk import GoogleCallResponseChunk
@@ -64,13 +64,6 @@ class GoogleStream(

_provider = "google"

- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
def _construct_message_param(
self, tool_calls: list[FunctionCall] | None = None, content: str | None = None
) -> ContentDict:
@@ -144,3 +137,7 @@ class GoogleStream(
start_time=self.start_time,
end_time=self.end_time,
)
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ return super().cost_metadata

mirascope/core/groq/_utils/__init__.py
@@ -1,13 +1,11 @@
"""Groq utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
from ._convert_message_params import convert_message_params
from ._get_json_output import get_json_output
from ._handle_stream import handle_stream, handle_stream_async
from ._setup_call import setup_call

__all__ = [
- "calculate_cost",
"convert_message_params",
"get_json_output",
"handle_stream",

mirascope/core/groq/call_response.py
@@ -19,8 +19,7 @@ from pydantic import SerializeAsAny, computed_field

from .. import BaseMessageParam
from ..base import BaseCallResponse, transform_tool_outputs
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata, FinishReason, ImageMetadata
from ._utils._message_param_converter import GroqMessageParamConverter
from .call_params import GroqCallParams
from .dynamic_config import AsyncGroqDynamicConfig, GroqDynamicConfig
@@ -110,14 +109,6 @@ class GroqCallResponse(
"""Returns the number of output tokens."""
return self.usage.completion_tokens if self.usage else None

- @computed_field
- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
@computed_field
@cached_property
def message_param(self) -> SerializeAsAny[ChatCompletionAssistantMessageParam]:
@@ -195,3 +186,24 @@ class GroqCallResponse(
if not self.user_message_param:
return None
return GroqMessageParamConverter.from_provider([self.user_message_param])[0]
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ cost_metadata = super().cost_metadata
+ image_metadata = []
+
+ for message in self.messages:
+ if "content" not in message:
+ continue
+ content = message["content"]
+
+ if not isinstance(content, list):
+ continue
+ for part in content:
+ # Check if this part is an image_url
+ if isinstance(part, dict) and part.get("type") == "image_url":
+ # Only count the image if it has a URL
+ image_metadata.append(ImageMetadata(width=0, height=0))
+
+ cost_metadata.images = image_metadata
+ return cost_metadata

mirascope/core/groq/call_response_chunk.py
@@ -10,6 +10,7 @@ from groq.types.chat.chat_completion import Choice
from groq.types.completion_usage import CompletionUsage

from ..base import BaseCallResponseChunk
+ from ..base.types import CostMetadata

FinishReason = Choice.__annotations__["finish_reason"]

@@ -93,6 +94,11 @@ class GroqCallResponseChunk(BaseCallResponseChunk[ChatCompletionChunk, FinishRea
return self.usage.completion_tokens
return None

+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Returns the cost metadata."""
+ return super().cost_metadata
+
@property
def common_finish_reasons(self) -> list[FinishReason] | None:
return cast(list[FinishReason], self.finish_reasons)

mirascope/core/groq/stream.py
@@ -17,7 +17,7 @@ from groq.types.chat.chat_completion_message import ChatCompletionMessage
from groq.types.completion_usage import CompletionUsage

from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
from .call_params import GroqCallParams
from .call_response import GroqCallResponse
from .call_response_chunk import GroqCallResponseChunk
@@ -63,13 +63,6 @@ class GroqStream(

_provider = "groq"

- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
def _construct_message_param(
self,
tool_calls: list[ChatCompletionMessageToolCallParam] | None = None,
@@ -136,3 +129,7 @@ class GroqStream(
start_time=self.start_time,
end_time=self.end_time,
)
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ return super().cost_metadata

mirascope/core/litellm/call_response.py
@@ -3,9 +3,9 @@
usage docs: learn/calls.md#handling-responses
"""

- from litellm.cost_calculator import completion_cost
from pydantic import computed_field

+ from ..base.types import CostMetadata
from ..openai import OpenAICallResponse


@@ -20,6 +20,5 @@ class LiteLLMCallResponse(OpenAICallResponse):

@computed_field
@property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return completion_cost(self.response)
+ def cost_metadata(self) -> CostMetadata:
+ return CostMetadata(cost=self.response._hidden_params["response_cost"]) # pyright: ignore [reportAttributeAccessIssue]
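
For context on the LiteLLMCallResponse hunk above: the new cost_metadata property reads the price that litellm itself attaches to responses via its private _hidden_params dict, instead of recomputing it with completion_cost. A standalone sketch of that underlying litellm behavior (assumes litellm is installed, an API key is configured, and that litellm continues to populate this private field):

    import litellm

    response = litellm.completion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    # litellm attaches its own computed price to the response object;
    # this is the value the new cost_metadata property surfaces.
    print(response._hidden_params["response_cost"])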

mirascope/core/litellm/stream.py
@@ -6,8 +6,10 @@ usage docs: learn/streams.md
from collections.abc import AsyncGenerator, Generator

from litellm import Choices, Message
+ from litellm.cost_calculator import completion_cost
from litellm.types.utils import ModelResponse

+ from ..base.types import CostMetadata
from ..openai import OpenAIStream, OpenAITool
from .call_response import LiteLLMCallResponse
from .call_response_chunk import LiteLLMCallResponseChunk
@@ -34,35 +36,41 @@ class LiteLLMStream(OpenAIStream):
return super().__aiter__() # pyright: ignore [reportReturnType] # pragma: no cover

@property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
+ def cost_metadata(self) -> CostMetadata:
+ """Returns metadata needed for cost calculation."""
response = self.construct_call_response()
- return response.cost
+ return CostMetadata(
+ cost=response.cost,
+ )

def construct_call_response(self) -> LiteLLMCallResponse:
openai_call_response = super().construct_call_response()
openai_response = openai_call_response.response
+ model_response = ModelResponse(
+ id=openai_response.id,
+ choices=[
+ Choices(
+ finish_reason=choice.finish_reason,
+ index=choice.index,
+ message=Message(**choice.message.model_dump()),
+ logprobs=choice.logprobs,
+ )
+ for choice in openai_response.choices
+ ],
+ created=openai_response.created,
+ model=openai_response.model,
+ object=openai_response.object,
+ system_fingerprint=openai_response.system_fingerprint,
+ usage=openai_response.usage.model_dump() if openai_response.usage else None,
+ )
+ model_response._hidden_params["response_cost"] = completion_cost(
+ model=self.model,
+ messages=openai_call_response.messages,
+ completion=openai_call_response.content,
+ )
response = LiteLLMCallResponse(
metadata=openai_call_response.metadata,
- response=ModelResponse(
- id=openai_response.id,
- choices=[
- Choices(
- finish_reason=choice.finish_reason,
- index=choice.index,
- message=Message(**choice.message.model_dump()),
- logprobs=choice.logprobs,
- )
- for choice in openai_response.choices
- ],
- created=openai_response.created,
- model=openai_response.model,
- object=openai_response.object,
- system_fingerprint=openai_response.system_fingerprint,
- usage=openai_response.usage.model_dump()
- if openai_response.usage
- else None,
- ), # pyright: ignore [reportArgumentType]
+ response=model_response, # pyright: ignore [reportArgumentType]
tool_types=openai_call_response.tool_types,
prompt_template=openai_call_response.prompt_template,
fn_args=openai_call_response.fn_args,

mirascope/core/mistral/_utils/__init__.py
@@ -1,13 +1,11 @@
"""Mistral utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
from ._convert_message_params import convert_message_params
from ._get_json_output import get_json_output
from ._handle_stream import handle_stream, handle_stream_async
from ._setup_call import setup_call

__all__ = [
- "calculate_cost",
"convert_message_params",
"get_json_output",
"handle_stream",

mirascope/core/mistral/call_response.py
@@ -19,8 +19,7 @@ from pydantic import computed_field

from .. import BaseMessageParam
from ..base import BaseCallResponse, transform_tool_outputs
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata, FinishReason
from ._utils._convert_finish_reason_to_common_finish_reasons import (
_convert_finish_reasons_to_common_finish_reasons,
)
@@ -119,14 +118,6 @@ class MistralCallResponse(
"""Returns the number of output tokens."""
return self.usage.completion_tokens

- @computed_field
- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
@computed_field
@cached_property
def message_param(self) -> AssistantMessage:
@@ -200,3 +191,9 @@ class MistralCallResponse(
if not self.user_message_param:
return None
return MistralMessageParamConverter.from_provider([self.user_message_param])[0]
+
+ @computed_field
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Get metadata required for cost calculation."""
+ return super().cost_metadata

mirascope/core/mistral/call_response_chunk.py
@@ -8,6 +8,7 @@ from typing import cast
from mistralai.models import CompletionChunk, FinishReason, UsageInfo

from ..base import BaseCallResponseChunk, types
+ from ..base.types import CostMetadata
from ._utils._convert_finish_reason_to_common_finish_reasons import (
_convert_finish_reasons_to_common_finish_reasons,
)
@@ -92,6 +93,11 @@ class MistralCallResponseChunk(BaseCallResponseChunk[CompletionChunk, FinishReas
return self.usage.completion_tokens
return None

+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Returns the cost metadata."""
+ return super().cost_metadata
+
@property
def common_finish_reasons(self) -> list[types.FinishReason] | None:
return _convert_finish_reasons_to_common_finish_reasons(

mirascope/core/mistral/stream.py
@@ -17,7 +17,7 @@ from mistralai.models import (
)

from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
from .call_params import MistralCallParams
from .call_response import MistralCallResponse
from .call_response_chunk import MistralCallResponseChunk
@@ -62,13 +62,6 @@ class MistralStream(

_provider = "mistral"

- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
def _construct_message_param(
self, tool_calls: list | None = None, content: str | None = None
) -> AssistantMessage:
@@ -121,3 +114,7 @@ class MistralStream(
start_time=self.start_time,
end_time=self.end_time,
)
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ return super().cost_metadata

mirascope/core/openai/_utils/__init__.py
@@ -1,13 +1,11 @@
"""OpenAI utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
from ._convert_message_params import convert_message_params
from ._get_json_output import get_json_output
from ._handle_stream import handle_stream, handle_stream_async
from ._setup_call import setup_call

__all__ = [
- "calculate_cost",
"convert_message_params",
"get_json_output",
"handle_stream",

mirascope/core/openai/_utils/_convert_message_params.py
@@ -83,17 +83,17 @@ def convert_message_params(
audio = _load_media(part.url)
audio_type = get_audio_type(audio)
if audio_type not in [
- "audio/wav",
- "audio/mp3",
+ "wav",
+ "mp3",
]:
raise ValueError(
- f"Unsupported audio media type: {audio_type}. "
+ f"Unsupported audio media type: audio/{audio_type}. "
"OpenAI currently only supports WAV and MP3 audio file types."
)
converted_content.append(
{
"input_audio": {
- "format": audio_type.split("/")[-1],
+ "format": audio_type,
"data": base64.b64encode(audio).decode("utf-8"),
},
"type": "input_audio",

mirascope/core/openai/call_response.py
@@ -23,8 +23,13 @@ from ..base import (
BaseCallResponse,
transform_tool_outputs,
)
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base._utils._get_image_dimensions import (
+ get_image_dimensions,
+ )
+ from ..base.types import (
+ CostMetadata,
+ FinishReason,
+ )
from ._utils._message_param_converter import OpenAIMessageParamConverter
from .call_params import OpenAICallParams
from .dynamic_config import OpenAIDynamicConfig
@@ -135,14 +140,6 @@ class OpenAICallResponse(
"""Returns the number of output tokens."""
return self.usage.completion_tokens if self.usage else None

- @computed_field
- @property
- def cost(self) -> float | None:
- """Returns the cost of the call."""
- return calculate_cost(
- self.input_tokens, self.cached_tokens, self.output_tokens, self.model
- )
-
@computed_field
@cached_property
def message_param(self) -> SerializeAsAny[ChatCompletionAssistantMessageParam]:
@@ -248,3 +245,26 @@ class OpenAICallResponse(
if not self.user_message_param:
return None
return OpenAIMessageParamConverter.from_provider([self.user_message_param])[0]
+
+ @property
+ def cost_metadata(self) -> CostMetadata:
+ cost_metadata = super().cost_metadata
+ for message in self.messages:
+ if message.get("role") != "user":
+ continue
+ for part in message.get("content") or []:
+ if not isinstance(part, dict):
+ continue
+ if not (part.get("type") == "image_url" and "image_url" in part):
+ continue
+ url = part["image_url"].get("url", "")
+ detail = part["image_url"].get("detail", "auto")
+
+ dimensions = get_image_dimensions(url)
+ if not dimensions:
+ continue
+ dimensions.detail = detail
+ if cost_metadata.images is None:
+ cost_metadata.images = []
+ cost_metadata.images.append(dimensions)
+ return cost_metadata
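
The cost_metadata property above calls get_image_dimensions from the newly added mirascope/core/base/_utils/_get_image_dimensions.py (+39 lines), whose body does not appear in this diff. A hypothetical sketch of what such a helper might look like, assuming Pillow is available and only base64 data URLs are decoded locally; the real implementation may differ:

    import base64
    import io

    from PIL import Image  # assumption: Pillow is used for decoding

    from mirascope.core.base.types import ImageMetadata


    def get_image_dimensions(url: str) -> ImageMetadata | None:
        """Hypothetical: return width/height for a base64 data URL, else None."""
        if not url.startswith("data:image/"):
            return None  # remote URLs would need a fetch; skipped in this sketch
        try:
            encoded = url.split(",", 1)[1]
            image = Image.open(io.BytesIO(base64.b64decode(encoded)))
            return ImageMetadata(width=image.width, height=image.height)
        except Exception:
            return None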

mirascope/core/openai/call_response_chunk.py
@@ -14,6 +14,7 @@ from openai.types.completion_usage import CompletionUsage
from pydantic import SkipValidation, computed_field

from ..base import BaseCallResponseChunk
+ from ..base.types import CostMetadata

FinishReason = Choice.__annotations__["finish_reason"]

@@ -127,6 +128,11 @@ class OpenAICallResponseChunk(BaseCallResponseChunk[ChatCompletionChunk, FinishR
return audio.get("transcript")
return None

+ @property
+ def cost_metadata(self) -> CostMetadata:
+ """Returns the cost metadata."""
+ return super().cost_metadata
+
@property
def common_finish_reasons(self) -> list[FinishReason] | None:
"""Provider-agnostic finish reasons."""