mirascope 1.19.0__py3-none-any.whl → 1.20.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. mirascope/__init__.py +4 -0
  2. mirascope/beta/openai/realtime/realtime.py +7 -8
  3. mirascope/beta/openai/realtime/tool.py +2 -2
  4. mirascope/core/__init__.py +10 -1
  5. mirascope/core/anthropic/_utils/__init__.py +0 -2
  6. mirascope/core/anthropic/_utils/_convert_message_params.py +1 -7
  7. mirascope/core/anthropic/_utils/_message_param_converter.py +48 -31
  8. mirascope/core/anthropic/call_response.py +7 -9
  9. mirascope/core/anthropic/call_response_chunk.py +10 -0
  10. mirascope/core/anthropic/stream.py +6 -8
  11. mirascope/core/azure/_utils/__init__.py +0 -2
  12. mirascope/core/azure/call_response.py +7 -10
  13. mirascope/core/azure/call_response_chunk.py +6 -1
  14. mirascope/core/azure/stream.py +6 -8
  15. mirascope/core/base/__init__.py +10 -1
  16. mirascope/core/base/_utils/__init__.py +2 -0
  17. mirascope/core/base/_utils/_get_image_dimensions.py +39 -0
  18. mirascope/core/base/call_response.py +36 -6
  19. mirascope/core/base/call_response_chunk.py +15 -1
  20. mirascope/core/base/stream.py +25 -3
  21. mirascope/core/base/types.py +276 -2
  22. mirascope/core/bedrock/_utils/__init__.py +0 -2
  23. mirascope/core/bedrock/call_response.py +7 -10
  24. mirascope/core/bedrock/call_response_chunk.py +6 -0
  25. mirascope/core/bedrock/stream.py +6 -10
  26. mirascope/core/cohere/_utils/__init__.py +0 -2
  27. mirascope/core/cohere/call_response.py +7 -10
  28. mirascope/core/cohere/call_response_chunk.py +6 -0
  29. mirascope/core/cohere/stream.py +5 -8
  30. mirascope/core/costs/__init__.py +5 -0
  31. mirascope/core/{anthropic/_utils/_calculate_cost.py → costs/_anthropic_calculate_cost.py} +45 -14
  32. mirascope/core/{azure/_utils/_calculate_cost.py → costs/_azure_calculate_cost.py} +3 -3
  33. mirascope/core/{bedrock/_utils/_calculate_cost.py → costs/_bedrock_calculate_cost.py} +3 -3
  34. mirascope/core/{cohere/_utils/_calculate_cost.py → costs/_cohere_calculate_cost.py} +12 -8
  35. mirascope/core/{gemini/_utils/_calculate_cost.py → costs/_gemini_calculate_cost.py} +7 -7
  36. mirascope/core/costs/_google_calculate_cost.py +427 -0
  37. mirascope/core/costs/_groq_calculate_cost.py +156 -0
  38. mirascope/core/costs/_litellm_calculate_cost.py +11 -0
  39. mirascope/core/costs/_mistral_calculate_cost.py +64 -0
  40. mirascope/core/costs/_openai_calculate_cost.py +416 -0
  41. mirascope/core/{vertex/_utils/_calculate_cost.py → costs/_vertex_calculate_cost.py} +8 -7
  42. mirascope/core/{xai/_utils/_calculate_cost.py → costs/_xai_calculate_cost.py} +9 -9
  43. mirascope/core/costs/calculate_cost.py +86 -0
  44. mirascope/core/gemini/_utils/__init__.py +0 -2
  45. mirascope/core/gemini/call_response.py +7 -10
  46. mirascope/core/gemini/call_response_chunk.py +6 -1
  47. mirascope/core/gemini/stream.py +5 -8
  48. mirascope/core/google/_utils/__init__.py +0 -2
  49. mirascope/core/google/_utils/_setup_call.py +21 -2
  50. mirascope/core/google/call_response.py +9 -10
  51. mirascope/core/google/call_response_chunk.py +6 -1
  52. mirascope/core/google/stream.py +5 -8
  53. mirascope/core/groq/_utils/__init__.py +0 -2
  54. mirascope/core/groq/call_response.py +22 -10
  55. mirascope/core/groq/call_response_chunk.py +6 -0
  56. mirascope/core/groq/stream.py +5 -8
  57. mirascope/core/litellm/call_response.py +3 -4
  58. mirascope/core/litellm/stream.py +30 -22
  59. mirascope/core/mistral/_utils/__init__.py +0 -2
  60. mirascope/core/mistral/call_response.py +7 -10
  61. mirascope/core/mistral/call_response_chunk.py +6 -0
  62. mirascope/core/mistral/stream.py +5 -8
  63. mirascope/core/openai/_utils/__init__.py +0 -2
  64. mirascope/core/openai/_utils/_convert_message_params.py +4 -4
  65. mirascope/core/openai/call_response.py +30 -10
  66. mirascope/core/openai/call_response_chunk.py +6 -0
  67. mirascope/core/openai/stream.py +5 -8
  68. mirascope/core/vertex/_utils/__init__.py +0 -2
  69. mirascope/core/vertex/call_response.py +5 -10
  70. mirascope/core/vertex/call_response_chunk.py +6 -0
  71. mirascope/core/vertex/stream.py +5 -8
  72. mirascope/core/xai/_utils/__init__.py +1 -2
  73. mirascope/core/xai/call_response.py +0 -11
  74. mirascope/llm/__init__.py +10 -2
  75. mirascope/llm/_protocols.py +8 -28
  76. mirascope/llm/call_response.py +6 -6
  77. mirascope/llm/call_response_chunk.py +12 -3
  78. mirascope/llm/llm_call.py +21 -23
  79. mirascope/llm/llm_override.py +56 -27
  80. mirascope/llm/stream.py +7 -7
  81. mirascope/llm/tool.py +1 -1
  82. mirascope/retries/fallback.py +1 -1
  83. {mirascope-1.19.0.dist-info → mirascope-1.20.1.dist-info}/METADATA +1 -1
  84. {mirascope-1.19.0.dist-info → mirascope-1.20.1.dist-info}/RECORD +86 -82
  85. mirascope/core/google/_utils/_calculate_cost.py +0 -215
  86. mirascope/core/groq/_utils/_calculate_cost.py +0 -69
  87. mirascope/core/mistral/_utils/_calculate_cost.py +0 -48
  88. mirascope/core/openai/_utils/_calculate_cost.py +0 -246
  89. {mirascope-1.19.0.dist-info → mirascope-1.20.1.dist-info}/WHEEL +0 -0
  90. {mirascope-1.19.0.dist-info → mirascope-1.20.1.dist-info}/licenses/LICENSE +0 -0
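The headline change in this release is visible directly in the file list: every provider's private `_calculate_cost.py` moves into a shared `mirascope/core/costs/` package fronted by a single `calculate_cost.py` entry point, and the provider response/stream classes trade their locally computed `cost` properties for a `cost_metadata` property (see the hunks below). The following sketch illustrates the dispatch pattern such a module implies; the `CostMetadata` fields, helper signatures, and prices are illustrative assumptions, not mirascope's actual code.

```python
# Hedged sketch of a provider-dispatching cost module in the style of the
# new mirascope/core/costs/calculate_cost.py. Everything here is assumed
# shape: field names, helper signatures, and prices are illustrative.
from dataclasses import dataclass


@dataclass
class CostMetadata:
    input_tokens: int | None = None
    cached_tokens: int | None = None
    output_tokens: int | None = None


def _openai_calculate_cost(metadata: CostMetadata, model: str) -> float | None:
    # Per-provider helpers reduce to a price-table lookup (prices made up).
    prices = {"gpt-4o-mini": (0.15e-6, 0.60e-6)}
    if model not in prices or metadata.input_tokens is None:
        return None
    input_price, output_price = prices[model]
    return (
        metadata.input_tokens * input_price
        + (metadata.output_tokens or 0) * output_price
    )


def calculate_cost(provider: str, model: str, metadata: CostMetadata) -> float | None:
    # One public entry point dispatching on the provider name.
    if provider == "openai":
        return _openai_calculate_cost(metadata, model)
    raise ValueError(f"Unsupported provider: {provider}")
```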
mirascope/core/mistral/call_response_chunk.py CHANGED
@@ -8,6 +8,7 @@ from typing import cast
  from mistralai.models import CompletionChunk, FinishReason, UsageInfo

  from ..base import BaseCallResponseChunk, types
+ from ..base.types import CostMetadata
  from ._utils._convert_finish_reason_to_common_finish_reasons import (
      _convert_finish_reasons_to_common_finish_reasons,
  )
@@ -92,6 +93,11 @@ class MistralCallResponseChunk(BaseCallResponseChunk[CompletionChunk, FinishReas
              return self.usage.completion_tokens
          return None

+     @property
+     def cost_metadata(self) -> CostMetadata:
+         """Returns the cost metadata."""
+         return super().cost_metadata
+
      @property
      def common_finish_reasons(self) -> list[types.FinishReason] | None:
          return _convert_finish_reasons_to_common_finish_reasons(
mirascope/core/mistral/stream.py CHANGED
@@ -17,7 +17,7 @@ from mistralai.models import (
  )

  from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
  from .call_params import MistralCallParams
  from .call_response import MistralCallResponse
  from .call_response_chunk import MistralCallResponseChunk
@@ -62,13 +62,6 @@ class MistralStream(

      _provider = "mistral"

-     @property
-     def cost(self) -> float | None:
-         """Returns the cost of the call."""
-         return calculate_cost(
-             self.input_tokens, self.cached_tokens, self.output_tokens, self.model
-         )
-
      def _construct_message_param(
          self, tool_calls: list | None = None, content: str | None = None
      ) -> AssistantMessage:
@@ -121,3 +114,7 @@ class MistralStream(
              start_time=self.start_time,
              end_time=self.end_time,
          )
+
+     @property
+     def cost_metadata(self) -> CostMetadata:
+         return super().cost_metadata
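These two mistral hunks set the pattern repeated for every provider below: the `cost` property (and its call to a provider-local `calculate_cost`) disappears, and a `cost_metadata` property appears, typically just deferring to the base class. A minimal self-contained sketch of that inheritance shape (the real `BaseStream.cost_metadata` internals are not shown in this diff and are assumed here):

```python
# Minimal sketch of the recurring pattern, with assumed base-class internals.
from dataclasses import dataclass


@dataclass
class CostMetadata:  # assumed fields
    input_tokens: int | None = None
    output_tokens: int | None = None


class BaseStreamSketch:
    input_tokens: int | None = None
    output_tokens: int | None = None

    @property
    def cost_metadata(self) -> CostMetadata:
        # Assumed: the base class packages token counts for calculate_cost.
        return CostMetadata(self.input_tokens, self.output_tokens)


class MistralStreamSketch(BaseStreamSketch):
    @property
    def cost_metadata(self) -> CostMetadata:
        # Pure delegation for now; the override gives each provider a hook
        # for layering provider-specific metadata on top later.
        return super().cost_metadata
```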
mirascope/core/openai/_utils/__init__.py CHANGED
@@ -1,13 +1,11 @@
  """OpenAI utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
  from ._convert_message_params import convert_message_params
  from ._get_json_output import get_json_output
  from ._handle_stream import handle_stream, handle_stream_async
  from ._setup_call import setup_call

  __all__ = [
-     "calculate_cost",
      "convert_message_params",
      "get_json_output",
      "handle_stream",
mirascope/core/openai/_utils/_convert_message_params.py CHANGED
@@ -83,17 +83,17 @@ def convert_message_params(
          audio = _load_media(part.url)
          audio_type = get_audio_type(audio)
          if audio_type not in [
-             "audio/wav",
-             "audio/mp3",
+             "wav",
+             "mp3",
          ]:
              raise ValueError(
-                 f"Unsupported audio media type: {audio_type}. "
+                 f"Unsupported audio media type: audio/{audio_type}. "
                  "OpenAI currently only supports WAV and MP3 audio file types."
              )
          converted_content.append(
              {
                  "input_audio": {
-                     "format": audio_type.split("/")[-1],
+                     "format": audio_type,
                      "data": base64.b64encode(audio).decode("utf-8"),
                  },
                  "type": "input_audio",
mirascope/core/openai/call_response.py CHANGED
@@ -23,8 +23,13 @@ from ..base import (
      BaseCallResponse,
      transform_tool_outputs,
  )
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base._utils._get_image_dimensions import (
+     get_image_dimensions,
+ )
+ from ..base.types import (
+     CostMetadata,
+     FinishReason,
+ )
  from ._utils._message_param_converter import OpenAIMessageParamConverter
  from .call_params import OpenAICallParams
  from .dynamic_config import OpenAIDynamicConfig
@@ -135,14 +140,6 @@ class OpenAICallResponse(
          """Returns the number of output tokens."""
          return self.usage.completion_tokens if self.usage else None

-     @computed_field
-     @property
-     def cost(self) -> float | None:
-         """Returns the cost of the call."""
-         return calculate_cost(
-             self.input_tokens, self.cached_tokens, self.output_tokens, self.model
-         )
-
      @computed_field
      @cached_property
      def message_param(self) -> SerializeAsAny[ChatCompletionAssistantMessageParam]:
@@ -248,3 +245,26 @@ class OpenAICallResponse(
          if not self.user_message_param:
              return None
          return OpenAIMessageParamConverter.from_provider([self.user_message_param])[0]
+
+     @property
+     def cost_metadata(self) -> CostMetadata:
+         cost_metadata = super().cost_metadata
+         for message in self.messages:
+             if message.get("role") != "user":
+                 continue
+             for part in message.get("content") or []:
+                 if not isinstance(part, dict):
+                     continue
+                 if not (part.get("type") == "image_url" and "image_url" in part):
+                     continue
+                 url = part["image_url"].get("url", "")
+                 detail = part["image_url"].get("detail", "auto")
+
+                 dimensions = get_image_dimensions(url)
+                 if not dimensions:
+                     continue
+                 dimensions.detail = detail
+                 if cost_metadata.images is None:
+                     cost_metadata.images = []
+                 cost_metadata.images.append(dimensions)
+         return cost_metadata
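The new OpenAI `cost_metadata` walks user messages for `image_url` parts and records each image's dimensions plus its `detail` setting, which OpenAI's vision pricing needs (token cost scales with resolution and detail level). Below is a hedged sketch of what the new `_get_image_dimensions` helper (+39 lines, not shown in this diff) could look like for base64 data URLs; the `ImageMetadata` shape is inferred from the usage above, and Pillow is an assumed dependency.

```python
# Hedged sketch, not mirascope's actual helper: decode a base64 data URL
# and report width/height plus a mutable `detail` slot, matching how the
# property above assigns `dimensions.detail`.
import base64
import io
from dataclasses import dataclass

from PIL import Image


@dataclass
class ImageMetadata:
    width: int
    height: int
    detail: str | None = None


def get_image_dimensions(url: str) -> ImageMetadata | None:
    if not url.startswith("data:"):
        return None  # remote URLs would need a fetch; omitted in this sketch
    encoded = url.split(",", 1)[1]
    with Image.open(io.BytesIO(base64.b64decode(encoded))) as image:
        return ImageMetadata(width=image.width, height=image.height)
```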
mirascope/core/openai/call_response_chunk.py CHANGED
@@ -14,6 +14,7 @@ from openai.types.completion_usage import CompletionUsage
  from pydantic import SkipValidation, computed_field

  from ..base import BaseCallResponseChunk
+ from ..base.types import CostMetadata

  FinishReason = Choice.__annotations__["finish_reason"]

@@ -127,6 +128,11 @@ class OpenAICallResponseChunk(BaseCallResponseChunk[ChatCompletionChunk, FinishR
              return audio.get("transcript")
          return None

+     @property
+     def cost_metadata(self) -> CostMetadata:
+         """Returns the cost metadata."""
+         return super().cost_metadata
+
      @property
      def common_finish_reasons(self) -> list[FinishReason] | None:
          """Provider-agnostic finish reasons."""
mirascope/core/openai/stream.py CHANGED
@@ -21,7 +21,7 @@ from openai.types.chat.chat_completion_message_tool_call_param import Function
  from openai.types.completion_usage import CompletionUsage

  from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
  from .call_params import OpenAICallParams
  from .call_response import OpenAICallResponse
  from .call_response_chunk import OpenAICallResponseChunk
@@ -101,13 +101,6 @@ class OpenAIStream(

          return generator()

-     @property
-     def cost(self) -> float | None:
-         """Returns the cost of the call."""
-         return calculate_cost(
-             self.input_tokens, self.cached_tokens, self.output_tokens, self.model
-         )
-
      def _construct_message_param(
          self,
          tool_calls: list[ChatCompletionMessageToolCall] | None = None,
@@ -186,3 +179,7 @@ class OpenAIStream(
              start_time=self.start_time,
              end_time=self.end_time,
          )
+
+     @property
+     def cost_metadata(self) -> CostMetadata:
+         return super().cost_metadata
mirascope/core/vertex/_utils/__init__.py CHANGED
@@ -1,13 +1,11 @@
  """Vertex utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
  from ._convert_message_params import convert_message_params
  from ._get_json_output import get_json_output
  from ._handle_stream import handle_stream, handle_stream_async
  from ._setup_call import setup_call

  __all__ = [
-     "calculate_cost",
      "convert_message_params",
      "get_json_output",
      "handle_stream",
mirascope/core/vertex/call_response.py CHANGED
@@ -11,8 +11,7 @@ from vertexai.generative_models import Content, GenerationResponse, Part, Tool

  from .. import BaseMessageParam
  from ..base import BaseCallResponse, transform_tool_outputs
- from ..base.types import FinishReason
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata, FinishReason
  from ._utils._convert_finish_reason_to_common_finish_reasons import (
      _convert_finish_reasons_to_common_finish_reasons,
  )
@@ -124,14 +123,6 @@ class VertexCallResponse(
          """Returns the number of output tokens."""
          return self.usage.candidates_token_count

-     @computed_field
-     @property
-     def cost(self) -> float | None:
-         """Returns the cost of the call."""
-         return calculate_cost(
-             self.input_tokens, self.cached_tokens, self.output_tokens, self.model
-         )
-
      @computed_field
      @cached_property
      def message_param(self) -> Content:
@@ -205,3 +196,7 @@ class VertexCallResponse(
          if not self.user_message_param:
              return None
          return VertexMessageParamConverter.from_provider([self.user_message_param])[0]
+
+     @property
+     def cost_metadata(self) -> CostMetadata:
+         return super().cost_metadata
mirascope/core/vertex/call_response_chunk.py CHANGED
@@ -6,6 +6,7 @@ usage docs: learn/streams.md#handling-streamed-responses
  from vertexai.generative_models import FinishReason, GenerationResponse

  from ..base import BaseCallResponseChunk, types
+ from ..base.types import CostMetadata
  from ._utils._convert_finish_reason_to_common_finish_reasons import (
      _convert_finish_reasons_to_common_finish_reasons,
  )
@@ -86,6 +87,11 @@ class VertexCallResponseChunk(
          """Returns the number of output tokens."""
          return None

+     @property
+     def cost_metadata(self) -> CostMetadata:
+         """Returns the cost metadata."""
+         return super().cost_metadata
+
      @property
      def common_finish_reasons(self) -> list[types.FinishReason] | None:
          return _convert_finish_reasons_to_common_finish_reasons(
mirascope/core/vertex/stream.py CHANGED
@@ -16,7 +16,7 @@ from vertexai.generative_models import (
  )

  from ..base.stream import BaseStream
- from ._utils import calculate_cost
+ from ..base.types import CostMetadata
  from .call_params import VertexCallParams
  from .call_response import VertexCallResponse
  from .call_response_chunk import VertexCallResponseChunk
@@ -61,13 +61,6 @@ class VertexStream(

      _provider = "vertex"

-     @property
-     def cost(self) -> float | None:
-         """Returns the cost of the call."""
-         return calculate_cost(
-             self.input_tokens, self.cached_tokens, self.output_tokens, self.model
-         )
-
      def _construct_message_param(
          self,
          tool_calls: list[FunctionCall] | None = None,
@@ -120,3 +113,7 @@ class VertexStream(
              start_time=self.start_time,
              end_time=self.end_time,
          )
+
+     @property
+     def cost_metadata(self) -> CostMetadata:
+         return super().cost_metadata
mirascope/core/xai/_utils/__init__.py CHANGED
@@ -1,6 +1,5 @@
  """xAI utilities for decorator factories."""

- from ._calculate_cost import calculate_cost
  from ._setup_call import setup_call

- __all__ = ["calculate_cost", "setup_call"]
+ __all__ = ["setup_call"]
mirascope/core/xai/call_response.py CHANGED
@@ -3,10 +3,7 @@
  usage docs: learn/calls.md#handling-responses
  """

- from pydantic import computed_field
-
  from ..openai import OpenAICallResponse
- from ._utils import calculate_cost


  class XAICallResponse(OpenAICallResponse):
@@ -17,11 +14,3 @@ class XAICallResponse(OpenAICallResponse):
      """

      _provider = "xai"
-
-     @computed_field
-     @property
-     def cost(self) -> float | None:
-         """Returns the cost of the call."""
-         return calculate_cost(
-             self.input_tokens, self.cached_tokens, self.output_tokens, self.model
-         )
mirascope/llm/__init__.py CHANGED
@@ -1,6 +1,14 @@
- from ._protocols import LocalProvider, Provider
+ from ..core import CostMetadata, LocalProvider, Provider, calculate_cost
  from .call_response import CallResponse
  from .llm_call import call
  from .llm_override import override

- __all__ = ["CallResponse", "LocalProvider", "Provider", "call", "override"]
+ __all__ = [
+     "CallResponse",
+     "CostMetadata",
+     "LocalProvider",
+     "Provider",
+     "calculate_cost",
+     "call",
+     "override",
+ ]
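With `CostMetadata` and `calculate_cost` re-exported from `..core`, the cost utilities become importable from the provider-agnostic namespace alongside `call`:

```python
# Imports made valid by the new __all__ above; a calculate_cost call is
# omitted here because its signature is not part of this hunk.
from mirascope.llm import CallResponse, CostMetadata, calculate_cost, call
```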
mirascope/llm/_protocols.py CHANGED
@@ -19,22 +19,23 @@ from typing import (
  )
  from pydantic import BaseModel

- from mirascope.core import BaseDynamicConfig, BaseTool
- from mirascope.core.base import (
+ from ..core import BaseDynamicConfig, BaseTool
+ from ..core.base import (
      BaseCallResponse,
      BaseCallResponseChunk,
      BaseType,
      CommonCallParams,
  )
- from mirascope.core.base._utils._protocols import (
+ from ..core.base._utils._protocols import (
      AsyncLLMFunctionDecorator,
      LLMFunctionDecorator,
      SyncLLMFunctionDecorator,
  )
- from mirascope.core.base.stream_config import StreamConfig
- from mirascope.llm.call_response import CallResponse
- from mirascope.llm.call_response_chunk import CallResponseChunk
- from mirascope.llm.stream import Stream
+ from ..core.base.stream_config import StreamConfig
+ from ..core.base.types import LocalProvider, Provider
+ from .call_response import CallResponse
+ from .call_response_chunk import CallResponseChunk
+ from .stream import Stream

  _BaseStreamT = TypeVar("_BaseStreamT", covariant=True)
  _ResponseModelT = TypeVar("_ResponseModelT", bound=BaseModel | BaseType | Enum)
@@ -61,27 +62,6 @@ _BaseCallResponseChunkT = TypeVar(
  )


- Provider: TypeAlias = Literal[
-     "anthropic",
-     "azure",
-     "bedrock",
-     "cohere",
-     "gemini",
-     "google",
-     "groq",
-     "litellm",
-     "mistral",
-     "openai",
-     "vertex",
-     "xai",
- ]
-
- LocalProvider: TypeAlias = Literal[
-     "ollama",
-     "vllm",
- ]
-
-
  class _CallDecorator(
      Protocol[
          _BaseCallResponseT,
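The deleted `Provider` and `LocalProvider` aliases are now imported from `..core.base.types` (see the import hunk above); presumably they keep the same `Literal` definitions as the removed lines:

```python
# Presumed contents now living in mirascope/core/base/types.py, reproduced
# from the Literal unions removed above.
from typing import Literal, TypeAlias

Provider: TypeAlias = Literal[
    "anthropic", "azure", "bedrock", "cohere", "gemini", "google",
    "groq", "litellm", "mistral", "openai", "vertex", "xai",
]
LocalProvider: TypeAlias = Literal["ollama", "vllm"]
```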
mirascope/llm/call_response.py CHANGED
@@ -7,8 +7,8 @@ from typing import Any, TypeVar

  from pydantic import computed_field

- from mirascope.core import BaseDynamicConfig
- from mirascope.core.base import (
+ from ..core import BaseDynamicConfig
+ from ..core.base import (
      BaseCallParams,
      BaseCallResponse,
      BaseMessageParam,
@@ -16,10 +16,10 @@ from mirascope.core.base import (
      Usage,
      transform_tool_outputs,
  )
- from mirascope.core.base.message_param import ToolResultPart
- from mirascope.core.base.types import FinishReason
- from mirascope.llm._response_metaclass import _ResponseMetaclass
- from mirascope.llm.tool import Tool
+ from ..core.base.message_param import ToolResultPart
+ from ..core.base.types import FinishReason
+ from ._response_metaclass import _ResponseMetaclass
+ from .tool import Tool

  _ResponseT = TypeVar("_ResponseT")

mirascope/llm/call_response_chunk.py CHANGED
@@ -4,9 +4,9 @@ from __future__ import annotations

  from typing import Any, Generic, TypeVar

- from mirascope.core.base.call_response_chunk import BaseCallResponseChunk
- from mirascope.core.base.types import FinishReason, Usage
- from mirascope.llm._response_metaclass import _ResponseMetaclass
+ from ..core.base.call_response_chunk import BaseCallResponseChunk
+ from ..core.base.types import CostMetadata, FinishReason, Usage
+ from ._response_metaclass import _ResponseMetaclass

  _ChunkT = TypeVar("_ChunkT")

@@ -40,6 +40,9 @@ class CallResponseChunk(
              "__pydantic_private__",
              "__class_getitem__",
              "_properties",
+             "cost_metadata",
+             "finish_reasons",
+             "usage",
          } | set(object.__getattribute__(self, "_properties"))

          if name in special_names:
@@ -58,3 +61,9 @@
      @property
      def usage(self) -> Usage | None:
          return self._response.common_usage
+
+     @property
+     def cost_metadata(self) -> CostMetadata:
+         """Get metadata required for cost calculation."""
+
+         return self._response.cost_metadata
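Together with the `cost_metadata`, `finish_reasons`, and `usage` entries added to the special-name set above, the provider-agnostic chunk now transparently proxies cost metadata from the wrapped provider chunk. A hedged usage sketch (the model name is illustrative, and the `(chunk, tool)` iteration shape follows mirascope's streaming docs):

```python
# Hedged usage sketch of reading cost metadata off streamed chunks.
from mirascope import llm


@llm.call(provider="openai", model="gpt-4o-mini", stream=True)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book"


stream = recommend_book("fantasy")
for chunk, _ in stream:  # mirascope streams yield (chunk, tool) pairs
    print(chunk.content, end="", flush=True)
    metadata = chunk.cost_metadata  # proxied from the provider chunk
```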
mirascope/llm/llm_call.py CHANGED
@@ -9,27 +9,25 @@ from typing import Any, ParamSpec, TypeVar, cast, get_args

  from pydantic import BaseModel

- from mirascope.core import BaseTool
- from mirascope.core.base import (
+ from ..core import BaseTool
+ from ..core.base import (
      BaseCallResponse,
      BaseCallResponseChunk,
      BaseStream,
      BaseType,
      CommonCallParams,
  )
- from mirascope.core.base._utils import fn_is_async
- from mirascope.llm.call_response import CallResponse
- from mirascope.llm.stream import Stream
-
+ from ..core.base._utils import fn_is_async
  from ..core.base.stream_config import StreamConfig
+ from ..core.base.types import LocalProvider, Provider
  from ._protocols import (
      AsyncLLMFunctionDecorator,
      CallDecorator,
      LLMFunctionDecorator,
-     LocalProvider,
-     Provider,
      SyncLLMFunctionDecorator,
  )
+ from .call_response import CallResponse
+ from .stream import Stream

  _P = ParamSpec("_P")
  _R = TypeVar("_R")
@@ -53,7 +51,7 @@ def _get_local_provider_call(
      client: Any | None,  # noqa: ANN401
  ) -> tuple[Callable, Any | None]:
      if provider == "ollama":
-         from mirascope.core.openai import openai_call
+         from ..core.openai import openai_call

          if client:
              return openai_call, client
@@ -62,7 +60,7 @@
          client = OpenAI(api_key="ollama", base_url="http://localhost:11434/v1")
          return openai_call, client
      else:  # provider == "vllm"
-         from mirascope.core.openai import openai_call
+         from ..core.openai import openai_call

          if client:
              return openai_call, client
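Both local providers route through `openai_call` against an OpenAI-compatible endpoint, with ollama defaulting to `http://localhost:11434/v1` when no client is passed. A usage sketch (the model name is illustrative; any model served by your local ollama install would work):

```python
# Usage sketch: a local ollama model served on the default endpoint above.
from mirascope import llm


@llm.call(provider="ollama", model="llama3.2")
def answer(question: str) -> str:
    return f"Answer concisely: {question}"
```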
@@ -75,51 +73,51 @@
  def _get_provider_call(provider: Provider) -> Callable:
      """Returns the provider-specific call decorator based on the provider name."""
      if provider == "anthropic":
-         from mirascope.core.anthropic import anthropic_call
+         from ..core.anthropic import anthropic_call

          return anthropic_call
      elif provider == "azure":
-         from mirascope.core.azure import azure_call
+         from ..core.azure import azure_call

          return azure_call
      elif provider == "bedrock":
-         from mirascope.core.bedrock import bedrock_call
+         from ..core.bedrock import bedrock_call

          return bedrock_call
      elif provider == "cohere":
-         from mirascope.core.cohere import cohere_call
+         from ..core.cohere import cohere_call

          return cohere_call
      elif provider == "gemini":
-         from mirascope.core.gemini import gemini_call
+         from ..core.gemini import gemini_call

          return gemini_call
      elif provider == "google":
-         from mirascope.core.google import google_call
+         from ..core.google import google_call

          return google_call
      elif provider == "groq":
-         from mirascope.core.groq import groq_call
+         from ..core.groq import groq_call

          return groq_call
      elif provider == "litellm":
-         from mirascope.core.litellm import litellm_call
+         from ..core.litellm import litellm_call

          return litellm_call
      elif provider == "mistral":
-         from mirascope.core.mistral import mistral_call
+         from ..core.mistral import mistral_call

          return mistral_call
      elif provider == "openai":
-         from mirascope.core.openai import openai_call
+         from ..core.openai import openai_call

          return openai_call
      elif provider == "vertex":
-         from mirascope.core.vertex import vertex_call
+         from ..core.vertex import vertex_call

          return vertex_call
      elif provider == "xai":
-         from mirascope.core.xai import xai_call
+         from ..core.xai import xai_call

          return xai_call
      raise ValueError(f"Unsupported provider: {provider}")
@@ -264,7 +262,7 @@ template.
  Example:

  ```python
- from mirascope.llm import call
+ from ..llm import call


  @call(provider="openai", model="gpt-4o-mini")