mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
mirascope/core/costs/_google_calculate_cost.py (removed)
@@ -1,427 +0,0 @@
-"""Calculate the cost of a Gemini API call."""
-
-from ..base.types import CostMetadata
-
-# Standard Gemini API pricing table
-GEMINI_API_PRICING: dict[str, dict[str, float]] = {
-    "gemini-2.0-pro": {
-        "prompt_short": 0.000_001_25,
-        "completion_short": 0.000_005,
-        "prompt_long": 0.000_002_5,
-        "completion_long": 0.000_01,
-        "cached": 0.000_000_625,
-    },
-    "gemini-2.0-pro-preview-1206": {
-        "prompt_short": 0.000_001_25,
-        "completion_short": 0.000_005,
-        "prompt_long": 0.000_002_5,
-        "completion_long": 0.000_01,
-        "cached": 0.000_000_625,
-    },
-    "gemini-2.0-flash": {
-        "prompt_short": 0.000_000_10,
-        "completion_short": 0.000_000_40,
-        "prompt_long": 0.000_000_10,
-        "completion_long": 0.000_000_40,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-2.0-flash-latest": {
-        "prompt_short": 0.000_000_10,
-        "completion_short": 0.000_000_40,
-        "prompt_long": 0.000_000_10,
-        "completion_long": 0.000_000_40,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-2.0-flash-001": {
-        "prompt_short": 0.000_000_10,
-        "completion_short": 0.000_000_40,
-        "prompt_long": 0.000_000_10,
-        "completion_long": 0.000_000_40,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-2.0-flash-lite": {
-        "prompt_short": 0.000_000_075,
-        "completion_short": 0.000_000_30,
-        "prompt_long": 0.000_000_075,
-        "completion_long": 0.000_000_30,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-2.0-flash-lite-preview-02-05": {
-        "prompt_short": 0.000_000_075,
-        "completion_short": 0.000_000_30,
-        "prompt_long": 0.000_000_075,
-        "completion_long": 0.000_000_30,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-1.5-pro": {
-        "prompt_short": 0.000_001_25,
-        "completion_short": 0.000_005,
-        "prompt_long": 0.000_002_5,
-        "completion_long": 0.000_01,
-        "cached": 0.000_000_625,
-    },
-    "gemini-1.5-pro-latest": {
-        "prompt_short": 0.000_001_25,
-        "completion_short": 0.000_005,
-        "prompt_long": 0.000_002_5,
-        "completion_long": 0.000_01,
-        "cached": 0.000_000_625,
-    },
-    "gemini-1.5-pro-001": {
-        "prompt_short": 0.000_001_25,
-        "completion_short": 0.000_005,
-        "prompt_long": 0.000_002_5,
-        "completion_long": 0.000_01,
-        "cached": 0.000_000_625,
-    },
-    "gemini-1.5-pro-002": {
-        "prompt_short": 0.000_001_25,
-        "completion_short": 0.000_005,
-        "prompt_long": 0.000_002_5,
-        "completion_long": 0.000_01,
-        "cached": 0.000_000_625,
-    },
-    "gemini-1.5-flash": {
-        "prompt_short": 0.000_000_075,
-        "completion_short": 0.000_000_30,
-        "prompt_long": 0.000_000_15,
-        "completion_long": 0.000_000_60,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-1.5-flash-latest": {
-        "prompt_short": 0.000_000_075,
-        "completion_short": 0.000_000_30,
-        "prompt_long": 0.000_000_15,
-        "completion_long": 0.000_000_60,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-1.5-flash-001": {
-        "prompt_short": 0.000_000_075,
-        "completion_short": 0.000_000_30,
-        "prompt_long": 0.000_000_15,
-        "completion_long": 0.000_000_60,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-1.5-flash-002": {
-        "prompt_short": 0.000_000_075,
-        "completion_short": 0.000_000_30,
-        "prompt_long": 0.000_000_15,
-        "completion_long": 0.000_000_60,
-        "cached": 0.000_000_037_5,
-    },
-    "gemini-1.5-flash-8b": {
-        "prompt_short": 0.000_000_037_5,
-        "completion_short": 0.000_000_15,
-        "prompt_long": 0.000_000_075,
-        "completion_long": 0.000_000_30,
-        "cached": 0.000_000_025,
-    },
-    "gemini-1.5-flash-8b-latest": {
-        "prompt_short": 0.000_000_037_5,
-        "completion_short": 0.000_000_15,
-        "prompt_long": 0.000_000_075,
-        "completion_long": 0.000_000_30,
-        "cached": 0.000_000_025,
-    },
-    "gemini-1.5-flash-8b-001": {
-        "prompt_short": 0.000_000_037_5,
-        "completion_short": 0.000_000_15,
-        "prompt_long": 0.000_000_075,
-        "completion_long": 0.000_000_30,
-        "cached": 0.000_000_025,
-    },
-    "gemini-1.5-flash-8b-002": {
-        "prompt_short": 0.000_000_037_5,
-        "completion_short": 0.000_000_15,
-        "prompt_long": 0.000_000_075,
-        "completion_long": 0.000_000_30,
-        "cached": 0.000_000_025,
-    },
-    "gemini-1.0-pro": {
-        "prompt_short": 0.000_000_5,
-        "completion_short": 0.000_001_5,
-        "prompt_long": 0.000_000_5,
-        "completion_long": 0.000_001_5,
-        "cached": 0.000_000,
-    },
-}
-
-# Vertex AI pricing table
-VERTEX_AI_PRICING: dict[str, dict[str, float]] = {
-    "gemini-2.0-flash": {
-        "text_input": 0.000_000_15,
-        "image_input": 0.000_000_15,
-        "video_input": 0.000_000_15,
-        "audio_input": 0.000_001_00,
-        "output": 0.000_000_60,
-        "cached": 0.000_000_037_5,
-        "cache_storage_per_hour": 0.000_001_00,
-    },
-    "gemini-2.0-flash-lite": {
-        "text_input": 0.000_000_075,
-        "image_input": 0.000_000_075,
-        "video_input": 0.000_000_075,
-        "audio_input": 0.000_000_075,
-        "output": 0.000_000_30,
-        "cached": 0.000_000_037_5,
-        "cache_storage_per_hour": 0.000_001_00,
-    },
-    # Vertex AI pricing for Gemini 1.5 models is based on modalities rather than tokens
-    "gemini-1.5-flash": {
-        "text_input": 0.000_000_075,  # per 1K chars (approx. 250 tokens)
-        "image_input": 0.000_02,  # per image
-        "video_input": 0.000_02,  # per second
-        "audio_input": 0.000_002,  # per second
-        "output": 0.000_000_30,  # per 1K chars
-        "cached_text": 0.000_000_046_875,  # per 1K chars
-        "cached_image": 0.000_005,  # per image
-        "cached_video": 0.000_005,  # per second
-        "cached_audio": 0.000_000_5,  # per second
-        "cache_storage_text": 0.000_25,  # per 1K chars per hour
-        "cache_storage_image": 0.000_263,  # per image per hour
-        "cache_storage_video": 0.000_263,  # per second per hour
-        "cache_storage_audio": 0.000_025,  # per second per hour
-    },
-    "gemini-1.5-pro": {
-        "text_input": 0.000_001_25,  # per 1K chars (approx. 250 tokens)
-        "image_input": 0.000_32875,  # per image
-        "video_input": 0.000_32875,  # per second
-        "audio_input": 0.000_03125,  # per second
-        "output": 0.000_005,  # per 1K chars
-        "cached_text": 0.000_000_078125,  # per 1K chars
-        "cached_image": 0.000_0821875,  # per image
-        "cached_video": 0.000_0821875,  # per second
-        "cached_audio": 0.000_0078125,  # per second
-        "cache_storage_text": 0.001125,  # per 1K chars per hour
-        "cache_storage_image": 0.0011835,  # per image per hour
-        "cache_storage_video": 0.0011835,  # per second per hour
-        "cache_storage_audio": 0.0001125,  # per second per hour
-    },
-}
-
-
-def _calculate_context_cache_cost(
-    metadata: CostMetadata,
-    model_pricing: dict[str, float],
-    model: str,
-    use_vertex_ai: bool = False,
-) -> float:
-    """Calculate cost for context caching."""
-    if metadata.context_cache_tokens is None or metadata.context_cache_hours is None:
-        return 0.0
-
-    if use_vertex_ai:
-        # Vertex AI pricing depends on the model family
-        if model.startswith("gemini-2.0"):
-            return (
-                metadata.context_cache_tokens
-                * model_pricing.get("cache_storage_per_hour", 0)
-                * metadata.context_cache_hours
-            )
-        elif model.startswith("gemini-1.5"):
-            # Convert cache tokens to characters (approx)
-            cache_chars = metadata.context_cache_tokens * 4
-            return (
-                (cache_chars / 1000)
-                * model_pricing["cache_storage_text"]
-                * metadata.context_cache_hours
-            )
-
-    # Standard Gemini API pricing - storage cost per token-hour
-    storage_rate_per_token = 0.000001  # $1.00 per million tokens per hour
-    if "flash-8b" in model:
-        storage_rate_per_token = 0.00000025  # $0.25 per million tokens for 8B models
-
-    return (
-        metadata.context_cache_tokens
-        * storage_rate_per_token
-        * metadata.context_cache_hours
-    )
-
-
-def _calculate_grounding_cost(metadata: CostMetadata, model: str) -> float:
-    """Calculate cost for grounding requests."""
-    if metadata.google is None or metadata.google.grounding_requests is None:
-        return 0.0
-
-    # First 1,500 requests per day are free for Gemini 2.0 Flash models in Vertex AI
-    if (
-        model == "gemini-2.0-flash"
-        and metadata.google.use_vertex_ai
-        and metadata.google.grounding_requests <= 1500
-    ):
-        return 0.0
-
-    # $35 per 1,000 requests for excess
-    if metadata.google.use_vertex_ai and model == "gemini-2.0-flash":
-        excess_requests = max(0, metadata.google.grounding_requests - 1500)
-    else:
-        excess_requests = metadata.google.grounding_requests
-
-    return (excess_requests / 1000) * 35.0
-
-
-def _calculate_vertex_2_0_cost(
-    metadata: CostMetadata, model_pricing: dict[str, float], model: str
-) -> float:
-    """Calculate cost for Vertex AI's Gemini 2.0 models."""
-    # Text token costs
-    prompt_cost = (metadata.input_tokens or 0) * model_pricing["text_input"]
-    completion_cost = (metadata.output_tokens or 0) * model_pricing["output"]
-    cached_cost = (metadata.cached_tokens or 0) * model_pricing.get("cached", 0)
-
-    # Context cache costs
-    context_cache_cost = _calculate_context_cache_cost(
-        metadata, model_pricing, model, use_vertex_ai=True
-    )
-
-    # Grounding costs
-    grounding_cost = _calculate_grounding_cost(metadata, model)
-
-    # Apply batch mode discount (50% for Vertex AI)
-    if metadata.batch_mode:
-        prompt_cost *= 0.5
-        completion_cost *= 0.5
-        context_cache_cost *= 0.5
-        # Note: We don't discount grounding costs
-
-    total_cost = (
-        prompt_cost
-        + completion_cost
-        + cached_cost
-        + context_cache_cost
-        + grounding_cost
-    )
-
-    return total_cost
-
-
-def _calculate_vertex_1_5_cost(
-    metadata: CostMetadata, model_pricing: dict[str, float], model: str
-) -> float:
-    """Calculate cost for Vertex AI's Gemini 1.5 models."""
-    # Text cost - convert tokens to characters (approx. 4 chars per token)
-    text_chars = (metadata.input_tokens or 0) * 4  # Approximation
-    text_cost = (text_chars / 1000) * model_pricing["text_input"]
-
-    # Output cost
-    output_chars = (metadata.output_tokens or 0) * 4  # Approximation
-    output_cost = (output_chars / 1000) * model_pricing["output"]
-
-    # Context cache costs
-    context_cache_cost = _calculate_context_cache_cost(
-        metadata, model_pricing, model, use_vertex_ai=True
-    )
-
-    # Grounding costs
-    grounding_cost = _calculate_grounding_cost(metadata, model)
-
-    # Apply batch mode discount if applicable (50% off for Vertex AI)
-    if metadata.batch_mode:
-        text_cost *= 0.5
-        output_cost *= 0.5
-        context_cache_cost *= 0.5
-        # Note: We don't discount grounding costs
-
-    total_cost = text_cost + output_cost + context_cache_cost + grounding_cost
-
-    return total_cost
-
-
-def _calculate_standard_gemini_cost(
-    metadata: CostMetadata,
-    model_pricing: dict[str, float],
-    model: str,
-    use_long_context: bool,
-) -> float:
-    """Calculate cost for the standard Gemini API."""
-    # Determine prices based on context length
-    prompt_price = model_pricing["prompt_long" if use_long_context else "prompt_short"]
-    cached_price = model_pricing["cached"]
-    completion_price = model_pricing[
-        "completion_long" if use_long_context else "completion_short"
-    ]
-
-    # Basic token costs
-    prompt_cost = (metadata.input_tokens or 0) * prompt_price
-    cached_cost = (metadata.cached_tokens or 0) * cached_price
-    completion_cost = (metadata.output_tokens or 0) * completion_price
-
-    # Media token costs are included in the prompt/completion cost
-
-    # Context cache costs
-    context_cache_cost = _calculate_context_cache_cost(
-        metadata, model_pricing, model, use_vertex_ai=False
-    )
-
-    # Grounding costs - only apply to certain models
-    grounding_cost = _calculate_grounding_cost(metadata, model)
-
-    total_cost = (
-        prompt_cost
-        + cached_cost
-        + completion_cost
-        + context_cache_cost
-        + grounding_cost
-    )
-
-    return total_cost
-
-
-def calculate_cost(
-    metadata: CostMetadata,
-    model: str,
-) -> float | None:
-    """Calculate the cost of a Google API call.
-
-    This function supports both direct Gemini API and Vertex AI pricing.
-    It handles different media types (text, image, video, audio) and special features
-    like context caching and grounding.
-
-    https://ai.google.dev/pricing
-    https://cloud.google.com/vertex-ai/generative-ai/pricing
-
-    Args:
-        metadata: Additional metadata required for cost calculation
-        model: Model name to use for pricing calculation
-
-    Returns:
-        Total cost in USD or None if invalid input
-    """
-    # Basic validation
-    if metadata.input_tokens is None or metadata.output_tokens is None:
-        return None
-
-    # Initialize default values
-    if metadata.cached_tokens is None:
-        metadata.cached_tokens = 0
-
-    # Check if we're using Vertex AI pricing
-    use_vertex_ai = metadata.google and metadata.google.use_vertex_ai
-
-    # Determine if we're using long context pricing
-    use_long_context = (
-        metadata.context_length is not None and metadata.context_length > 128_000
-    ) or (metadata.input_tokens > 128_000)
-
-    # Get the appropriate pricing table
-    try:
-        if use_vertex_ai and model in VERTEX_AI_PRICING:
-            model_pricing = VERTEX_AI_PRICING[model]
-        else:
-            model_pricing = GEMINI_API_PRICING[model]
-    except KeyError:
-        # Unknown model
-        return None
-
-    # Calculate cost based on API type
-    if use_vertex_ai:
-        if model.startswith("gemini-2.0"):
-            return _calculate_vertex_2_0_cost(metadata, model_pricing, model)
-        elif model.startswith("gemini-1.5"):  # pragma: no cover
-            return _calculate_vertex_1_5_cost(metadata, model_pricing, model)
-    else:
-        # Standard Gemini API pricing
-        return _calculate_standard_gemini_cost(
-            metadata, model_pricing, model, use_long_context
-        )
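
As a worked example of the removed pricing logic (a sketch, not part of the diff: the module paths come from the file list above, but the keyword construction of CostMetadata is an assumption based on the fields the code reads):

    # Sketch: pricing a short-context gemini-1.5-flash call with the
    # removed 1.25.x helper. Assumes CostMetadata accepts these fields
    # as optional keyword arguments.
    from mirascope.core.base.types import CostMetadata
    from mirascope.core.costs._google_calculate_cost import calculate_cost

    metadata = CostMetadata(input_tokens=10_000, output_tokens=2_000)
    # Below the 128,000-token long-context threshold, short rates apply:
    # 10_000 * 0.000_000_075 + 2_000 * 0.000_000_30 = 0.00135 USD
    cost = calculate_cost(metadata, model="gemini-1.5-flash")
    assert cost is not None and abs(cost - 0.00135) < 1e-9

With no google metadata attached, the Vertex AI branch, context caching, and grounding all contribute zero, so only the two token terms remain.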
mirascope/core/costs/_groq_calculate_cost.py (removed)
@@ -1,156 +0,0 @@
-"""Calculate the cost of a completion using the Groq API."""
-
-from ..base.types import CostMetadata
-
-
-def calculate_cost(
-    metadata: CostMetadata,
-    model: str = "mixtral-8x7b-32768",
-) -> float | None:
-    """Calculate the cost of a completion using the Groq API.
-
-    https://wow.groq.com/
-
-    Model                                  Input              Output
-    llama-3.3-70b-versatile                $0.59 / 1M tokens  $0.79 / 1M tokens
-    llama-3.3-70b-specdec                  $0.59 / 1M tokens  $0.99 / 1M tokens
-    llama-3.1-8b-instant                   $0.05 / 1M tokens  $0.08 / 1M tokens
-    llama3-70b-8192                        $0.59 / 1M tokens  $0.79 / 1M tokens
-    llama3-8b-8192                         $0.05 / 1M tokens  $0.08 / 1M tokens
-    llama-guard-3-8b                       $0.20 / 1M tokens  $0.20 / 1M tokens
-    mixtral-8x7b-32768                     $0.24 / 1M tokens  $0.24 / 1M tokens
-    gemma-7b-it                            $0.07 / 1M tokens  $0.07 / 1M tokens
-    gemma2-9b-it                           $0.20 / 1M tokens  $0.20 / 1M tokens
-    mistral-saba-24b                       $0.79 / 1M tokens  $0.79 / 1M tokens
-    qwen-2.5-32b                           $0.79 / 1M tokens  $0.79 / 1M tokens
-    qwen-2.5-coder-32b                     $0.79 / 1M tokens  $0.79 / 1M tokens
-    deepseek-r1-distill-qwen-32b           $0.69 / 1M tokens  $0.69 / 1M tokens
-    deepseek-r1-distill-llama-70b          $0.75 / 1M tokens  $0.99 / 1M tokens
-    deepseek-r1-distill-llama-70b-specdec  $0.75 / 1M tokens  $0.99 / 1M tokens
-    llama-3.2-1b-preview                   $0.04 / 1M tokens  $0.04 / 1M tokens
-    llama-3.2-3b-preview                   $0.06 / 1M tokens  $0.06 / 1M tokens
-    llama-3.2-11b-vision-preview           $0.18 / 1M tokens  $0.18 / 1M tokens
-    llama-3.2-90b-vision-preview           $0.90 / 1M tokens  $0.90 / 1M tokens
-    """
-    pricing = {
-        "llama-3.3-70b-versatile": {
-            "prompt": 0.000_000_59,
-            "completion": 0.000_000_79,
-        },
-        "llama-3.3-70b-specdec": {
-            "prompt": 0.000_000_59,
-            "completion": 0.000_000_99,
-        },
-        "llama3-groq-70b-8192-tool-use-preview": {
-            "prompt": 0.000_000_89,
-            "completion": 0.000_000_89,
-        },
-        "llama3-groq-8b-8192-tool-use-preview": {
-            "prompt": 0.000_000_19,
-            "completion": 0.000_000_19,
-        },
-        "llama-3.1-8b-instant": {
-            "prompt": 0.000_000_05,
-            "completion": 0.000_000_08,
-        },
-        "llama-guard-3-8b": {
-            "prompt": 0.000_000_2,
-            "completion": 0.000_000_2,
-        },
-        "llama3-70b-8192": {
-            "prompt": 0.000_000_59,
-            "completion": 0.000_000_79,
-        },
-        "llama3-8b-8192": {
-            "prompt": 0.000_000_05,
-            "completion": 0.000_000_08,
-        },
-        "mixtral-8x7b-32768": {
-            "prompt": 0.000_000_24,
-            "completion": 0.000_000_24,
-        },
-        "gemma-7b-it": {
-            "prompt": 0.000_000_07,
-            "completion": 0.000_000_07,
-        },
-        "gemma2-9b-it": {
-            "prompt": 0.000_000_2,
-            "completion": 0.000_000_2,
-        },
-        "mistral-saba-24b": {
-            "prompt": 0.000_000_79,
-            "completion": 0.000_000_79,
-        },
-        "qwen-2.5-32b": {
-            "prompt": 0.000_000_79,
-            "completion": 0.000_000_79,
-        },
-        "qwen-2.5-coder-32b": {
-            "prompt": 0.000_000_79,
-            "completion": 0.000_000_79,
-        },
-        "deepseek-r1-distill-qwen-32b": {
-            "prompt": 0.000_000_69,
-            "completion": 0.000_000_69,
-        },
-        "deepseek-r1-distill-llama-70b": {
-            "prompt": 0.000_000_75,
-            "completion": 0.000_000_99,
-        },
-        "deepseek-r1-distill-llama-70b-specdec": {
-            "prompt": 0.000_000_75,
-            "completion": 0.000_000_99,
-        },
-        "llama-3.2-1b-preview": {
-            "prompt": 0.000_000_04,
-            "completion": 0.000_000_04,
-        },
-        "llama-3.2-3b-preview": {
-            "prompt": 0.000_000_06,
-            "completion": 0.000_000_06,
-        },
-        # Vision models
-        "llama-3.2-11b-vision-preview": {
-            "prompt": 0.000_000_18,
-            "completion": 0.000_000_18,
-        },
-        "llama-3.2-90b-vision-preview": {
-            "prompt": 0.000_000_90,
-            "completion": 0.000_000_90,
-        },
-    }
-
-    if metadata.input_tokens is None or metadata.output_tokens is None:
-        return None
-
-    try:
-        model_pricing = pricing[model]
-    except KeyError:
-        return None
-
-    # Calculate cost for text tokens
-    prompt_cost = metadata.input_tokens * model_pricing["prompt"]
-    completion_cost = metadata.output_tokens * model_pricing["completion"]
-    total_cost = prompt_cost + completion_cost
-
-    # Calculate cost for images if present
-    # Groq bills 6,400 tokens per image for vision models
-    # https://groq.com/pricing/
-    image_cost = 0.0
-    if metadata.images and "vision" in model:
-        # For Groq vision models, each image is billed at 6,400 tokens
-        tokens_per_image = 6400
-
-        # Count the number of images
-        image_count = len(metadata.images)
-
-        # Calculate total image tokens
-        total_image_tokens = image_count * tokens_per_image
-
-        # Images are charged at the prompt token rate
-        image_cost = total_image_tokens * model_pricing["prompt"]
-
-        # Add image cost to total
-        total_cost += image_cost
-
-    return total_cost
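
The flat 6,400-token-per-image charge makes Groq vision pricing easy to check by hand. A text-only sketch (same assumption as above about constructing CostMetadata with keyword arguments; the images field is omitted because its element type is defined elsewhere in base.types):

    from mirascope.core.base.types import CostMetadata
    from mirascope.core.costs._groq_calculate_cost import calculate_cost

    text_only = CostMetadata(input_tokens=1_000, output_tokens=500)
    # 1_000 * 0.000_000_18 + 500 * 0.000_000_18 = 0.00027 USD
    cost = calculate_cost(text_only, model="llama-3.2-11b-vision-preview")
    assert cost is not None and abs(cost - 0.00027) < 1e-9
    # Attaching one image would add 6_400 * 0.000_000_18 = 0.001_152 USD,
    # since each image is billed as 6,400 tokens at the prompt rate.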
mirascope/core/costs/_mistral_calculate_cost.py (removed)
@@ -1,64 +0,0 @@
-"""Calculate the cost of a completion using the Mistral API."""
-
-from ..base.types import CostMetadata
-
-
-def calculate_cost(
-    metadata: CostMetadata,
-    model: str = "open-mistral-7b",
-) -> float | None:
-    """Calculate the cost of a completion using the Mistral API.
-
-    https://mistral.ai/technology/#pricing
-
-    Model                      Input            Output
-    mistral-large-latest       $2/1M tokens     $6/1M tokens
-    pixtral-large-latest       $2/1M tokens     $6/1M tokens
-    mistral-small-latest       $0.1/1M tokens   $0.3/1M tokens
-    mistral-saba-latest        $0.2/1M tokens   $0.6/1M tokens
-    codestral-latest           $0.3/1M tokens   $0.9/1M tokens
-    ministral-8b-latest        $0.1/1M tokens   $0.1/1M tokens
-    ministral-3b-latest        $0.04/1M tokens  $0.04/1M tokens
-    mistral-embed              $0.1/1M tokens   -
-    mistral-moderation-latest  $0.1/1M tokens   -
-    open-mistral-nemo          $0.3/1M tokens   $0.3/1M tokens
-    open-mistral-7b            $0.25/1M tokens  $0.25/1M tokens
-    open-mixtral-8x7b          $0.7/1M tokens   $0.7/1M tokens
-    open-mixtral-8x22b         $2/1M tokens     $6/1M tokens
-    """
-    pricing = {
-        "mistral-large-latest": {"prompt": 0.000_002, "completion": 0.000_006},
-        "pixtral-large-latest": {"prompt": 0.000_002, "completion": 0.000_006},
-        "mistral-small-latest": {"prompt": 0.000_000_1, "completion": 0.000_000_3},
-        "mistral-saba-latest": {"prompt": 0.000_000_2, "completion": 0.000_000_6},
-        "codestral-latest": {"prompt": 0.000_000_3, "completion": 0.000_000_9},
-        "ministral-8b-latest": {"prompt": 0.000_000_1, "completion": 0.000_000_1},
-        "ministral-3b-latest": {"prompt": 0.000_000_04, "completion": 0.000_000_04},
-        "mistral-embed": {"prompt": 0.000_000_1, "completion": 0},
-        "mistral-moderation-latest": {"prompt": 0.000_000_1, "completion": 0},
-        "open-mistral-nemo": {"prompt": 0.000_000_3, "completion": 0.000_000_3},
-        "open-mistral-nemo-2407": {"prompt": 0.000_000_3, "completion": 0.000_000_3},
-        "open-mistral-7b": {"prompt": 0.000_000_25, "completion": 0.000_000_25},
-        "open-mixtral-8x7b": {"prompt": 0.000_000_7, "completion": 0.000_000_7},
-        "open-mixtral-8x22b": {"prompt": 0.000_002, "completion": 0.000_006},
-        "mistral-large-2407": {"prompt": 0.000_003, "completion": 0.000_009},
-        "mistral-medium-latest": {"prompt": 0.000_002_75, "completion": 0.000_008_1},
-        "pixtral-12b-2409": {"prompt": 0.000_002, "completion": 0.000_006},
-    }
-
-    if metadata.input_tokens is None or metadata.output_tokens is None:
-        return None
-
-    try:
-        model_pricing = pricing[model]
-    except KeyError:
-        return None
-
-    # Calculate cost for text tokens
-    prompt_cost = metadata.input_tokens * model_pricing["prompt"]
-    completion_cost = metadata.output_tokens * model_pricing["completion"]
-    total_cost = prompt_cost + completion_cost
-
-    # Image tokens are already included in the token counts above
-
-    return total_cost
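
The Mistral helper is the simplest of the three: a flat per-token rate with no cache, image, or grounding terms. A sketch under the same CostMetadata assumption as above:

    from mirascope.core.base.types import CostMetadata
    from mirascope.core.costs._mistral_calculate_cost import calculate_cost

    metadata = CostMetadata(input_tokens=4_000, output_tokens=1_000)
    # 4_000 * 0.000_002 + 1_000 * 0.000_006 = 0.014 USD
    cost = calculate_cost(metadata, model="mistral-large-latest")
    assert cost is not None and abs(cost - 0.014) < 1e-9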