mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
"""Base Kwargs for client-specific kwarg dicts."""

from typing import TypeVar

# TypedDict is imported from typing_extensions (rather than typing) so that these
# kwargs classes stay compatible with google-genai's TypedDict-based types.
from typing_extensions import TypedDict as _TypedDict


class BaseKwargs(_TypedDict, total=False):
    """Base class for kwargs by which clients specify options."""


# Type variable for client-specific kwargs dicts; the bound is a forward
# reference so it resolves against the class defined above.
KwargsT = TypeVar("KwargsT", bound="BaseKwargs")
"""Base parameters for LLM providers."""

from typing import TypedDict


class Params(TypedDict, total=False):
    """Common parameters shared across LLM providers.

    Note: Each provider may handle these parameters differently or not support them at all.
    Please check provider-specific documentation for parameter support and behavior.
    """

    temperature: float
    """Controls randomness in the output (0.0 to 1.0).

    Lower temperatures suit prompts that call for a less open-ended or creative
    response, while higher temperatures can lead to more diverse or creative
    results.
    """

    max_tokens: int
    """Maximum number of tokens to generate."""

    top_p: float
    """Nucleus sampling parameter (0.0 to 1.0).

    Tokens are selected from most to least probable until the sum of their
    probabilities equals this value. Use a lower value for less random responses
    and a higher value for more random responses.
    """

    top_k: int
    """Limits token selection to the k most probable tokens (typically 1 to 100).

    At each token selection step, the ``top_k`` tokens with the highest
    probabilities are sampled. Tokens are then further filtered based on
    ``top_p``, with the final token selected using temperature sampling. Use a
    lower number for less random responses and a higher number for more random
    responses.
    """

    seed: int
    """Random seed for reproducibility.

    When ``seed`` is fixed to a specific number, the model makes a best effort
    to provide the same response for repeated requests.

    Not supported by all providers, and does not guarantee strict reproducibility.
    """

    stop_sequences: list[str]
    """Stop sequences to end generation.

    The model stops generating text as soon as one of these strings is
    encountered in the response.
    """

    thinking: bool
    """Configures whether the model should use thinking.

    Thinking is a process where the model spends additional tokens reasoning
    about the prompt before generating a response; pass a bool to enable or
    disable it.

    If `params.thinking` is `True`, thinking and thought summaries are enabled
    (if supported by the model/provider), with a default budget for thinking
    tokens.

    If `params.thinking` is `False`, thinking is wholly disabled, assuming the
    model allows this (some models, e.g. `google:gemini-2.5-pro`, do not allow
    disabling thinking).

    If `params.thinking` is unset (or `None`), provider-specific default
    behavior for the chosen model is used.
    """

    encode_thoughts_as_text: bool
    """Configures whether `Thought` content should be re-encoded as text for model consumption.

    If `True`, then when an `AssistantMessage` containing `Thoughts` is passed
    back to an LLM, those `Thoughts` are encoded as `Text` so the assistant can
    read them. This ensures the assistant has access to (at least the summarized
    output of) its reasoning process, in contrast with provider default
    behaviors that may ignore prior thoughts, particularly if tool calls are not
    involved.

    When `True`, Mirascope messages passed to the provider are always
    re-encoded rather than reusing raw provider response content, which may
    disable provider-specific behavior like cached reasoning tokens.

    If `False`, `Thoughts` are not encoded as text, and whether reasoning
    context is available to the model depends entirely on the provider's
    behavior.

    Defaults to `False` if unset.
    """
"""Google response decoding."""

import json
from collections.abc import AsyncIterator, Iterator, Sequence
from typing import Literal

from google.genai import types as genai_types

from ....content import (
    AssistantContentPart,
    Text,
    TextChunk,
    TextEndChunk,
    TextStartChunk,
    Thought,
    ThoughtChunk,
    ThoughtEndChunk,
    ThoughtStartChunk,
    ToolCall,
    ToolCallChunk,
    ToolCallEndChunk,
    ToolCallStartChunk,
)
from ....messages import AssistantMessage
from ....responses import (
    AsyncChunkIterator,
    ChunkIterator,
    FinishReason,
    FinishReasonChunk,
    RawMessageChunk,
    RawStreamEventChunk,
)
from ..model_ids import GoogleModelId
from .encode import UNKNOWN_TOOL_ID

# Maps Google finish-reason names onto Mirascope's provider-agnostic
# FinishReason. All safety/content-policy stops are surfaced as REFUSAL.
GOOGLE_FINISH_REASON_MAP = {
    "MAX_TOKENS": FinishReason.MAX_TOKENS,
    **dict.fromkeys(
        ("SAFETY", "RECITATION", "BLOCKLIST", "PROHIBITED_CONTENT", "SPII"),
        FinishReason.REFUSAL,
    ),
}
def _decode_content_part(part: genai_types.Part) -> AssistantContentPart | None:
    """Returns an `AssistantContentPart` (or `None`) decoded from a google `Part`"""
    # Branch order matters: a part flagged as a thought is checked before plain
    # text, and unsupported field types raise before the function_call path.
    if part.thought and part.text:
        return Thought(thought=part.text)
    if part.text:
        return Text(text=part.text)
    if part.video_metadata:
        raise NotImplementedError("Support for video metadata not implemented.")
    if part.inline_data:
        raise NotImplementedError("Support for inline data (Blob) not implemented.")
    if part.file_data:
        raise NotImplementedError("Support for file data (FileData) not implemented.")
    if part.code_execution_result:
        raise NotImplementedError("Support for code execution results not implemented.")
    if part.executable_code:
        raise NotImplementedError("Support for executable code not implemented.")
    if fn_call := part.function_call:
        if not fn_call.name or fn_call.args is None:
            raise ValueError(
                "Google function_call does not match spec"
            )  # pragma: no cover
        return ToolCall(
            id=fn_call.id or UNKNOWN_TOOL_ID,
            name=fn_call.name,
            args=json.dumps(fn_call.args),
        )
    if part.function_response:  # pragma: no cover
        raise NotImplementedError(
            "function_response part does not decode to AssistantContent."
        )
    if part.thought_signature:  # pragma: no cover
        raise NotImplementedError("Support for thought signature not implemented.")
    # Per the Part docstring, exactly one field within a Part should be set and
    # anything else is considered invalid. In practice, however, parts with no
    # recognized field do occur, so we handle them as empty content.
    return None  # pragma: no cover
def _decode_candidate_content(
    candidate: genai_types.Candidate,
) -> Sequence[AssistantContentPart]:
    """Returns a sequence of `AssistantContentPart` decoded from a google `Candidate`"""
    # A candidate with no content (or no parts) decodes to an empty sequence.
    if not (candidate.content and candidate.content.parts):
        return []
    # Parts that decode to None (empty/invalid parts) are dropped.
    return [
        decoded
        for raw_part in candidate.content.parts
        if (decoded := _decode_content_part(raw_part)) is not None
    ]
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def decode_response(
    response: genai_types.GenerateContentResponse, model_id: GoogleModelId
) -> tuple[AssistantMessage, FinishReason | None]:
    """Returns an `AssistantMessage` and `FinishReason` extracted from a `GenerateContentResponse`"""
    parts: Sequence[AssistantContentPart] = []
    raw_content: genai_types.Content | None = None
    finish: FinishReason | None = None

    # Only the first candidate is decoded; additional candidates are ignored.
    candidates = response.candidates
    if candidates:
        first = candidates[0]
        if first:
            parts = _decode_candidate_content(first)
            raw_content = first.content
            if first.finish_reason:
                finish = GOOGLE_FINISH_REASON_MAP.get(first.finish_reason)

    # Fall back to an empty Content so raw_message is always a dict.
    raw_content = raw_content or genai_types.Content()

    message = AssistantMessage(
        content=parts,
        provider="google",
        model_id=model_id,
        raw_message=raw_content.model_dump(),
    )
    return message, finish
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class _GoogleChunkProcessor:
    """Processes Google stream chunks and maintains state across chunks.

    Tracks which content kind ("text", "tool_call", "thought") is currently
    open so that matching Start/End chunks can be emitted as the stream
    transitions between content kinds, and accumulates every raw part so the
    full raw message can be reconstructed after the stream ends.
    """

    def __init__(self) -> None:
        # The content kind currently "open" in the output chunk stream, or
        # None when no Start chunk is awaiting its matching End chunk.
        self.current_content_type: Literal["text", "tool_call", "thought"] | None = None
        # Every raw part seen so far, in order, used by raw_message_chunk().
        self.accumulated_parts: list[genai_types.Part] = []
        # NOTE(review): reconstructed_content appears unused within this
        # class (raw_message_chunk builds a fresh Content) — confirm before
        # removing.
        self.reconstructed_content = genai_types.Content(parts=[])

    def process_chunk(
        self, chunk: genai_types.GenerateContentResponse
    ) -> ChunkIterator:
        """Process a single Google chunk and yield the appropriate content chunks."""
        # Always surface the raw provider event first.
        yield RawStreamEventChunk(raw_stream_event=chunk)

        # Only the first candidate is streamed; chunks without parts are skipped.
        candidate = chunk.candidates[0] if chunk.candidates else None
        if not candidate or not candidate.content or not candidate.content.parts:
            return  # pragma: no cover

        for part in candidate.content.parts:
            self.accumulated_parts.append(part)
            # Close the currently-open content kind when this part no longer
            # continues it. NOTE(review): a part with both `thought` and
            # `text` set while "text" is open would skip the TextEndChunk
            # (thought parts carry their content in `text`) — presumably
            # Gemini never interleaves these; confirm.
            if self.current_content_type == "thought" and not part.thought:
                yield ThoughtEndChunk()
                self.current_content_type = None
            elif self.current_content_type == "text" and not part.text:
                yield TextEndChunk()  # pragma: no cover
                self.current_content_type = None  # pragma: no cover
            elif self.current_content_type == "tool_call" and not part.function_call:
                # In testing, Gemini never emits tool calls and text in the same message
                # (even when specifically asked in system and user prompt), so
                # the following code is uncovered but included for completeness
                yield ToolCallEndChunk()  # pragma: no cover
                self.current_content_type = None  # pragma: no cover

            if part.thought:
                # Thought parts carry their content in part.text.
                if self.current_content_type is None:
                    yield ThoughtStartChunk()
                    self.current_content_type = "thought"
                if not part.text:
                    raise ValueError(
                        "Inside thought part with no text content"
                    )  # pragma: no cover
                yield ThoughtChunk(delta=part.text)

            elif part.text:
                if self.current_content_type is None:
                    yield TextStartChunk()
                    self.current_content_type = "text"

                yield TextChunk(delta=part.text)

            elif function_call := part.function_call:
                if not function_call.name:
                    raise RuntimeError(
                        "Required name missing on Google function call"
                    )  # pragma: no cover

                # Google delivers each function call as one complete part, so
                # Start/Chunk/End are emitted together and current_content_type
                # is not left open.
                yield ToolCallStartChunk(
                    id=function_call.id or UNKNOWN_TOOL_ID,
                    name=function_call.name,
                )

                yield ToolCallChunk(
                    delta=json.dumps(function_call.args)
                    if function_call.args
                    else "{}",
                )
                yield ToolCallEndChunk()

        if candidate.finish_reason:
            # The stream is ending: close whatever content kind is still open.
            if self.current_content_type == "text":
                yield TextEndChunk()
            elif self.current_content_type == "thought":
                yield ThoughtEndChunk()  # pragma: no cover
            elif self.current_content_type is not None:
                raise NotImplementedError

            self.current_content_type = None

            # Unmapped provider finish reasons are silently dropped.
            finish_reason = GOOGLE_FINISH_REASON_MAP.get(candidate.finish_reason)
            if finish_reason is not None:
                yield FinishReasonChunk(finish_reason=finish_reason)

    def raw_message_chunk(self) -> RawMessageChunk:
        # Reassemble the full raw message from every part seen in the stream.
        content = genai_types.Content(role="model", parts=self.accumulated_parts)
        return RawMessageChunk(raw_message=content.model_dump())
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def decode_stream(
    google_stream: Iterator[genai_types.GenerateContentResponse],
) -> ChunkIterator:
    """Returns a ChunkIterator converted from a Google stream."""
    state = _GoogleChunkProcessor()
    for raw_chunk in google_stream:
        yield from state.process_chunk(raw_chunk)
    # After the stream is exhausted, emit the fully reconstructed raw message.
    yield state.raw_message_chunk()
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
async def decode_async_stream(
    google_stream: AsyncIterator[genai_types.GenerateContentResponse],
) -> AsyncChunkIterator:
    """Returns an AsyncChunkIterator converted from a Google async stream."""
    state = _GoogleChunkProcessor()
    async for raw_chunk in google_stream:
        # process_chunk is a synchronous generator; re-yield its items in order.
        for piece in state.process_chunk(raw_chunk):
            yield piece
    # After the stream is exhausted, emit the fully reconstructed raw message.
    yield state.raw_message_chunk()
|
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
"""Google message encoding and request preparation."""
|
|
2
|
+
|
|
3
|
+
import base64
|
|
4
|
+
import json
|
|
5
|
+
from collections.abc import Sequence
|
|
6
|
+
from functools import lru_cache
|
|
7
|
+
from typing import Any, cast
|
|
8
|
+
|
|
9
|
+
from google.genai import types as genai_types
|
|
10
|
+
|
|
11
|
+
from ....content import ContentPart
|
|
12
|
+
from ....exceptions import FeatureNotSupportedError
|
|
13
|
+
from ....formatting import (
|
|
14
|
+
Format,
|
|
15
|
+
FormattableT,
|
|
16
|
+
_utils as _formatting_utils,
|
|
17
|
+
resolve_format,
|
|
18
|
+
)
|
|
19
|
+
from ....messages import AssistantMessage, Message, UserMessage
|
|
20
|
+
from ....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
|
|
21
|
+
from ...base import BaseKwargs, Params, _utils as _base_utils
|
|
22
|
+
from ..model_ids import GoogleModelId
|
|
23
|
+
|
|
24
|
+
UNKNOWN_TOOL_ID = "google_unknown_tool_id"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class GoogleKwargs(BaseKwargs, genai_types.GenerateContentConfigDict):
    """Google's `GenerateContentConfigDict` typed dict, subclassing BaseKwargs for type safety.

    Instances are populated by `encode_request` and passed as the `config`
    argument to the genai `Client.models.generate_content` method.
    """
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _resolve_refs(
|
|
32
|
+
schema: dict[str, Any], defs: dict[str, Any] | None = None
|
|
33
|
+
) -> dict[str, Any]:
|
|
34
|
+
"""Recursively resolve $ref references in a JSON Schema object."""
|
|
35
|
+
if isinstance(schema, dict):
|
|
36
|
+
if "$ref" in schema:
|
|
37
|
+
ref_path = schema["$ref"]
|
|
38
|
+
if ref_path.startswith("#/$defs/"):
|
|
39
|
+
ref_name = ref_path.split("/")[-1]
|
|
40
|
+
if defs and ref_name in defs:
|
|
41
|
+
return _resolve_refs(defs[ref_name], defs)
|
|
42
|
+
return schema # pragma: no cover
|
|
43
|
+
else:
|
|
44
|
+
return {k: _resolve_refs(v, defs) for k, v in schema.items()}
|
|
45
|
+
elif isinstance(schema, list):
|
|
46
|
+
return [_resolve_refs(item, defs) for item in schema]
|
|
47
|
+
else:
|
|
48
|
+
return schema
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _encode_content(
    content: Sequence[ContentPart], encode_thoughts: bool
) -> list[genai_types.PartDict]:
    """Returns a list of google `PartDicts` converted from a sequence of Mirascope `ContentPart`s"""

    def _inline_blob(data: str, mime_type: str) -> genai_types.PartDict:
        # Base64 image and audio payloads share the same inline-blob shape.
        return genai_types.PartDict(
            inline_data=genai_types.BlobDict(
                data=base64.b64decode(data),
                mime_type=mime_type,
            )
        )

    encoded: list[genai_types.PartDict] = []
    for part in content:
        if part.type == "text":
            encoded.append(genai_types.PartDict(text=part.text))
        elif part.type == "image":
            if part.source.type == "base64_image_source":
                encoded.append(_inline_blob(part.source.data, part.source.mime_type))
            elif part.source.type == "url_image_source":
                raise FeatureNotSupportedError(
                    "url_image_source",
                    "google",
                    message="Google does not support URL-referenced images. Try `llm.Image.download(...) or `llm.Image.download_async(...)`",
                )
        elif part.type == "audio":
            # Only base64 audio sources are encoded; other source types fall
            # through without producing a part (matching existing behavior).
            if part.source.type == "base64_audio_source":
                encoded.append(_inline_blob(part.source.data, part.source.mime_type))
        elif part.type == "tool_call":
            encoded.append(
                genai_types.PartDict(
                    function_call=genai_types.FunctionCallDict(
                        name=part.name,
                        args=json.loads(part.args),
                        # The sentinel id means Google gave us no id; omit it.
                        id=part.id if part.id != UNKNOWN_TOOL_ID else None,
                    )
                )
            )
        elif part.type == "tool_output":
            encoded.append(
                genai_types.PartDict(
                    function_response=genai_types.FunctionResponseDict(
                        id=part.id if part.id != UNKNOWN_TOOL_ID else None,
                        name=part.name,
                        response={"output": str(part.value)},
                    )
                )
            )
        elif part.type == "thought":
            # Thoughts are only round-tripped when explicitly requested, and
            # then only as plain prefixed text.
            if encode_thoughts:
                encoded.append(
                    genai_types.PartDict(text="**Thinking:** " + part.thought)
                )
        else:
            raise NotImplementedError(f"Unsupported content type: {part.type}")
    return encoded
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _encode_message(
    message: UserMessage | AssistantMessage,
    model_id: GoogleModelId,
    encode_thoughts: bool,
) -> genai_types.ContentDict:
    """Returns a Google `ContentDict` converted from a Mirascope `Message`"""
    # Round-trip fast path: an assistant message that this same Google model
    # produced can be sent back verbatim as its preserved raw payload (unless
    # thoughts must be re-encoded as text).
    can_reuse_raw = (
        message.role == "assistant"
        and message.provider == "google"
        and message.model_id == model_id
        and bool(message.raw_message)
        and not encode_thoughts
    )
    if can_reuse_raw:
        return cast(genai_types.ContentDict, message.raw_message)

    # Google uses "model" where Mirascope uses "assistant".
    google_role = "model" if message.role == "assistant" else message.role
    return genai_types.ContentDict(
        role=google_role,
        parts=_encode_content(message.content, encode_thoughts),
    )
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def _encode_messages(
    messages: Sequence[UserMessage | AssistantMessage],
    model_id: GoogleModelId,
    encode_thoughts: bool,
) -> genai_types.ContentListUnionDict:
    """Returns a `ContentListUnionDict` converted from a sequence of user or assistant `Messages`s"""
    encoded: list[genai_types.ContentDict] = []
    for msg in messages:
        encoded.append(_encode_message(msg, model_id, encode_thoughts))
    return encoded
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
@lru_cache(maxsize=128)
def _convert_tool_to_function_declaration(
    tool: ToolSchema,
) -> genai_types.FunctionDeclarationDict:
    """Convert a single Mirascope tool to Google FunctionDeclaration format with caching.

    Fix: removed the dead write ``schema_dict["type"] = "object"`` — the key
    was never read afterward (only ``$defs``, ``properties``, and ``required``
    are consumed, and the returned ``SchemaDict`` sets its type explicitly).

    Args:
        tool: The Mirascope tool schema to convert. Must be hashable so it
            can serve as an lru_cache key.

    Returns:
        A ``FunctionDeclarationDict`` with ``$ref`` references in the
        parameter properties resolved inline (Google's schema format does not
        support ``$defs``).
    """
    schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)

    defs = schema_dict.get("$defs")
    properties = schema_dict.get("properties", {})
    if defs:
        # Google's SchemaDict has no $defs support, so inline every reference.
        properties = _resolve_refs(properties, defs)

    return genai_types.FunctionDeclarationDict(
        name=tool.name,
        description=tool.description,
        parameters=genai_types.SchemaDict(
            type=genai_types.Type.OBJECT,
            properties=properties,
            required=schema_dict.get("required", []),
        ),
    )
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def encode_request(
    *,
    model_id: GoogleModelId,
    messages: Sequence[Message],
    tools: Sequence[ToolSchema] | BaseToolkit | None,
    format: type[FormattableT] | Format[FormattableT] | None,
    params: Params,
) -> tuple[
    Sequence[Message],
    Format[FormattableT] | None,
    genai_types.ContentListUnionDict,
    GoogleKwargs,
]:
    """Prepares a request for the genai `Client.models.generate_content` method.

    Translates provider-agnostic params, tools, and format settings into the
    Google config, and encodes the messages into Google's content format.

    Args:
        model_id: The Google model the request targets.
        messages: The conversation so far (may include a system message).
        tools: Tool schemas (or a toolkit) to expose as function declarations.
        format: Desired structured-output format, resolved to a concrete mode.
        params: Provider-agnostic request parameters.

    Returns:
        A tuple of (possibly-augmented messages, resolved format, encoded
        Google contents, Google config kwargs).
    """
    google_config: GoogleKwargs = GoogleKwargs()
    encode_thoughts = False

    # The accessor context manager raises for any param set but never read,
    # so every supported param must be accessed inside this block.
    with _base_utils.ensure_all_params_accessed(
        params=params, provider="google"
    ) as param_accessor:
        if param_accessor.temperature is not None:
            google_config["temperature"] = param_accessor.temperature
        if param_accessor.max_tokens is not None:
            google_config["max_output_tokens"] = param_accessor.max_tokens
        if param_accessor.top_p is not None:
            google_config["top_p"] = param_accessor.top_p
        if param_accessor.top_k is not None:
            google_config["top_k"] = param_accessor.top_k
        if param_accessor.seed is not None:
            google_config["seed"] = param_accessor.seed
        if param_accessor.stop_sequences is not None:
            google_config["stop_sequences"] = param_accessor.stop_sequences
        if param_accessor.thinking is not None:
            if param_accessor.thinking:
                google_config["thinking_config"] = genai_types.ThinkingConfigDict(
                    thinking_budget=-1,  # automatic budget
                    include_thoughts=True,
                )
            else:
                # Explicitly disable thinking rather than omitting the config.
                google_config["thinking_config"] = genai_types.ThinkingConfigDict(
                    include_thoughts=False, thinking_budget=0
                )
        if param_accessor.encode_thoughts_as_text:
            encode_thoughts = True

    # Normalize tools to a plain sequence of schemas.
    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
    google_tools: list[genai_types.ToolDict] = []

    format = resolve_format(
        format,
        # Google does not support strict outputs when tools are present
        # (Gemini 2.5 will error, 2.0 and below will ignore tools)
        default_mode="strict" if not tools else "tool",
    )
    if format is not None:
        if format.mode in ("strict", "json") and tools:
            raise FeatureNotSupportedError(
                feature=f"formatting_mode:{format.mode} with tools",
                provider="google",
            )

        if format.mode == "strict":
            google_config["response_mime_type"] = "application/json"
            google_config["response_schema"] = format.schema
        elif format.mode == "tool":
            # Expose the format as a synthetic tool and force a function call.
            format_tool_schema = _formatting_utils.create_tool_schema(format)
            format_tool = _convert_tool_to_function_declaration(format_tool_schema)
            google_tools.append(
                genai_types.ToolDict(function_declarations=[format_tool])
            )
            function_calling_config = genai_types.FunctionCallingConfigDict(
                mode=genai_types.FunctionCallingConfigMode.ANY
            )
            if not tools:
                # No user tools: restrict calling to the format tool only.
                function_calling_config["allowed_function_names"] = [FORMAT_TOOL_NAME]

            google_config["tool_config"] = genai_types.ToolConfigDict(
                function_calling_config=function_calling_config
            )
        elif format.mode == "json":
            google_config["response_mime_type"] = "application/json"

        if format.formatting_instructions:
            # Augment messages BEFORE extracting the system message below so
            # the instructions end up in system_instruction.
            messages = _base_utils.add_system_instructions(
                messages, format.formatting_instructions
            )

    if tools:
        function_declarations = [
            _convert_tool_to_function_declaration(tool) for tool in tools
        ]
        google_tools.append(
            genai_types.ToolDict(function_declarations=function_declarations)
        )

    if google_tools:
        google_config["tools"] = cast(genai_types.ToolListUnionDict, google_tools)

    # Google takes the system message as config, not as a content entry.
    system_message_content, remaining_messages = _base_utils.extract_system_message(
        messages
    )

    if system_message_content:
        google_config["system_instruction"] = system_message_content

    return (
        messages,
        format,
        _encode_messages(remaining_messages, model_id, encode_thoughts),
        google_config,
    )
|