mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Google registered LLM models."""
|
|
2
|
+
|
|
3
|
+
from typing import Literal, TypeAlias
|
|
4
|
+
|
|
5
|
+
GoogleModelId: TypeAlias = (
|
|
6
|
+
Literal[
|
|
7
|
+
"gemini-2.5-pro",
|
|
8
|
+
"gemini-2.5-flash",
|
|
9
|
+
"gemini-2.5-flash-lite",
|
|
10
|
+
"gemini-2.0-flash",
|
|
11
|
+
"gemini-2.0-flash-lite",
|
|
12
|
+
]
|
|
13
|
+
| str
|
|
14
|
+
)
|
|
15
|
+
"""The Google model ids registered with Mirascope."""
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""OpenAI client implementation."""

# This package exposes two separate OpenAI API surfaces side by side:
# the Chat Completions API (`.completions`) and the Responses API
# (`.responses`). Each submodule defines its own `client`/`get_client`,
# so they are aliased on import to keep both available here without a
# name clash.
from .completions import (
    OpenAICompletionsClient,
    OpenAICompletionsModelId,
    client as completions_client,
    get_client as get_completions_client,
)
from .responses import (
    OpenAIResponsesClient,
    OpenAIResponsesModelId,
    client as responses_client,
    get_client as get_responses_client,
)

__all__ = [
    "OpenAICompletionsClient",
    "OpenAICompletionsModelId",
    "OpenAIResponsesClient",
    "OpenAIResponsesModelId",
    "completions_client",
    "get_completions_client",
    "get_responses_client",
    "responses_client",
]
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""OpenAI completions response decoding."""
|
|
2
|
+
|
|
3
|
+
from typing import Literal
|
|
4
|
+
|
|
5
|
+
from openai import AsyncStream, Stream
|
|
6
|
+
from openai.types import chat as openai_types
|
|
7
|
+
|
|
8
|
+
from .....content import (
|
|
9
|
+
AssistantContentPart,
|
|
10
|
+
Text,
|
|
11
|
+
TextChunk,
|
|
12
|
+
TextEndChunk,
|
|
13
|
+
TextStartChunk,
|
|
14
|
+
ToolCall,
|
|
15
|
+
ToolCallChunk,
|
|
16
|
+
ToolCallEndChunk,
|
|
17
|
+
ToolCallStartChunk,
|
|
18
|
+
)
|
|
19
|
+
from .....messages import AssistantMessage
|
|
20
|
+
from .....responses import (
|
|
21
|
+
AsyncChunkIterator,
|
|
22
|
+
ChunkIterator,
|
|
23
|
+
FinishReason,
|
|
24
|
+
FinishReasonChunk,
|
|
25
|
+
RawStreamEventChunk,
|
|
26
|
+
)
|
|
27
|
+
from ..model_ids import OpenAICompletionsModelId
|
|
28
|
+
|
|
29
|
+
# Translate OpenAI `finish_reason` strings into mirascope finish reasons.
# Reasons not listed here (e.g. a normal stop) deliberately resolve to
# `None` via `.get()` at the call sites.
OPENAI_FINISH_REASON_MAP = dict(
    length=FinishReason.MAX_TOKENS,
    content_filter=FinishReason.REFUSAL,
)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def decode_response(
    response: openai_types.ChatCompletion,
    model_id: OpenAICompletionsModelId,
) -> tuple[AssistantMessage, FinishReason | None]:
    """Convert OpenAI ChatCompletion to mirascope AssistantMessage.

    Only the first choice is decoded. Text, refusal text, and tool calls are
    collected as assistant content parts; a refusal forces the finish reason
    to ``FinishReason.REFUSAL``, otherwise OpenAI's finish reason is mapped
    via ``OPENAI_FINISH_REASON_MAP`` (``None`` when unmapped).
    """
    first_choice = response.choices[0]
    raw = first_choice.message

    content_parts: list[AssistantContentPart] = []
    if raw.content:
        content_parts.append(Text(text=raw.content))

    model_refused = bool(raw.refusal)
    if model_refused:
        # Surface the refusal text as a regular text part.
        content_parts.append(Text(text=raw.refusal))

    for call in raw.tool_calls or []:
        if call.type == "custom":
            # This should never happen, because we never create "custom" tools
            # https://platform.openai.com/docs/guides/function-calling#custom-tools
            raise NotImplementedError("OpenAI custom tools are not supported.")
        content_parts.append(
            ToolCall(
                id=call.id,
                name=call.function.name,
                args=call.function.arguments,
            )
        )

    if model_refused:
        finish_reason = FinishReason.REFUSAL
    else:
        finish_reason = OPENAI_FINISH_REASON_MAP.get(first_choice.finish_reason)

    assistant_message = AssistantMessage(
        content=content_parts,
        provider="openai:completions",
        model_id=model_id,
        raw_message=raw.model_dump(exclude_none=True),
    )
    return assistant_message, finish_reason
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class _OpenAIChunkProcessor:
    """Processes OpenAI chat completion chunks and maintains state across chunks.

    The processor turns OpenAI's flat delta stream into mirascope's bracketed
    chunk protocol, in which every text or tool-call span is wrapped in
    matching Start/End chunks. State is kept between `process_chunk` calls so
    one processor instance must be used for a whole stream.
    """

    def __init__(self) -> None:
        # Kind of content span currently open in the output ("text" or
        # "tool_call"); None before any content has been seen.
        self.current_content_type: Literal["text", "tool_call"] | None = None
        # Index of the tool call currently being streamed, if any.
        self.current_tool_index: int | None = None
        # Latched once any refusal delta is seen; forces REFUSAL finish reason.
        self.refusal_encountered = False

    def process_chunk(self, chunk: openai_types.ChatCompletionChunk) -> ChunkIterator:
        """Process a single OpenAI chunk and yield the appropriate content chunks."""
        # Always surface the raw provider event first so consumers can access it.
        yield RawStreamEventChunk(raw_stream_event=chunk)

        choice = chunk.choices[0] if chunk.choices else None
        if not choice:
            return  # pragma: no cover

        delta = choice.delta

        # Refusal text is streamed through the same text channel as content.
        content = delta.content or delta.refusal
        if delta.refusal:
            self.refusal_encountered = True
        if content is not None:
            if self.current_content_type is None:
                # First content of the stream: open a text span.
                yield TextStartChunk()
                self.current_content_type = "text"
            yield TextChunk(delta=content)

        if delta.tool_calls:
            if self.current_content_type == "text":
                # In testing, I can't get OpenAI to emit text and tool calls in the same chunk
                # But we handle this defensively.
                yield TextEndChunk()  # pragma: no cover
            self.current_content_type = "tool_call"

            for tool_call_delta in delta.tool_calls:
                index = tool_call_delta.index

                # OpenAI streams tool calls in index order; going backwards
                # would mean data for a tool we already closed.
                if (
                    self.current_tool_index is not None
                    and self.current_tool_index > index
                ):
                    raise RuntimeError(
                        f"Received tool data for already-finished tool at index {index}"
                    )  # pragma: no cover

                # Moving to a higher index closes the previous tool-call span.
                if (
                    self.current_tool_index is not None
                    and self.current_tool_index < index
                ):
                    yield ToolCallEndChunk()
                    self.current_tool_index = None

                if self.current_tool_index is None:
                    # Opening a new tool-call span: the first delta must carry
                    # both the tool name and id.
                    if not tool_call_delta.function or not (
                        name := tool_call_delta.function.name
                    ):
                        raise RuntimeError(
                            f"Missing name for tool call at index {index}"
                        )  # pragma: no cover

                    self.current_tool_index = index
                    if not (tool_id := tool_call_delta.id):
                        raise RuntimeError(
                            f"Missing id for tool call at index {index}"
                        )  # pragma: no cover

                    yield ToolCallStartChunk(
                        id=tool_id,
                        name=name,
                    )

                if tool_call_delta.function and tool_call_delta.function.arguments:
                    yield ToolCallChunk(delta=tool_call_delta.function.arguments)

        if choice.finish_reason:
            # Close whichever span is still open before ending the stream.
            if self.current_content_type == "text":
                yield TextEndChunk()
            elif self.current_content_type == "tool_call":
                yield ToolCallEndChunk()
            elif self.current_content_type is not None:  # pragma: no cover
                raise NotImplementedError()

            # A refusal anywhere in the stream overrides OpenAI's reported
            # finish reason; unmapped reasons (e.g. normal stop) yield nothing.
            finish_reason = (
                FinishReason.REFUSAL
                if self.refusal_encountered
                else OPENAI_FINISH_REASON_MAP.get(choice.finish_reason)
            )
            if finish_reason is not None:
                yield FinishReasonChunk(finish_reason=finish_reason)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def decode_stream(
    openai_stream: Stream[openai_types.ChatCompletionChunk],
) -> ChunkIterator:
    """Convert an OpenAI `Stream[ChatCompletionChunk]` into a `ChunkIterator`.

    Feeds every raw chunk through a single stateful processor so that
    text/tool-call boundaries are tracked across the whole stream.
    """
    chunk_processor = _OpenAIChunkProcessor()
    for raw_chunk in openai_stream:
        for decoded_chunk in chunk_processor.process_chunk(raw_chunk):
            yield decoded_chunk
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
async def decode_async_stream(
    openai_stream: AsyncStream[openai_types.ChatCompletionChunk],
) -> AsyncChunkIterator:
    """Convert an OpenAI `AsyncStream[ChatCompletionChunk]` into an `AsyncChunkIterator`.

    Mirrors `decode_stream`; the processor itself is synchronous, so only the
    outer iteration over the provider stream is awaited.
    """
    chunk_processor = _OpenAIChunkProcessor()
    async for raw_chunk in openai_stream:
        for decoded_chunk in chunk_processor.process_chunk(raw_chunk):
            yield decoded_chunk
|
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
"""OpenAI completions message encoding and request preparation."""
|
|
2
|
+
|
|
3
|
+
import copy
from collections.abc import Sequence
from functools import lru_cache
from typing import TypedDict, cast

from openai import Omit
from openai.types import chat as openai_types, shared_params as shared_openai_types
from openai.types.shared_params.response_format_json_schema import JSONSchema

from .....exceptions import (
    FeatureNotSupportedError,
    FormattingModeNotSupportedError,
)
from .....formatting import (
    Format,
    FormattableT,
    _utils as _formatting_utils,
    resolve_format,
)
from .....messages import AssistantMessage, Message, UserMessage
from .....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
from ....base import Params, _utils as _base_utils
from ...shared import _utils as _shared_utils
from ..model_ids import OpenAICompletionsModelId
from .model_features import MODEL_FEATURES
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class ChatCompletionCreateKwargs(TypedDict, total=False):
    """Kwargs for OpenAI ChatCompletion.create method."""

    # Model identifier forwarded verbatim to the OpenAI API.
    model: str
    # Fully encoded conversation history (system/user/assistant/tool messages).
    messages: Sequence[openai_types.ChatCompletionMessageParam]
    # `Omit` is the OpenAI SDK sentinel for "leave this field out of the request";
    # it is used below instead of `None` for every optional field.
    tools: Sequence[openai_types.ChatCompletionToolParam] | Omit
    # Either plain JSON-object mode or a strict JSON-schema response format.
    response_format: (
        shared_openai_types.ResponseFormatJSONObject
        | shared_openai_types.ResponseFormatJSONSchema
        | Omit
    )
    tool_choice: openai_types.ChatCompletionToolChoiceOptionParam | Omit
    parallel_tool_calls: bool | Omit
    temperature: float | Omit
    max_tokens: int | Omit
    top_p: float | Omit
    seed: int | Omit
    # Stop sequence(s); OpenAI accepts a single string or a list of strings.
    stop: str | list[str] | Omit
    reasoning_effort: shared_openai_types.ReasoningEffort | Omit
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _encode_user_message(
    message: UserMessage,
    model_id: OpenAICompletionsModelId,
) -> list[openai_types.ChatCompletionMessageParam]:
    """Convert Mirascope `UserMessage` to a list of OpenAI `ChatCompletionMessageParam`.

    Multiple text content parts are combined into a single user message.
    Tool outputs become separate tool messages.
    """
    encoded: list[openai_types.ChatCompletionMessageParam] = []
    pending: list[
        openai_types.ChatCompletionContentPartTextParam
        | openai_types.ChatCompletionContentPartImageParam
        | openai_types.ChatCompletionContentPartInputAudioParam
    ] = []

    def _flush() -> None:
        # Emit accumulated parts as one user message. A lone text part collapses
        # to a plain string content for a smaller request payload.
        nonlocal pending
        if not pending:
            return
        if len(pending) == 1 and pending[0]["type"] == "text":
            encoded.append(
                openai_types.ChatCompletionUserMessageParam(
                    role="user", content=pending[0]["text"]
                )
            )
        else:
            encoded.append(
                openai_types.ChatCompletionUserMessageParam(
                    role="user", content=pending
                )
            )
        pending = []

    for part in message.content:
        if part.type == "text":
            pending.append(
                openai_types.ChatCompletionContentPartTextParam(
                    text=part.text, type="text"
                )
            )
        elif part.type == "image":
            # URL sources pass through; base64 sources become a data URI.
            if part.source.type == "url_image_source":
                image_url = part.source.url
            else:
                image_url = f"data:{part.source.mime_type};base64,{part.source.data}"
            pending.append(
                openai_types.ChatCompletionContentPartImageParam(
                    type="image_url", image_url={"url": image_url, "detail": "auto"}
                )
            )
        elif part.type == "audio":
            if MODEL_FEATURES.get(model_id) == "no_audio_support":
                raise FeatureNotSupportedError(
                    feature="Audio inputs",
                    provider="openai:completions",
                    message=f"Model '{model_id}' does not support audio inputs.",
                )

            # NOTE(review): non-base64 audio sources are silently skipped here,
            # matching the original behavior — confirm this is intentional.
            if part.source.type == "base64_audio_source":
                audio_format = part.source.mime_type.split("/")[1]
                if audio_format not in ("wav", "mp3"):
                    raise FeatureNotSupportedError(
                        feature=f"Audio format: {audio_format}",
                        provider="openai:completions",
                        message="OpenAI only supports 'wav' and 'mp3' audio formats.",
                    )  # pragma: no cover
                pending.append(
                    openai_types.ChatCompletionContentPartInputAudioParam(
                        type="input_audio",
                        input_audio={
                            "data": part.source.data,
                            "format": audio_format,
                        },
                    )
                )
        elif part.type == "tool_output":
            # Tool results must be their own `role="tool"` messages, so flush
            # any accumulated user content first to preserve ordering.
            _flush()
            encoded.append(
                openai_types.ChatCompletionToolMessageParam(
                    role="tool",
                    content=str(part.value),
                    tool_call_id=part.id,
                )
            )
        else:
            raise NotImplementedError(
                f"Unsupported user content part type: {part.type}"
            )
    _flush()

    return encoded
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _encode_assistant_message(
    message: AssistantMessage, model_id: OpenAICompletionsModelId, encode_thoughts: bool
) -> openai_types.ChatCompletionAssistantMessageParam:
    """Convert Mirascope `AssistantMessage` to OpenAI `ChatCompletionAssistantMessageParam`."""

    # If the message originated from this exact provider/model and we are not
    # rewriting thoughts as text, round-trip the provider's raw message untouched.
    same_origin = (
        message.provider == "openai:completions" and message.model_id == model_id
    )
    if same_origin and message.raw_message and not encode_thoughts:
        return cast(
            openai_types.ChatCompletionAssistantMessageParam, message.raw_message
        )

    texts: list[openai_types.ChatCompletionContentPartTextParam] = []
    tool_calls: list[openai_types.ChatCompletionMessageToolCallParam] = []
    for part in message.content:
        if part.type == "text":
            texts.append(
                openai_types.ChatCompletionContentPartTextParam(
                    text=part.text, type="text"
                )
            )
        elif part.type == "tool_call":
            tool_calls.append(
                openai_types.ChatCompletionMessageToolCallParam(
                    id=part.id,
                    type="function",
                    function={"name": part.name, "arguments": part.args},
                )
            )
        elif part.type == "thought":
            # Thoughts are dropped unless explicitly re-encoded as visible text.
            if encode_thoughts:
                texts.append(
                    openai_types.ChatCompletionContentPartTextParam(
                        text="**Thinking:** " + part.thought, type="text"
                    )
                )
        else:
            raise NotImplementedError(f"Unsupported content type: {part.type}")

    # A single text part collapses to a plain string; none at all maps to None.
    content: str | list[openai_types.ChatCompletionContentPartTextParam] | None
    if not texts:
        content = None
    elif len(texts) == 1:
        content = texts[0]["text"]
    else:
        content = texts

    assembled = {
        "role": "assistant",
        "content": content,
    }
    if tool_calls:
        assembled["tool_calls"] = tool_calls

    return openai_types.ChatCompletionAssistantMessageParam(**assembled)
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def _encode_message(
|
|
202
|
+
message: Message, model_id: OpenAICompletionsModelId, encode_thoughts: bool
|
|
203
|
+
) -> list[openai_types.ChatCompletionMessageParam]:
|
|
204
|
+
"""Convert a Mirascope `Message` to OpenAI `ChatCompletionMessageParam` format.
|
|
205
|
+
|
|
206
|
+
Args:
|
|
207
|
+
message: A Mirascope message (system, user, or assistant)
|
|
208
|
+
model_id: The model ID being used
|
|
209
|
+
encode_thoughts: Whether to encode thoughts as text
|
|
210
|
+
|
|
211
|
+
Returns:
|
|
212
|
+
A list of OpenAI `ChatCompletionMessageParam` (may be multiple for tool outputs)
|
|
213
|
+
"""
|
|
214
|
+
if message.role == "system":
|
|
215
|
+
return [
|
|
216
|
+
openai_types.ChatCompletionSystemMessageParam(
|
|
217
|
+
role="system", content=message.content.text
|
|
218
|
+
)
|
|
219
|
+
]
|
|
220
|
+
elif message.role == "user":
|
|
221
|
+
return _encode_user_message(message, model_id)
|
|
222
|
+
elif message.role == "assistant":
|
|
223
|
+
return [_encode_assistant_message(message, model_id, encode_thoughts)]
|
|
224
|
+
else:
|
|
225
|
+
raise ValueError(f"Unsupported role: {message.role}") # pragma: no cover
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
@lru_cache(maxsize=128)
def _convert_tool_to_tool_param(
    tool: ToolSchema,
) -> openai_types.ChatCompletionToolParam:
    """Convert a single Mirascope `Tool` to OpenAI ChatCompletionToolParam with caching."""
    # Dump the pydantic parameter model to a plain dict and pin the JSON-schema
    # shape OpenAI expects for function parameters.
    parameters = tool.parameters.model_dump(by_alias=True, exclude_none=True)
    parameters["type"] = "object"
    _shared_utils._ensure_additional_properties_false(parameters)

    function_spec = {
        "name": tool.name,
        "description": tool.description,
        "parameters": parameters,
        "strict": tool.strict,
    }
    return openai_types.ChatCompletionToolParam(
        type="function", function=function_spec
    )
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def _create_strict_response_format(
    format: Format[FormattableT],
) -> shared_openai_types.ResponseFormatJSONSchema:
    """Create OpenAI strict response format from a Mirascope Format.

    Args:
        format: The `Format` instance containing schema and metadata

    Returns:
        Dictionary containing OpenAI response_format specification
    """
    # FIX: use a deep copy. `_ensure_additional_properties_false` recursively
    # mutates nested schema dicts, and the previous shallow `.copy()` let those
    # mutations leak back into the caller's `format.schema`.
    schema = copy.deepcopy(format.schema)

    _shared_utils._ensure_additional_properties_false(schema)

    json_schema = JSONSchema(
        name=format.name,
        schema=schema,
        strict=True,
    )
    # `description` is optional in the OpenAI schema; only set it when present.
    if format.description:
        json_schema["description"] = format.description

    return shared_openai_types.ResponseFormatJSONSchema(
        type="json_schema", json_schema=json_schema
    )
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
def encode_request(
    *,
    model_id: OpenAICompletionsModelId,
    messages: Sequence[Message],
    tools: Sequence[ToolSchema] | BaseToolkit | None,
    format: type[FormattableT] | Format[FormattableT] | None,
    params: Params,
) -> tuple[Sequence[Message], Format[FormattableT] | None, ChatCompletionCreateKwargs]:
    """Prepares a request for the `OpenAI.chat.completions.create` method.

    Args:
        model_id: Target OpenAI completions model.
        messages: The Mirascope messages to encode.
        tools: Tool schemas (or a toolkit) to expose to the model, if any.
        format: Optional response format (type or resolved `Format`).
        params: Provider-agnostic call parameters.

    Returns:
        The (possibly instruction-augmented) messages, the resolved format,
        and the fully assembled kwargs for the OpenAI SDK call.
    """
    kwargs: ChatCompletionCreateKwargs = ChatCompletionCreateKwargs(
        {
            "model": model_id,
        }
    )
    encode_thoughts = False

    # The accessor context asserts every supported param was read and rejects
    # params this provider cannot honor.
    with _base_utils.ensure_all_params_accessed(
        params=params,
        provider="openai:completions",
        unsupported_params=["top_k", "thinking"],
    ) as param_accessor:
        if param_accessor.temperature is not None:
            kwargs["temperature"] = param_accessor.temperature
        if param_accessor.max_tokens is not None:
            kwargs["max_tokens"] = param_accessor.max_tokens
        if param_accessor.top_p is not None:
            kwargs["top_p"] = param_accessor.top_p

        if param_accessor.seed is not None:
            kwargs["seed"] = param_accessor.seed
        if param_accessor.stop_sequences is not None:
            kwargs["stop"] = param_accessor.stop_sequences
        # FIX: was `is not None`, which enabled thought encoding even when the
        # caller explicitly passed `encode_thoughts_as_text=False`.
        if param_accessor.encode_thoughts_as_text:
            encode_thoughts = True

    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []

    openai_tools = [_convert_tool_to_tool_param(tool) for tool in tools]

    # Strict JSON-schema output is only available on models that support it;
    # fall back to tool-based formatting otherwise.
    model_supports_strict = (
        model_id not in _shared_utils.MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
    )
    default_mode = "strict" if model_supports_strict else "tool"
    format = resolve_format(format, default_mode=default_mode)
    if format is not None:
        if format.mode == "strict":
            if not model_supports_strict:
                raise FormattingModeNotSupportedError(
                    formatting_mode="strict",
                    provider="openai:completions",
                    model_id=model_id,
                )
            kwargs["response_format"] = _create_strict_response_format(format)
        elif format.mode == "tool":
            if tools:
                # Other tools exist: force some tool call, any of them.
                kwargs["tool_choice"] = "required"
            else:
                # Only the format tool exists: force that exact tool.
                kwargs["tool_choice"] = {
                    "type": "function",
                    "function": {"name": FORMAT_TOOL_NAME},
                }
            kwargs["parallel_tool_calls"] = False
            format_tool_schema = _formatting_utils.create_tool_schema(format)
            openai_tools.append(_convert_tool_to_tool_param(format_tool_schema))
        elif (
            format.mode == "json"
            and model_id not in _shared_utils.MODELS_WITHOUT_JSON_OBJECT_SUPPORT
        ):
            kwargs["response_format"] = {"type": "json_object"}

        if format.formatting_instructions:
            messages = _base_utils.add_system_instructions(
                messages, format.formatting_instructions
            )

    if openai_tools:
        kwargs["tools"] = openai_tools

    encoded_messages: list[openai_types.ChatCompletionMessageParam] = []
    for message in messages:
        encoded_messages.extend(_encode_message(message, model_id, encode_thoughts))
    kwargs["messages"] = encoded_messages

    return messages, format, kwargs
|