mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
"""OpenAI Responses response decoding."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Literal
|
|
4
|
+
|
|
5
|
+
from openai import AsyncStream, Stream
|
|
6
|
+
from openai.types import responses as openai_types
|
|
7
|
+
from openai.types.responses.response_stream_event import ResponseStreamEvent
|
|
8
|
+
|
|
9
|
+
from .....content import (
|
|
10
|
+
AssistantContentPart,
|
|
11
|
+
Text,
|
|
12
|
+
TextChunk,
|
|
13
|
+
TextEndChunk,
|
|
14
|
+
TextStartChunk,
|
|
15
|
+
Thought,
|
|
16
|
+
ThoughtChunk,
|
|
17
|
+
ThoughtEndChunk,
|
|
18
|
+
ThoughtStartChunk,
|
|
19
|
+
ToolCall,
|
|
20
|
+
ToolCallChunk,
|
|
21
|
+
ToolCallEndChunk,
|
|
22
|
+
ToolCallStartChunk,
|
|
23
|
+
)
|
|
24
|
+
from .....messages import AssistantMessage
|
|
25
|
+
from .....responses import (
|
|
26
|
+
AsyncChunkIterator,
|
|
27
|
+
ChunkIterator,
|
|
28
|
+
FinishReason,
|
|
29
|
+
FinishReasonChunk,
|
|
30
|
+
RawMessageChunk,
|
|
31
|
+
RawStreamEventChunk,
|
|
32
|
+
)
|
|
33
|
+
from ..model_ids import OpenAIResponsesModelId
|
|
34
|
+
|
|
35
|
+
# Maps OpenAI `Response.incomplete_details.reason` values to mirascope finish
# reasons. Reasons not listed here (or a missing reason) resolve to `None`
# via `.get(...)` at the call sites below.
INCOMPLETE_DETAILS_TO_FINISH_REASON = {
    "max_output_tokens": FinishReason.MAX_TOKENS,
    "content_filter": FinishReason.REFUSAL,
}
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _serialize_output_item(
    item: openai_types.ResponseOutputItem,
) -> dict[str, Any]:
    """Serialize *item* to a plain dict, dropping entries whose value is ``None``."""
    dumped = item.model_dump()
    return {field: dumped[field] for field in dumped if dumped[field] is not None}
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def decode_response(
    response: openai_types.Response,
    model_id: OpenAIResponsesModelId,
) -> tuple[AssistantMessage, FinishReason | None]:
    """Convert OpenAI Responses Response to mirascope AssistantMessage."""
    content_parts: list[AssistantContentPart] = []
    saw_refusal = False

    for output_item in response.output:
        item_type = output_item.type
        if item_type == "message":
            for message_content in output_item.content:
                if message_content.type == "output_text":
                    content_parts.append(Text(text=message_content.text))
                elif message_content.type == "refusal":
                    # Refusal text is surfaced to the caller as ordinary text.
                    content_parts.append(Text(text=message_content.refusal))
                    saw_refusal = True
        elif item_type == "function_call":
            tool_call = ToolCall(
                id=output_item.call_id,
                name=output_item.name,
                args=output_item.arguments,
            )
            content_parts.append(tool_call)
        elif item_type == "reasoning":
            for summary_part in output_item.summary:
                if summary_part.type == "summary_text":
                    content_parts.append(Thought(thought=summary_part.text))
            if output_item.content:  # pragma: no cover
                # TODO: Add test case covering this
                # (Likely their open-source models output reasoning_text rather than summaries)
                for reasoning_content in output_item.content:
                    if reasoning_content.type == "reasoning_text":
                        content_parts.append(Thought(thought=reasoning_content.text))

        else:
            raise NotImplementedError(f"Unsupported output item: {output_item.type}")

    finish_reason: FinishReason | None = None
    if saw_refusal:
        finish_reason = FinishReason.REFUSAL
    elif details := response.incomplete_details:
        finish_reason = INCOMPLETE_DETAILS_TO_FINISH_REASON.get(details.reason or "")

    assistant_message = AssistantMessage(
        content=content_parts,
        provider="openai:responses",
        model_id=model_id,
        raw_message=[
            _serialize_output_item(output_item) for output_item in response.output
        ],
    )

    return assistant_message, finish_reason
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
class _OpenAIResponsesChunkProcessor:
    """Processes OpenAI Responses streaming events and maintains state across chunks."""

    def __init__(self) -> None:
        # Which content part is currently open in the output stream, if any.
        self.current_content_type: Literal["text", "tool_call", "thought"] | None = None
        # Set once a refusal part finishes; surfaced as FinishReason.REFUSAL
        # when the response completes.
        self.refusal_encountered = False
        # Bookkeeping for the in-flight tool call. Declared here (rather than
        # created ad hoc in `process_chunk`) so the instance's attributes are
        # stable; not read elsewhere in this module.
        self.current_tool_call_id: str | None = None
        self.current_tool_call_name: str | None = None

    def process_chunk(self, event: ResponseStreamEvent) -> ChunkIterator:
        """Process a single OpenAI Responses stream event and yield the appropriate content chunks.

        Always yields the raw event first, then zero or more typed chunks
        translating the event into mirascope's streaming content model.
        Unrecognized event types yield only the raw event.
        """
        yield RawStreamEventChunk(raw_stream_event=event)

        if hasattr(event, "type"):
            # NOTE: the original mixed `if`/`elif` here; event types are
            # mutually exclusive, so a single `elif` chain is equivalent and
            # avoids the latent double-dispatch hazard.
            if event.type == "response.output_text.delta":
                if not self.current_content_type:
                    yield TextStartChunk()
                    self.current_content_type = "text"
                yield TextChunk(delta=event.delta)
            elif event.type == "response.output_text.done":
                yield TextEndChunk()
                self.current_content_type = None
            elif event.type == "response.refusal.delta":
                # Refusal deltas are surfaced to the caller as regular text.
                if not self.current_content_type:
                    yield TextStartChunk()
                    self.current_content_type = "text"
                yield TextChunk(delta=event.delta)
            elif event.type == "response.refusal.done":
                yield TextEndChunk()
                self.refusal_encountered = True
                self.current_content_type = None
            elif event.type == "response.output_item.added":
                item = event.item
                if item.type == "function_call":
                    self.current_tool_call_id = item.call_id
                    self.current_tool_call_name = item.name
                    yield ToolCallStartChunk(
                        id=item.call_id,
                        name=item.name,
                    )
                    self.current_content_type = "tool_call"
            elif event.type == "response.function_call_arguments.delta":
                yield ToolCallChunk(delta=event.delta)
            elif event.type == "response.function_call_arguments.done":
                yield ToolCallEndChunk()
                self.current_content_type = None
            elif (
                event.type == "response.reasoning_text.delta"
                or event.type == "response.reasoning_summary_text.delta"
            ):
                if not self.current_content_type:
                    yield ThoughtStartChunk()
                    self.current_content_type = "thought"
                yield ThoughtChunk(delta=event.delta)
            elif (
                event.type == "response.reasoning_summary_text.done"
                or event.type == "response.reasoning_text.done"
            ):
                yield ThoughtEndChunk()
                self.current_content_type = None
            elif event.type == "response.incomplete":
                details = event.response.incomplete_details
                reason = (details and details.reason) or ""
                finish_reason = INCOMPLETE_DETAILS_TO_FINISH_REASON.get(reason)
                if finish_reason:
                    yield FinishReasonChunk(finish_reason=finish_reason)
            elif event.type == "response.completed":
                yield RawMessageChunk(
                    raw_message=[
                        _serialize_output_item(item) for item in event.response.output
                    ]
                )
                # Emit the refusal finish reason only once the full response
                # has completed, after the raw message chunk.
                if self.refusal_encountered:
                    yield FinishReasonChunk(finish_reason=FinishReason.REFUSAL)
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def decode_stream(
    openai_stream: Stream[ResponseStreamEvent],
) -> ChunkIterator:
    """Yield mirascope chunks decoded from an OpenAI ``Stream[ResponseStreamEvent]``."""
    chunk_processor = _OpenAIResponsesChunkProcessor()
    for stream_event in openai_stream:
        for chunk in chunk_processor.process_chunk(stream_event):
            yield chunk
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
async def decode_async_stream(
    openai_stream: AsyncStream[ResponseStreamEvent],
) -> AsyncChunkIterator:
    """Adapt an OpenAI ``AsyncStream[ResponseStreamEvent]`` into an ``AsyncChunkIterator``.

    Asynchronously consumes the raw event stream, routing every event through
    a stateful chunk processor and yielding each produced Mirascope chunk.
    """
    chunk_processor = _OpenAIResponsesChunkProcessor()
    async for stream_event in openai_stream:
        # `yield from` is not permitted inside async generators, so re-yield
        # each processed chunk explicitly.
        for processed_chunk in chunk_processor.process_chunk(stream_event):
            yield processed_chunk
|
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
"""OpenAI Responses message encoding and request preparation."""
|
|
2
|
+
|
|
3
|
+
from collections.abc import Sequence
|
|
4
|
+
from typing import TypedDict, cast
|
|
5
|
+
|
|
6
|
+
from openai import Omit
|
|
7
|
+
from openai.types.responses import (
|
|
8
|
+
FunctionToolParam,
|
|
9
|
+
ResponseFormatTextJSONSchemaConfigParam,
|
|
10
|
+
ResponseFunctionToolCallParam,
|
|
11
|
+
ResponseInputContentParam,
|
|
12
|
+
ResponseInputItemParam,
|
|
13
|
+
ResponseInputParam,
|
|
14
|
+
ResponseInputTextParam,
|
|
15
|
+
ResponseTextConfigParam,
|
|
16
|
+
ToolChoiceAllowedParam,
|
|
17
|
+
ToolChoiceFunctionParam,
|
|
18
|
+
response_create_params,
|
|
19
|
+
)
|
|
20
|
+
from openai.types.responses.easy_input_message_param import EasyInputMessageParam
|
|
21
|
+
from openai.types.responses.response_input_image_param import ResponseInputImageParam
|
|
22
|
+
from openai.types.responses.response_input_param import (
|
|
23
|
+
FunctionCallOutput,
|
|
24
|
+
Message as ResponseInputMessageParam,
|
|
25
|
+
)
|
|
26
|
+
from openai.types.shared_params import Reasoning
|
|
27
|
+
from openai.types.shared_params.response_format_json_object import (
|
|
28
|
+
ResponseFormatJSONObject,
|
|
29
|
+
)
|
|
30
|
+
from openai.types.shared_params.responses_model import ResponsesModel
|
|
31
|
+
|
|
32
|
+
from .....exceptions import FeatureNotSupportedError
|
|
33
|
+
from .....formatting import (
|
|
34
|
+
Format,
|
|
35
|
+
FormattableT,
|
|
36
|
+
_utils as _formatting_utils,
|
|
37
|
+
resolve_format,
|
|
38
|
+
)
|
|
39
|
+
from .....messages import AssistantMessage, Message, UserMessage
|
|
40
|
+
from .....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
|
|
41
|
+
from ....base import Params, _utils as _base_utils
|
|
42
|
+
from ...shared import _utils as _shared_utils
|
|
43
|
+
from ..model_ids import OpenAIResponsesModelId
|
|
44
|
+
from .model_features import NON_REASONING_MODELS
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class ResponseCreateKwargs(TypedDict, total=False):
    """Kwargs to the OpenAI `client.responses.create` method."""

    # Target model identifier (any Responses-capable model).
    model: ResponsesModel
    # Either a plain prompt string or a list of encoded input items.
    input: str | ResponseInputParam
    # System-level instructions for the request.
    instructions: str
    # Sampling temperature.
    temperature: float
    # Upper bound on the number of generated output tokens.
    max_output_tokens: int
    # Nucleus-sampling probability mass.
    top_p: float
    # Function tools to expose; `Omit` leaves the field out of the request.
    tools: list[FunctionToolParam] | Omit
    # Tool-choice constraint; `Omit` leaves the field out of the request.
    tool_choice: response_create_params.ToolChoice | Omit
    # Text/structured-output configuration (e.g. JSON-schema formats).
    text: ResponseTextConfigParam
    # Reasoning configuration for reasoning-capable models.
    reasoning: Reasoning | Omit
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _encode_user_message(
    message: UserMessage,
) -> ResponseInputParam:
    """Encode a Mirascope ``UserMessage`` as OpenAI Responses input items.

    A message holding a single text part takes a fast path and is encoded as
    one ``EasyInputMessageParam``. Otherwise text/image parts are batched into
    user messages, while each tool output becomes a standalone
    ``function_call_output`` item (flushing any batched content first).
    """
    content = message.content
    if len(content) == 1 and content[0].type == "text":
        return [EasyInputMessageParam(content=content[0].text, role="user")]

    encoded: ResponseInputParam = []
    pending: list[ResponseInputContentParam] = []

    def _flush_pending() -> None:
        # Emit any accumulated text/image parts as a single user message.
        nonlocal pending
        if pending:
            encoded.append(
                ResponseInputMessageParam(
                    content=pending, role="user", type="message"
                )
            )
            pending = []

    for part in content:
        if part.type == "text":
            pending.append(ResponseInputTextParam(text=part.text, type="input_text"))
        elif part.type == "image":
            if part.source.type == "url_image_source":
                image_url = part.source.url
            else:
                # Inline (non-URL) images are sent as a base64 data URL.
                image_url = f"data:{part.source.mime_type};base64,{part.source.data}"
            pending.append(
                ResponseInputImageParam(
                    image_url=image_url, detail="auto", type="input_image"
                )
            )
        elif part.type == "tool_output":
            _flush_pending()
            encoded.append(
                FunctionCallOutput(
                    call_id=part.id,
                    output=str(part.value),
                    type="function_call_output",
                )
            )
        elif part.type == "audio":
            raise FeatureNotSupportedError(
                "audio input",
                "openai:responses",
                message='provider "openai:responses" does not support audio inputs. Try using "openai:completions" instead',
            )
        else:
            raise NotImplementedError(
                f"Unsupported user content part type: {part.type}"
            )
    _flush_pending()

    return encoded
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _encode_assistant_message(
|
|
123
|
+
message: AssistantMessage, encode_thoughts: bool
|
|
124
|
+
) -> ResponseInputParam:
|
|
125
|
+
result: ResponseInputParam = []
|
|
126
|
+
|
|
127
|
+
# Note: OpenAI does not provide any way to encode multiplie pieces of assistant-generated
|
|
128
|
+
# text as adjacent content within the same Message, except as part of
|
|
129
|
+
# ResponseOutputMessageParam which requires OpenAI-provided `id` and `status` on the message,
|
|
130
|
+
# and `annotations` and `logprobs` on the output text.
|
|
131
|
+
# Rather than generating a fake or nonexistent fields and triggering potentially undefined
|
|
132
|
+
# server-side behavior, we use `EasyInputMessageParam` for assistant generated text,
|
|
133
|
+
# with the caveat that assistant messages containing multiple text parts will be encoded
|
|
134
|
+
# as though they are separate messages.
|
|
135
|
+
# (It would seem as though the `Message` class in `response_input_param.py` would be suitable,
|
|
136
|
+
# especially as it supports the "assistant" role; however attempting to use it triggers a server
|
|
137
|
+
# error when text of type input_text is passed as part of an assistant message.)
|
|
138
|
+
for part in message.content:
|
|
139
|
+
if part.type == "text":
|
|
140
|
+
result.append(EasyInputMessageParam(content=part.text, role="assistant"))
|
|
141
|
+
elif part.type == "thought":
|
|
142
|
+
if encode_thoughts:
|
|
143
|
+
result.append(
|
|
144
|
+
EasyInputMessageParam(
|
|
145
|
+
content="**Thinking:** " + part.thought, role="assistant"
|
|
146
|
+
)
|
|
147
|
+
)
|
|
148
|
+
elif part.type == "tool_call":
|
|
149
|
+
result.append(
|
|
150
|
+
ResponseFunctionToolCallParam(
|
|
151
|
+
call_id=part.id,
|
|
152
|
+
name=part.name,
|
|
153
|
+
arguments=part.args,
|
|
154
|
+
type="function_call",
|
|
155
|
+
)
|
|
156
|
+
)
|
|
157
|
+
else:
|
|
158
|
+
raise NotImplementedError(
|
|
159
|
+
f"Unsupported assistant content part type: {part.type}"
|
|
160
|
+
)
|
|
161
|
+
|
|
162
|
+
return result
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def _encode_message(
    message: Message, model_id: OpenAIResponsesModelId, encode_thoughts: bool
) -> ResponseInputParam:
    """Convert a Mirascope ``Message`` into OpenAI Responses input items.

    A single message may expand into several input items because tool calls
    and tool outputs are standalone items in the Responses API rather than
    message content.
    """
    if message.role == "system":
        # The Responses API accepts any number of "developer" messages at any
        # position, so system messages are translated inline as encountered
        # instead of being folded into the `instructions` field.
        return [EasyInputMessageParam(role="developer", content=message.content.text)]

    if message.role == "assistant":
        # Replay the provider's own raw payload verbatim when the message came
        # from this same provider and model and thoughts don't need to be
        # re-encoded as text.
        raw_is_reusable = (
            message.provider == "openai:responses"
            and message.model_id == model_id
            and bool(message.raw_message)
            and not encode_thoughts
        )
        if raw_is_reusable:
            return cast(ResponseInputParam, message.raw_message)
        return _encode_assistant_message(message, encode_thoughts)

    return _encode_user_message(message)
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def _convert_tool_to_function_tool_param(tool: ToolSchema) -> FunctionToolParam:
    """Translate a Mirascope ``ToolSchema`` into an OpenAI Responses ``FunctionToolParam``."""
    parameters_schema = tool.parameters.model_dump(by_alias=True, exclude_none=True)
    parameters_schema["type"] = "object"
    # Recursively force `additionalProperties: false` in the schema (required
    # by OpenAI strict function calling — see the shared helper).
    _shared_utils._ensure_additional_properties_false(parameters_schema)

    return FunctionToolParam(
        type="function",
        name=tool.name,
        description=tool.description,
        parameters=parameters_schema,
        strict=tool.strict,
    )
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _create_strict_response_format(
    format: Format[FormattableT],
) -> ResponseFormatTextJSONSchemaConfigParam:
    """Build the OpenAI Responses strict JSON-schema response format.

    Args:
        format: The `Format` instance containing schema and metadata

    Returns:
        ResponseFormatTextJSONSchemaConfigParam for strict structured outputs
    """
    # Work on a copy so the caller's Format is not mutated by the
    # additionalProperties normalization below.
    schema_copy = format.schema.copy()
    _shared_utils._ensure_additional_properties_false(schema_copy)

    config: ResponseFormatTextJSONSchemaConfigParam = {
        "type": "json_schema",
        "name": format.name,
        "schema": schema_copy,
    }
    if format.description:
        config["description"] = format.description
    config["strict"] = True

    return config
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def _compute_reasoning(thinking: bool) -> Reasoning:
    """Build the OpenAI `Reasoning` config for the requested thinking mode.

    With thinking enabled we request medium effort plus auto summaries;
    otherwise we ask for minimal reasoning effort.
    """
    return (
        {"effort": "medium", "summary": "auto"}
        if thinking
        else {"effort": "minimal"}
    )
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def encode_request(
    *,
    model_id: OpenAIResponsesModelId,
    messages: Sequence[Message],
    tools: Sequence[ToolSchema] | BaseToolkit | None,
    format: type[FormattableT] | Format[FormattableT] | None,
    params: Params,
) -> tuple[Sequence[Message], Format[FormattableT] | None, ResponseCreateKwargs]:
    """Prepares a request for the `OpenAI.responses.create` method.

    Args:
        model_id: Target OpenAI Responses model.
        messages: Conversation history to encode as Responses input items.
        tools: Tool schemas (or a toolkit) to expose to the model.
        format: Desired structured-output format, or None for free-form text.
        params: Provider-agnostic call parameters.

    Returns:
        A tuple of the (possibly instruction-augmented) messages, the resolved
        `Format` (or None), and the kwargs for `client.responses.create`.
    """
    kwargs: ResponseCreateKwargs = ResponseCreateKwargs(
        {
            "model": model_id,
        }
    )
    encode_thoughts = False

    # NOTE(review): presumably the context manager reports params that are set
    # but never read here (plus the explicitly unsupported ones) — confirm in
    # _base_utils.ensure_all_params_accessed.
    with _base_utils.ensure_all_params_accessed(
        params=params,
        provider="openai:responses",
        unsupported_params=["top_k", "seed", "stop_sequences"],
    ) as param_accessor:
        if param_accessor.temperature is not None:
            kwargs["temperature"] = param_accessor.temperature
        if param_accessor.max_tokens is not None:
            kwargs["max_output_tokens"] = param_accessor.max_tokens
        if param_accessor.top_p is not None:
            kwargs["top_p"] = param_accessor.top_p
        if param_accessor.thinking is not None:
            if model_id in NON_REASONING_MODELS:
                param_accessor.emit_warning_for_unused_param(
                    "thinking", param_accessor.thinking, "openai:responses", model_id
                )
            else:
                # Assume model supports reasoning unless explicitly listed as non-reasoning
                # This ensures new reasoning models work immediately without code updates
                kwargs["reasoning"] = _compute_reasoning(param_accessor.thinking)
        if param_accessor.encode_thoughts_as_text:
            encode_thoughts = True

    # Normalize the toolkit/sequence/None union into a plain list of schemas.
    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
    openai_tools = [_convert_tool_to_function_tool_param(tool) for tool in tools]

    # Prefer strict JSON-schema structured outputs when the model supports
    # them; otherwise fall back to tool-based formatting.
    model_supports_strict = (
        model_id not in _shared_utils.MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
    )
    default_mode = "strict" if model_supports_strict else "tool"

    format = resolve_format(format, default_mode=default_mode)
    if format is not None:
        if format.mode == "strict":
            kwargs["text"] = {"format": _create_strict_response_format(format)}
        elif format.mode == "tool":
            # Expose the synthetic format tool and force the model to call a
            # tool: any allowed tool when real tools exist, otherwise the
            # format tool specifically.
            format_tool_shared_utils = _formatting_utils.create_tool_schema(format)
            openai_tools.append(
                _convert_tool_to_function_tool_param(format_tool_shared_utils)
            )
            if tools:
                kwargs["tool_choice"] = ToolChoiceAllowedParam(
                    type="allowed_tools",
                    mode="required",
                    tools=[
                        {"type": "function", "name": tool["name"]}
                        for tool in openai_tools
                    ],
                )
            else:
                kwargs["tool_choice"] = ToolChoiceFunctionParam(
                    type="function",
                    name=FORMAT_TOOL_NAME,
                )
        elif (
            format.mode == "json"
            and model_id not in _shared_utils.MODELS_WITHOUT_JSON_OBJECT_SUPPORT
        ):
            # Plain JSON-object mode (no schema enforcement), only where the
            # model supports it.
            kwargs["text"] = {"format": ResponseFormatJSONObject(type="json_object")}

        if format.formatting_instructions:
            # Prepend formatting guidance as system instructions; note the
            # augmented messages are also returned to the caller.
            messages = _base_utils.add_system_instructions(
                messages, format.formatting_instructions
            )

    # Each Mirascope message may expand into multiple Responses input items.
    encoded_messages: list[ResponseInputItemParam] = []
    for message in messages:
        encoded_messages.extend(_encode_message(message, model_id, encode_thoughts))
    kwargs["input"] = encoded_messages

    # Omit the tools field entirely when there are none to expose.
    if openai_tools:
        kwargs["tools"] = openai_tools

    return messages, format, kwargs
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""OpenAI Responses models categorized by reasoning support.
|
|
2
|
+
|
|
3
|
+
This file is auto-generated by scripts/update_openai_responses_model_features.py
|
|
4
|
+
Run that script to update these sets when OpenAI releases new models.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# NOTE: the encoder treats any model absent from NON_REASONING_MODELS as
# reasoning-capable, so new reasoning models work without updating these sets.
REASONING_MODELS: set[str] = {
    "codex-mini-latest",
    "gpt-5",
    "gpt-5-2025-08-07",
    "gpt-5-mini",
    "gpt-5-mini-2025-08-07",
    "gpt-5-nano",
    "gpt-5-nano-2025-08-07",
    "o1",
    "o1-2024-12-17",
    "o1-pro",
    "o1-pro-2025-03-19",
    "o3",
    "o3-2025-04-16",
    "o3-mini",
    "o3-mini-2025-01-31",
    "o3-pro",
    "o3-pro-2025-06-10",
    "o4-mini",
    "o4-mini-2025-04-16",
}
"""Models that have been tested and confirmed to support the reasoning parameter."""

NON_REASONING_MODELS: set[str] = {
    "chatgpt-4o-latest",
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-1106",
    "gpt-4",
    "gpt-4-0125-preview",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-1106-preview",
    "gpt-4-turbo",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-turbo-preview",
    "gpt-4.1",
    "gpt-4.1-2025-04-14",
    "gpt-4.1-mini",
    "gpt-4.1-mini-2025-04-14",
    "gpt-4.1-nano",
    "gpt-4.1-nano-2025-04-14",
    "gpt-4o",
    "gpt-4o-2024-05-13",
    "gpt-4o-2024-08-06",
    "gpt-4o-2024-11-20",
    "gpt-4o-mini",
    "gpt-4o-mini-2024-07-18",
    "gpt-5-chat-latest",
}
"""Models that have been tested and confirmed to NOT support the reasoning parameter."""

NON_EXISTENT_MODELS: set[str] = {
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "gpt-4-vision-preview",
}
"""Models that are listed in OpenAI's types but no longer exist in their API."""

NO_RESPONSES_API_SUPPORT_MODELS: set[str] = {
    "gpt-4o-audio-preview",
    "gpt-4o-audio-preview-2024-10-01",
    "gpt-4o-audio-preview-2024-12-17",
    "gpt-4o-audio-preview-2025-06-03",
    "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-audio-preview-2024-12-17",
    "gpt-4o-mini-search-preview",
    "gpt-4o-mini-search-preview-2025-03-11",
    "gpt-4o-search-preview",
    "gpt-4o-search-preview-2025-03-11",
    "o1-mini",
    "o1-mini-2024-09-12",
    "o1-preview",
    "o1-preview-2024-09-12",
}
"""Models that do not support the Responses API."""
|