mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""The reason the LLM finished generating a response."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from enum import Enum
|
|
5
|
+
from typing import Literal
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class FinishReason(str, Enum):
    """The reason why the LLM finished generating a response.

    `FinishReason` is only set when the response did not have a normal finish (e.g. it
    ran out of tokens). When a response finishes generating normally, no finish reason
    is set.
    """

    # Generation stopped because the model reached its output token limit.
    MAX_TOKENS = "max_tokens"
    # The model declined to produce a response (provider-reported refusal —
    # exact semantics depend on the provider; confirm against provider docs).
    REFUSAL = "refusal"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass(kw_only=True)
class FinishReasonChunk:
    """Represents the finish reason for a completed stream."""

    # Discriminator tag identifying this chunk variant within a stream of chunks.
    type: Literal["finish_reason_chunk"] = "finish_reason_chunk"

    finish_reason: FinishReason
    """The reason the stream finished."""
|
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
"""Implements Response and AsyncResponse."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
from collections.abc import Sequence
|
|
5
|
+
from typing import TYPE_CHECKING, Any, Generic, overload
|
|
6
|
+
|
|
7
|
+
from ..content import ToolOutput
|
|
8
|
+
from ..context import Context, DepsT
|
|
9
|
+
from ..formatting import Format, FormattableT
|
|
10
|
+
from ..messages import AssistantMessage, Message, UserContent
|
|
11
|
+
from ..tools import (
|
|
12
|
+
AsyncContextTool,
|
|
13
|
+
AsyncContextToolkit,
|
|
14
|
+
AsyncTool,
|
|
15
|
+
AsyncToolkit,
|
|
16
|
+
ContextTool,
|
|
17
|
+
ContextToolkit,
|
|
18
|
+
Tool,
|
|
19
|
+
Toolkit,
|
|
20
|
+
)
|
|
21
|
+
from .base_response import BaseResponse
|
|
22
|
+
from .finish_reason import FinishReason
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from ..clients import ModelId, Params, Provider
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class Response(BaseResponse[Toolkit, FormattableT]):
    """The response generated by an LLM.

    Synchronous counterpart of `AsyncResponse`: tool execution runs sequentially
    and `resume` performs a blocking follow-up call through `self.model`.
    """

    def __init__(
        self,
        *,
        raw: Any,  # noqa: ANN401
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[Tool] | Toolkit | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        assistant_message: AssistantMessage,
        finish_reason: FinishReason | None,
    ) -> None:
        """Initialize a `Response`.

        Args:
            raw: The provider's raw response object, kept for inspection.
            provider: The provider that generated this response.
            model_id: The id of the model that generated this response.
            params: The call params that produced this response.
            tools: The tools (or a pre-built `Toolkit`) available to the call.
            format: The structured-output format, if one was requested.
            input_messages: The messages sent to the LLM.
            assistant_message: The assistant message the LLM generated.
            finish_reason: Why generation stopped, or `None` on a normal finish.
        """
        # Normalize `tools` into a `Toolkit` so the base class always holds a
        # uniform toolkit (a bare sequence — or None — is wrapped here).
        toolkit = tools if isinstance(tools, Toolkit) else Toolkit(tools=tools)
        super().__init__(
            raw=raw,
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=toolkit,
            format=format,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
        )

    def execute_tools(self) -> Sequence[ToolOutput]:
        """Execute and return all of the tool calls in the response.

        Tool calls are executed sequentially, in the order they appeared.

        Returns:
            A sequence containing a `ToolOutput` for every tool call in the order they appeared.

        Raises:
            ToolNotFoundError: If one of the response's tool calls has no matching tool.
            Exception: If one of the tools throws an exception.
        """
        return [self.toolkit.execute(tool_call) for tool_call in self.tool_calls]

    @overload
    def resume(self: "Response", content: UserContent) -> "Response": ...

    @overload
    def resume(
        self: "Response[FormattableT]", content: UserContent
    ) -> "Response[FormattableT]": ...

    def resume(self, content: UserContent) -> "Response | Response[FormattableT]":
        """Generate a new `Response` using this response's messages with additional user content.

        Uses this response's tools and format type. Also uses this response's provider,
        model, client, and params, unless the model context manager is being used to
        provide a new LLM as an override.

        Args:
            content: The new user message content to append to the message history.

        Returns:
            A new `Response` instance generated from the extended message history.
        """
        # Delegates to the model so any active model-context override is honored.
        return self.model.resume(
            response=self,
            content=content,
        )
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class AsyncResponse(BaseResponse[AsyncToolkit, FormattableT]):
    """The response generated by an LLM in async mode.

    Asynchronous counterpart of `Response`: tool execution runs concurrently via
    `asyncio.gather`, and `resume` awaits the model's async follow-up call.
    """

    def __init__(
        self,
        *,
        raw: Any,  # noqa: ANN401
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        assistant_message: AssistantMessage,
        finish_reason: FinishReason | None,
    ) -> None:
        """Initialize an `AsyncResponse`.

        Args:
            raw: The provider's raw response object, kept for inspection.
            provider: The provider that generated this response.
            model_id: The id of the model that generated this response.
            params: The call params that produced this response.
            tools: The async tools (or a pre-built `AsyncToolkit`) available to the call.
            format: The structured-output format, if one was requested.
            input_messages: The messages sent to the LLM.
            assistant_message: The assistant message the LLM generated.
            finish_reason: Why generation stopped, or `None` on a normal finish.
        """
        # Normalize `tools` into an `AsyncToolkit` so the base class always holds
        # a uniform toolkit (a bare sequence — or None — is wrapped here).
        toolkit = (
            tools if isinstance(tools, AsyncToolkit) else AsyncToolkit(tools=tools)
        )
        super().__init__(
            raw=raw,
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=toolkit,
            format=format,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
        )

    async def execute_tools(self) -> Sequence[ToolOutput]:
        """Execute and return all of the tool calls in the response.

        All tool calls are scheduled concurrently; the returned outputs preserve
        the order in which the tool calls appeared.

        Returns:
            A sequence containing a `ToolOutput` for every tool call in the order they appeared.

        Raises:
            ToolNotFoundError: If one of the response's tool calls has no matching tool.
            Exception: If one of the tools throws an exception.
        """
        tasks = [self.toolkit.execute(tool_call) for tool_call in self.tool_calls]
        return await asyncio.gather(*tasks)

    @overload
    async def resume(
        self: "AsyncResponse", content: UserContent
    ) -> "AsyncResponse": ...

    @overload
    async def resume(
        self: "AsyncResponse[FormattableT]", content: UserContent
    ) -> "AsyncResponse[FormattableT]": ...

    async def resume(
        self, content: UserContent
    ) -> "AsyncResponse | AsyncResponse[FormattableT]":
        """Generate a new `AsyncResponse` using this response's messages with additional user content.

        Uses this response's tools and format type. Also uses this response's provider,
        model, client, and params, unless the model context manager is being used to
        provide a new LLM as an override.

        Args:
            content: The new user message content to append to the message history.

        Returns:
            A new `AsyncResponse` instance generated from the extended message history.
        """
        # Delegates to the model so any active model-context override is honored.
        return await self.model.resume_async(
            response=self,
            content=content,
        )
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
class ContextResponse(
    BaseResponse[ContextToolkit[DepsT], FormattableT], Generic[DepsT, FormattableT]
):
    """The response generated by an LLM from a context call."""

    def __init__(
        self,
        *,
        raw: Any,  # noqa: ANN401
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        assistant_message: AssistantMessage,
        finish_reason: FinishReason | None,
    ) -> None:
        """Initialize a `ContextResponse`."""
        # Accept either a ready-made toolkit or a raw sequence of tools (or None),
        # wrapping the latter in a fresh `ContextToolkit`.
        if isinstance(tools, ContextToolkit):
            resolved_toolkit = tools
        else:
            resolved_toolkit = ContextToolkit(tools=tools)
        super().__init__(
            raw=raw,
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=resolved_toolkit,
            format=format,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
        )

    def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
        """Run each tool call in this response against the given context.

        Args:
            ctx: A `Context` carrying the required deps type.

        Returns:
            A sequence with one `ToolOutput` per tool call.

        Raises:
            ToolNotFoundError: If a tool call has no matching tool in the toolkit.
            Exception: If any executed tool raises.
        """
        return [self.toolkit.execute(ctx, call) for call in self.tool_calls]

    @overload
    def resume(
        self: "ContextResponse[DepsT]", ctx: Context[DepsT], content: UserContent
    ) -> "ContextResponse[DepsT]": ...

    @overload
    def resume(
        self: "ContextResponse[DepsT, FormattableT]",
        ctx: Context[DepsT],
        content: UserContent,
    ) -> "ContextResponse[DepsT, FormattableT]": ...

    def resume(
        self, ctx: Context[DepsT], content: UserContent
    ) -> "ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]":
        """Continue the conversation with extra user content, yielding a new response.

        The new generation reuses this response's tools and format type, and its
        provider, model, client, and params — unless the model context manager is
        active, in which case that override LLM is used instead.

        Args:
            ctx: A `Context` carrying the required deps type.
            content: User content appended to the existing message history.

        Returns:
            A new `ContextResponse` built from the extended message history.
        """
        # `self.model` resolves any context-manager override before delegating.
        return self.model.context_resume(
            ctx=ctx,
            response=self,
            content=content,
        )
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
class AsyncContextResponse(
    BaseResponse[AsyncContextToolkit[DepsT], FormattableT], Generic[DepsT, FormattableT]
):
    """The response generated by an LLM from an async context call."""

    def __init__(
        self,
        *,
        raw: Any,  # noqa: ANN401
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        assistant_message: AssistantMessage,
        finish_reason: FinishReason | None,
    ) -> None:
        """Initialize an `AsyncContextResponse`."""
        # Accept either a ready-made toolkit or a raw sequence of tools (or None),
        # wrapping the latter in a fresh `AsyncContextToolkit`.
        if isinstance(tools, AsyncContextToolkit):
            resolved_toolkit = tools
        else:
            resolved_toolkit = AsyncContextToolkit(tools=tools)
        super().__init__(
            raw=raw,
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=resolved_toolkit,
            format=format,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
        )

    async def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
        """Run every tool call in this response and collect the outputs.

        Args:
            ctx: A `Context` carrying the required deps type.

        Returns:
            A sequence with one `ToolOutput` per tool call, in the order the
            calls appeared in the response.

        Raises:
            ToolNotFoundError: If a tool call has no matching tool in the toolkit.
            Exception: If any executed tool raises.
        """
        # Kick off every execution, then await them all together.
        pending = (self.toolkit.execute(ctx, call) for call in self.tool_calls)
        return await asyncio.gather(*pending)

    @overload
    async def resume(
        self: "AsyncContextResponse[DepsT]", ctx: Context[DepsT], content: UserContent
    ) -> "AsyncContextResponse[DepsT]": ...

    @overload
    async def resume(
        self: "AsyncContextResponse[DepsT, FormattableT]",
        ctx: Context[DepsT],
        content: UserContent,
    ) -> "AsyncContextResponse[DepsT, FormattableT]": ...

    async def resume(
        self, ctx: Context[DepsT], content: UserContent
    ) -> "AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]":
        """Continue the conversation with extra user content, yielding a new response.

        The new generation reuses this response's tools and format type, and its
        provider, model, client, and params — unless the model context manager is
        active, in which case that override LLM is used instead.

        Args:
            ctx: A `Context` carrying the required deps type.
            content: User content appended to the existing message history.

        Returns:
            A new `AsyncContextResponse` built from the extended message history.
        """
        # `self.model` resolves any context-manager override before delegating.
        return await self.model.context_resume_async(
            ctx=ctx,
            response=self,
            content=content,
        )
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
"""Base interface for all LLM responses."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC
|
|
4
|
+
from collections.abc import Sequence
|
|
5
|
+
from types import NoneType
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Generic, Literal, overload
|
|
7
|
+
|
|
8
|
+
from ..content import AssistantContentPart, Text, Thought, ToolCall
|
|
9
|
+
from ..formatting import Format, FormattableT, Partial
|
|
10
|
+
from ..messages import Message
|
|
11
|
+
from ..tools import ToolkitT
|
|
12
|
+
from . import _utils
|
|
13
|
+
from .finish_reason import FinishReason
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from ..clients import ModelId, Params, Provider
|
|
17
|
+
from ..models import Model
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class RootResponse(Generic[ToolkitT, FormattableT], ABC):
    """Base class for LLM responses.

    Holds the raw provider payload plus the normalized message history, content
    parts, and generation metadata shared by all concrete response types.
    """

    raw: Any
    """The raw response from the LLM."""

    provider: "Provider"
    """The provider that generated this response."""

    model_id: "ModelId"
    """The model id that generated this response."""

    params: "Params"
    """The params that were used to generate this response (or None)."""

    toolkit: ToolkitT
    """The toolkit containing the tools used when generating this response."""

    messages: list[Message]
    """The message history, including the most recent assistant message."""

    content: Sequence[AssistantContentPart]
    """The content generated by the LLM."""

    texts: Sequence[Text]
    """The text content in the generated response, if any."""

    tool_calls: Sequence[ToolCall]
    """The tools the LLM wants called on its behalf, if any."""

    thoughts: Sequence[Thought]
    """The readable thoughts from the model's thinking process, if any.

    The thoughts may be direct output from the model thinking process, or may be a
    generated summary. (This depends on the provider; newer models tend to summarize.)
    """
    finish_reason: FinishReason | None
    """The reason why the LLM finished generating a response, if set.

    `finish_reason` is only set if the response did not finish generating normally,
    e.g. `FinishReason.MAX_TOKENS` if the model ran out of tokens before completing.
    When the response generates normally, `response.finish_reason` will be `None`.
    """

    format: Format[FormattableT] | None
    """The `Format` describing the structured response format, if available."""

    @overload
    def parse(self: "RootResponse[ToolkitT, None]", partial: Literal[True]) -> None:
        """Overload when the format type is `None` and `partial=True`: returns `None`."""
        ...

    @overload
    def parse(
        self: "RootResponse[ToolkitT, FormattableT]", partial: Literal[True]
    ) -> Partial[FormattableT]:
        """Format the response into a `Partial[BaseModel]` (with optional fields).

        This is useful for when the stream is only partially consumed, in which case the
        structured output may only be partially available.
        """
        ...

    @overload
    def parse(
        self: "RootResponse[ToolkitT, None]", partial: Literal[False] = False
    ) -> None:
        """Overload when the format type is `None`."""
        ...

    @overload
    def parse(
        self: "RootResponse[ToolkitT, FormattableT]", partial: Literal[False] = False
    ) -> FormattableT:
        """Overload when the format type is not `None`."""
        ...

    def parse(
        self, partial: bool = False
    ) -> FormattableT | Partial[FormattableT] | None:
        """Format the response according to the response format parser.

        Args:
            partial: If True, parse into a partially-populated object. Currently
                unsupported here and raises `NotImplementedError`.

        Returns:
            The formatted response object of type FormattableT, or `None` when no
            format (or a `None` format type) was configured.

        Raises:
            NotImplementedError: If `partial=True` is requested.
            json.JSONDecodeError: If the response's textual content can't be parsed as
                JSON.
            pydantic.ValidationError: If the response's content fails validation for the
                format type.
        """
        if self.format is None:
            return None

        formattable = self.format.formattable
        if formattable is None or formattable is NoneType:
            return None  # pragma: no cover

        if partial:
            raise NotImplementedError

        # Join all text parts, then pull the serialized JSON payload out of the
        # combined text before validating it against the format type.
        text = "".join(text.text for text in self.texts)
        json_text = _utils.extract_serialized_json(text)

        return formattable.model_validate_json(json_text)

    def pretty(self) -> str:
        """Return a string representation of all response content.

        The response content will be represented in a way that emphasizes clarity and
        readability, but may not include all metadata (like thinking signatures or tool
        call ids), and thus cannot be used to reconstruct the response. For example:

        **Thinking:**
            The user is asking a math problem. I should use the calculator tool.

        **Tool Call (calculator)** {'operation': 'mult', 'a': 1337, 'b': 4242}

        I am going to use the calculator and answer your question for you!
        """
        if not self.content:
            return "**[No Content]**"

        pretty_parts: list[str] = []
        for part in self.content:
            if isinstance(part, Text):
                pretty_parts.append(part.text)
            elif isinstance(part, ToolCall):
                pretty_parts.append(f"**ToolCall ({part.name}):** {part.args}")
            elif isinstance(part, Thought):
                # Indent each thought line so the thinking section reads as a block.
                indented_thinking = "\n".join(
                    f"    {line}" for line in part.thought.split("\n")
                )
                pretty_parts.append(f"**Thinking:**\n{indented_thinking}")
            else:
                # Fallback for content part types without a dedicated rendering.
                pretty_parts.append(
                    f"[{type(part).__name__}: {str(part)}]"
                )  # pragma: no cover

        return "\n\n".join(pretty_parts)

    @property
    def model(self) -> "Model":
        """A `Model` with parameters matching this response."""
        from ..models import Model, get_model_from_context

        # Honor an active model override (context manager) before falling back to
        # the provider/model/params this response was generated with.
        if context_model := get_model_from_context():
            return context_model

        return Model(
            provider=self.provider,
            model_id=self.model_id,
            **self.params,
        )
|