mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1243 @@
|
|
|
1
|
+
"""The model context manager for the `llm` module."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Iterator, Sequence
|
|
6
|
+
from contextlib import contextmanager
|
|
7
|
+
from contextvars import ContextVar
|
|
8
|
+
from typing import TYPE_CHECKING, overload
|
|
9
|
+
from typing_extensions import Unpack
|
|
10
|
+
|
|
11
|
+
from ..clients import PROVIDERS, get_client
|
|
12
|
+
from ..context import Context, DepsT
|
|
13
|
+
from ..formatting import Format, FormattableT
|
|
14
|
+
from ..messages import Message, UserContent
|
|
15
|
+
from ..responses import (
|
|
16
|
+
AsyncContextResponse,
|
|
17
|
+
AsyncContextStreamResponse,
|
|
18
|
+
AsyncResponse,
|
|
19
|
+
AsyncStreamResponse,
|
|
20
|
+
ContextResponse,
|
|
21
|
+
ContextStreamResponse,
|
|
22
|
+
Response,
|
|
23
|
+
StreamResponse,
|
|
24
|
+
)
|
|
25
|
+
from ..tools import (
|
|
26
|
+
AsyncContextTool,
|
|
27
|
+
AsyncContextToolkit,
|
|
28
|
+
AsyncTool,
|
|
29
|
+
AsyncToolkit,
|
|
30
|
+
ContextTool,
|
|
31
|
+
ContextToolkit,
|
|
32
|
+
Tool,
|
|
33
|
+
Toolkit,
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
if TYPE_CHECKING:
|
|
37
|
+
from ..clients import (
|
|
38
|
+
ModelId,
|
|
39
|
+
Params,
|
|
40
|
+
Provider,
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def get_model_from_context() -> Model | None:
|
|
48
|
+
"""Get the LLM currently set via context, if any."""
|
|
49
|
+
return MODEL_CONTEXT.get()
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class Model:
|
|
53
|
+
"""The unified LLM interface that delegates to provider-specific clients.
|
|
54
|
+
|
|
55
|
+
This class provides a consistent interface for interacting with language models
|
|
56
|
+
from various providers. It handles the common operations like generating responses,
|
|
57
|
+
streaming, and async variants by delegating to the appropriate client methods.
|
|
58
|
+
|
|
59
|
+
**Usage Note:** In most cases, you should use `llm.use_model()` instead of instantiating
|
|
60
|
+
`Model` directly. This preserves the ability to override the model at runtime using
|
|
61
|
+
the `llm.model()` context manager. Only instantiate `Model` directly if you want to
|
|
62
|
+
hardcode a specific model and prevent it from being overridden by context.
|
|
63
|
+
|
|
64
|
+
Example (recommended - allows override):
|
|
65
|
+
|
|
66
|
+
```python
|
|
67
|
+
from mirascope import llm
|
|
68
|
+
|
|
69
|
+
def recommend_book(genre: str) -> llm.Response:
|
|
70
|
+
# Uses context model if available, otherwise creates default
|
|
71
|
+
model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
|
|
72
|
+
message = llm.messages.user(f"Please recommend a book in {genre}.")
|
|
73
|
+
return model.call(messages=[message])
|
|
74
|
+
|
|
75
|
+
# Uses default model
|
|
76
|
+
response = recommend_book("fantasy")
|
|
77
|
+
|
|
78
|
+
# Override with different model
|
|
79
|
+
with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
|
|
80
|
+
response = recommend_book("fantasy") # Uses Claude
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
Example (direct instantiation - prevents override):
|
|
84
|
+
|
|
85
|
+
```python
|
|
86
|
+
from mirascope import llm
|
|
87
|
+
|
|
88
|
+
def recommend_book(genre: str) -> llm.Response:
|
|
89
|
+
# Hardcoded model, cannot be overridden by context
|
|
90
|
+
model = llm.Model(provider="openai", model_id="gpt-4o-mini")
|
|
91
|
+
message = llm.messages.user(f"Please recommend a book in {genre}.")
|
|
92
|
+
return model.call(messages=[message])
|
|
93
|
+
```
|
|
94
|
+
"""
|
|
95
|
+
|
|
96
|
+
provider: Provider
|
|
97
|
+
"""The provider being used (e.g. `openai`)."""
|
|
98
|
+
|
|
99
|
+
model_id: ModelId
|
|
100
|
+
"""The model being used (e.g. `gpt-4o-mini`)."""
|
|
101
|
+
|
|
102
|
+
params: Params
|
|
103
|
+
"""The default parameters for the model (temperature, max_tokens, etc.)."""
|
|
104
|
+
|
|
105
|
+
def __init__(
|
|
106
|
+
self,
|
|
107
|
+
provider: Provider,
|
|
108
|
+
model_id: ModelId,
|
|
109
|
+
**params: Unpack[Params],
|
|
110
|
+
) -> None:
|
|
111
|
+
"""Initialize the Model with provider, model_id, and optional params."""
|
|
112
|
+
if provider not in PROVIDERS:
|
|
113
|
+
raise ValueError(f"Unknown provider: {provider}")
|
|
114
|
+
self.provider = provider
|
|
115
|
+
self.model_id = model_id
|
|
116
|
+
self.params = params
|
|
117
|
+
|
|
118
|
+
@overload
|
|
119
|
+
def call(
|
|
120
|
+
self,
|
|
121
|
+
*,
|
|
122
|
+
messages: Sequence[Message],
|
|
123
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
124
|
+
format: None = None,
|
|
125
|
+
) -> Response:
|
|
126
|
+
"""Generate an `llm.Response` without a response format."""
|
|
127
|
+
...
|
|
128
|
+
|
|
129
|
+
@overload
|
|
130
|
+
def call(
|
|
131
|
+
self,
|
|
132
|
+
*,
|
|
133
|
+
messages: Sequence[Message],
|
|
134
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
135
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
136
|
+
) -> Response[FormattableT]:
|
|
137
|
+
"""Generate an `llm.Response` with a response format."""
|
|
138
|
+
...
|
|
139
|
+
|
|
140
|
+
@overload
|
|
141
|
+
def call(
|
|
142
|
+
self,
|
|
143
|
+
*,
|
|
144
|
+
messages: Sequence[Message],
|
|
145
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
146
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
147
|
+
) -> Response | Response[FormattableT]:
|
|
148
|
+
"""Generate an `llm.Response` with an optional response format."""
|
|
149
|
+
...
|
|
150
|
+
|
|
151
|
+
def call(
|
|
152
|
+
self,
|
|
153
|
+
*,
|
|
154
|
+
messages: Sequence[Message],
|
|
155
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
156
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
157
|
+
) -> Response | Response[FormattableT]:
|
|
158
|
+
"""Generate an `llm.Response` by synchronously calling this model's LLM provider.
|
|
159
|
+
|
|
160
|
+
Args:
|
|
161
|
+
messages: Messages to send to the LLM.
|
|
162
|
+
tools: Optional tools that the model may invoke.
|
|
163
|
+
format: Optional response format specifier.
|
|
164
|
+
|
|
165
|
+
Returns:
|
|
166
|
+
An `llm.Response` object containing the LLM-generated content.
|
|
167
|
+
"""
|
|
168
|
+
return get_client(self.provider).call(
|
|
169
|
+
model_id=self.model_id,
|
|
170
|
+
messages=messages,
|
|
171
|
+
tools=tools,
|
|
172
|
+
format=format,
|
|
173
|
+
**self.params,
|
|
174
|
+
)
|
|
175
|
+
|
|
176
|
+
@overload
|
|
177
|
+
async def call_async(
|
|
178
|
+
self,
|
|
179
|
+
*,
|
|
180
|
+
messages: Sequence[Message],
|
|
181
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
182
|
+
format: None = None,
|
|
183
|
+
) -> AsyncResponse:
|
|
184
|
+
"""Generate an `llm.AsyncResponse` without a response format."""
|
|
185
|
+
...
|
|
186
|
+
|
|
187
|
+
@overload
|
|
188
|
+
async def call_async(
|
|
189
|
+
self,
|
|
190
|
+
*,
|
|
191
|
+
messages: Sequence[Message],
|
|
192
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
193
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
194
|
+
) -> AsyncResponse[FormattableT]:
|
|
195
|
+
"""Generate an `llm.AsyncResponse` with a response format."""
|
|
196
|
+
...
|
|
197
|
+
|
|
198
|
+
@overload
|
|
199
|
+
async def call_async(
|
|
200
|
+
self,
|
|
201
|
+
*,
|
|
202
|
+
messages: Sequence[Message],
|
|
203
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
204
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
205
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
206
|
+
"""Generate an `llm.AsyncResponse` with an optional response format."""
|
|
207
|
+
...
|
|
208
|
+
|
|
209
|
+
async def call_async(
|
|
210
|
+
self,
|
|
211
|
+
*,
|
|
212
|
+
messages: Sequence[Message],
|
|
213
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
214
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
215
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
216
|
+
"""Generate an `llm.AsyncResponse` by asynchronously calling this model's LLM provider.
|
|
217
|
+
|
|
218
|
+
Args:
|
|
219
|
+
messages: Messages to send to the LLM.
|
|
220
|
+
tools: Optional tools that the model may invoke.
|
|
221
|
+
format: Optional response format specifier.
|
|
222
|
+
|
|
223
|
+
Returns:
|
|
224
|
+
An `llm.AsyncResponse` object containing the LLM-generated content.
|
|
225
|
+
"""
|
|
226
|
+
return await get_client(self.provider).call_async(
|
|
227
|
+
model_id=self.model_id,
|
|
228
|
+
messages=messages,
|
|
229
|
+
tools=tools,
|
|
230
|
+
**self.params,
|
|
231
|
+
format=format,
|
|
232
|
+
)
|
|
233
|
+
|
|
234
|
+
@overload
|
|
235
|
+
def stream(
|
|
236
|
+
self,
|
|
237
|
+
*,
|
|
238
|
+
messages: Sequence[Message],
|
|
239
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
240
|
+
format: None = None,
|
|
241
|
+
) -> StreamResponse:
|
|
242
|
+
"""Stream an `llm.StreamResponse` without a response format."""
|
|
243
|
+
...
|
|
244
|
+
|
|
245
|
+
@overload
|
|
246
|
+
def stream(
|
|
247
|
+
self,
|
|
248
|
+
*,
|
|
249
|
+
messages: Sequence[Message],
|
|
250
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
251
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
252
|
+
) -> StreamResponse[FormattableT]:
|
|
253
|
+
"""Stream an `llm.StreamResponse` with a response format."""
|
|
254
|
+
...
|
|
255
|
+
|
|
256
|
+
@overload
|
|
257
|
+
def stream(
|
|
258
|
+
self,
|
|
259
|
+
*,
|
|
260
|
+
messages: Sequence[Message],
|
|
261
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
262
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
263
|
+
) -> StreamResponse | StreamResponse[FormattableT]:
|
|
264
|
+
"""Stream an `llm.StreamResponse` with an optional response format."""
|
|
265
|
+
...
|
|
266
|
+
|
|
267
|
+
def stream(
|
|
268
|
+
self,
|
|
269
|
+
*,
|
|
270
|
+
messages: Sequence[Message],
|
|
271
|
+
tools: Sequence[Tool] | Toolkit | None = None,
|
|
272
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
273
|
+
) -> StreamResponse | StreamResponse[FormattableT]:
|
|
274
|
+
"""Generate an `llm.StreamResponse` by synchronously streaming from this model's LLM provider.
|
|
275
|
+
|
|
276
|
+
Args:
|
|
277
|
+
messages: Messages to send to the LLM.
|
|
278
|
+
tools: Optional tools that the model may invoke.
|
|
279
|
+
format: Optional response format specifier.
|
|
280
|
+
|
|
281
|
+
Returns:
|
|
282
|
+
An `llm.StreamResponse` object for iterating over the LLM-generated content.
|
|
283
|
+
"""
|
|
284
|
+
return get_client(self.provider).stream(
|
|
285
|
+
model_id=self.model_id,
|
|
286
|
+
messages=messages,
|
|
287
|
+
tools=tools,
|
|
288
|
+
format=format,
|
|
289
|
+
**self.params,
|
|
290
|
+
)
|
|
291
|
+
|
|
292
|
+
@overload
|
|
293
|
+
async def stream_async(
|
|
294
|
+
self,
|
|
295
|
+
*,
|
|
296
|
+
messages: list[Message],
|
|
297
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
298
|
+
format: None = None,
|
|
299
|
+
) -> AsyncStreamResponse:
|
|
300
|
+
"""Stream an `llm.AsyncStreamResponse` without a response format."""
|
|
301
|
+
...
|
|
302
|
+
|
|
303
|
+
@overload
|
|
304
|
+
async def stream_async(
|
|
305
|
+
self,
|
|
306
|
+
*,
|
|
307
|
+
messages: list[Message],
|
|
308
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
309
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
310
|
+
) -> AsyncStreamResponse[FormattableT]:
|
|
311
|
+
"""Stream an `llm.AsyncStreamResponse` with a response format."""
|
|
312
|
+
...
|
|
313
|
+
|
|
314
|
+
@overload
|
|
315
|
+
async def stream_async(
|
|
316
|
+
self,
|
|
317
|
+
*,
|
|
318
|
+
messages: list[Message],
|
|
319
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
320
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
321
|
+
) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
|
|
322
|
+
"""Stream an `llm.AsyncStreamResponse` with an optional response format."""
|
|
323
|
+
...
|
|
324
|
+
|
|
325
|
+
async def stream_async(
|
|
326
|
+
self,
|
|
327
|
+
*,
|
|
328
|
+
messages: list[Message],
|
|
329
|
+
tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
|
|
330
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
331
|
+
) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
|
|
332
|
+
"""Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this model's LLM provider.
|
|
333
|
+
|
|
334
|
+
Args:
|
|
335
|
+
messages: Messages to send to the LLM.
|
|
336
|
+
tools: Optional tools that the model may invoke.
|
|
337
|
+
format: Optional response format specifier.
|
|
338
|
+
|
|
339
|
+
Returns:
|
|
340
|
+
An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
|
|
341
|
+
"""
|
|
342
|
+
return await get_client(self.provider).stream_async(
|
|
343
|
+
model_id=self.model_id,
|
|
344
|
+
messages=messages,
|
|
345
|
+
tools=tools,
|
|
346
|
+
format=format,
|
|
347
|
+
**self.params,
|
|
348
|
+
)
|
|
349
|
+
|
|
350
|
+
@overload
|
|
351
|
+
def context_call(
|
|
352
|
+
self,
|
|
353
|
+
*,
|
|
354
|
+
ctx: Context[DepsT],
|
|
355
|
+
messages: Sequence[Message],
|
|
356
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
357
|
+
| ContextToolkit[DepsT]
|
|
358
|
+
| None = None,
|
|
359
|
+
format: None = None,
|
|
360
|
+
) -> ContextResponse[DepsT, None]:
|
|
361
|
+
"""Generate an `llm.ContextResponse` without a response format."""
|
|
362
|
+
...
|
|
363
|
+
|
|
364
|
+
@overload
|
|
365
|
+
def context_call(
|
|
366
|
+
self,
|
|
367
|
+
*,
|
|
368
|
+
ctx: Context[DepsT],
|
|
369
|
+
messages: Sequence[Message],
|
|
370
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
371
|
+
| ContextToolkit[DepsT]
|
|
372
|
+
| None = None,
|
|
373
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
374
|
+
) -> ContextResponse[DepsT, FormattableT]:
|
|
375
|
+
"""Generate an `llm.ContextResponse` with a response format."""
|
|
376
|
+
...
|
|
377
|
+
|
|
378
|
+
@overload
|
|
379
|
+
def context_call(
|
|
380
|
+
self,
|
|
381
|
+
*,
|
|
382
|
+
ctx: Context[DepsT],
|
|
383
|
+
messages: Sequence[Message],
|
|
384
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
385
|
+
| ContextToolkit[DepsT]
|
|
386
|
+
| None = None,
|
|
387
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
388
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
389
|
+
"""Generate an `llm.ContextResponse` with an optional response format."""
|
|
390
|
+
...
|
|
391
|
+
|
|
392
|
+
def context_call(
|
|
393
|
+
self,
|
|
394
|
+
*,
|
|
395
|
+
ctx: Context[DepsT],
|
|
396
|
+
messages: Sequence[Message],
|
|
397
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
398
|
+
| ContextToolkit[DepsT]
|
|
399
|
+
| None = None,
|
|
400
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
401
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
402
|
+
"""Generate an `llm.ContextResponse` by synchronously calling this model's LLM provider.
|
|
403
|
+
|
|
404
|
+
Args:
|
|
405
|
+
ctx: Context object with dependencies for tools.
|
|
406
|
+
messages: Messages to send to the LLM.
|
|
407
|
+
tools: Optional tools that the model may invoke.
|
|
408
|
+
format: Optional response format specifier.
|
|
409
|
+
|
|
410
|
+
Returns:
|
|
411
|
+
An `llm.ContextResponse` object containing the LLM-generated content.
|
|
412
|
+
"""
|
|
413
|
+
return get_client(self.provider).context_call(
|
|
414
|
+
ctx=ctx,
|
|
415
|
+
model_id=self.model_id,
|
|
416
|
+
messages=messages,
|
|
417
|
+
tools=tools,
|
|
418
|
+
format=format,
|
|
419
|
+
**self.params,
|
|
420
|
+
)
|
|
421
|
+
|
|
422
|
+
@overload
|
|
423
|
+
async def context_call_async(
|
|
424
|
+
self,
|
|
425
|
+
*,
|
|
426
|
+
ctx: Context[DepsT],
|
|
427
|
+
messages: Sequence[Message],
|
|
428
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
429
|
+
| AsyncContextToolkit[DepsT]
|
|
430
|
+
| None = None,
|
|
431
|
+
format: None = None,
|
|
432
|
+
) -> AsyncContextResponse[DepsT, None]:
|
|
433
|
+
"""Generate an `llm.AsyncContextResponse` without a response format."""
|
|
434
|
+
...
|
|
435
|
+
|
|
436
|
+
@overload
|
|
437
|
+
async def context_call_async(
|
|
438
|
+
self,
|
|
439
|
+
*,
|
|
440
|
+
ctx: Context[DepsT],
|
|
441
|
+
messages: Sequence[Message],
|
|
442
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
443
|
+
| AsyncContextToolkit[DepsT]
|
|
444
|
+
| None = None,
|
|
445
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
446
|
+
) -> AsyncContextResponse[DepsT, FormattableT]:
|
|
447
|
+
"""Generate an `llm.AsyncContextResponse` with a response format."""
|
|
448
|
+
...
|
|
449
|
+
|
|
450
|
+
@overload
|
|
451
|
+
async def context_call_async(
|
|
452
|
+
self,
|
|
453
|
+
*,
|
|
454
|
+
ctx: Context[DepsT],
|
|
455
|
+
messages: Sequence[Message],
|
|
456
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
457
|
+
| AsyncContextToolkit[DepsT]
|
|
458
|
+
| None = None,
|
|
459
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
460
|
+
) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
|
|
461
|
+
"""Generate an `llm.AsyncContextResponse` with an optional response format."""
|
|
462
|
+
...
|
|
463
|
+
|
|
464
|
+
async def context_call_async(
|
|
465
|
+
self,
|
|
466
|
+
*,
|
|
467
|
+
ctx: Context[DepsT],
|
|
468
|
+
messages: Sequence[Message],
|
|
469
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
470
|
+
| AsyncContextToolkit[DepsT]
|
|
471
|
+
| None = None,
|
|
472
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
473
|
+
) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
|
|
474
|
+
"""Generate an `llm.AsyncContextResponse` by asynchronously calling this model's LLM provider.
|
|
475
|
+
|
|
476
|
+
Args:
|
|
477
|
+
ctx: Context object with dependencies for tools.
|
|
478
|
+
messages: Messages to send to the LLM.
|
|
479
|
+
tools: Optional tools that the model may invoke.
|
|
480
|
+
format: Optional response format specifier.
|
|
481
|
+
|
|
482
|
+
Returns:
|
|
483
|
+
An `llm.AsyncContextResponse` object containing the LLM-generated content.
|
|
484
|
+
"""
|
|
485
|
+
return await get_client(self.provider).context_call_async(
|
|
486
|
+
ctx=ctx,
|
|
487
|
+
model_id=self.model_id,
|
|
488
|
+
messages=messages,
|
|
489
|
+
tools=tools,
|
|
490
|
+
format=format,
|
|
491
|
+
**self.params,
|
|
492
|
+
)
|
|
493
|
+
|
|
494
|
+
@overload
|
|
495
|
+
def context_stream(
|
|
496
|
+
self,
|
|
497
|
+
*,
|
|
498
|
+
ctx: Context[DepsT],
|
|
499
|
+
messages: Sequence[Message],
|
|
500
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
501
|
+
| ContextToolkit[DepsT]
|
|
502
|
+
| None = None,
|
|
503
|
+
format: None = None,
|
|
504
|
+
) -> ContextStreamResponse[DepsT, None]:
|
|
505
|
+
"""Stream an `llm.ContextStreamResponse` without a response format."""
|
|
506
|
+
...
|
|
507
|
+
|
|
508
|
+
@overload
|
|
509
|
+
def context_stream(
|
|
510
|
+
self,
|
|
511
|
+
*,
|
|
512
|
+
ctx: Context[DepsT],
|
|
513
|
+
messages: Sequence[Message],
|
|
514
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
515
|
+
| ContextToolkit[DepsT]
|
|
516
|
+
| None = None,
|
|
517
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
518
|
+
) -> ContextStreamResponse[DepsT, FormattableT]:
|
|
519
|
+
"""Stream an `llm.ContextStreamResponse` with a response format."""
|
|
520
|
+
...
|
|
521
|
+
|
|
522
|
+
@overload
|
|
523
|
+
def context_stream(
|
|
524
|
+
self,
|
|
525
|
+
*,
|
|
526
|
+
ctx: Context[DepsT],
|
|
527
|
+
messages: Sequence[Message],
|
|
528
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
529
|
+
| ContextToolkit[DepsT]
|
|
530
|
+
| None = None,
|
|
531
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
532
|
+
) -> (
|
|
533
|
+
ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
|
|
534
|
+
):
|
|
535
|
+
"""Stream an `llm.ContextStreamResponse` with an optional response format."""
|
|
536
|
+
...
|
|
537
|
+
|
|
538
|
+
def context_stream(
|
|
539
|
+
self,
|
|
540
|
+
*,
|
|
541
|
+
ctx: Context[DepsT],
|
|
542
|
+
messages: Sequence[Message],
|
|
543
|
+
tools: Sequence[Tool | ContextTool[DepsT]]
|
|
544
|
+
| ContextToolkit[DepsT]
|
|
545
|
+
| None = None,
|
|
546
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
547
|
+
) -> (
|
|
548
|
+
ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
|
|
549
|
+
):
|
|
550
|
+
"""Generate an `llm.ContextStreamResponse` by synchronously streaming from this model's LLM provider.
|
|
551
|
+
|
|
552
|
+
Args:
|
|
553
|
+
ctx: Context object with dependencies for tools.
|
|
554
|
+
messages: Messages to send to the LLM.
|
|
555
|
+
tools: Optional tools that the model may invoke.
|
|
556
|
+
format: Optional response format specifier.
|
|
557
|
+
|
|
558
|
+
Returns:
|
|
559
|
+
An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
|
|
560
|
+
"""
|
|
561
|
+
return get_client(self.provider).context_stream(
|
|
562
|
+
ctx=ctx,
|
|
563
|
+
model_id=self.model_id,
|
|
564
|
+
messages=messages,
|
|
565
|
+
tools=tools,
|
|
566
|
+
format=format,
|
|
567
|
+
**self.params,
|
|
568
|
+
)
|
|
569
|
+
|
|
570
|
+
@overload
|
|
571
|
+
async def context_stream_async(
|
|
572
|
+
self,
|
|
573
|
+
*,
|
|
574
|
+
ctx: Context[DepsT],
|
|
575
|
+
messages: list[Message],
|
|
576
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
577
|
+
| AsyncContextToolkit[DepsT]
|
|
578
|
+
| None = None,
|
|
579
|
+
format: None = None,
|
|
580
|
+
) -> AsyncContextStreamResponse[DepsT, None]:
|
|
581
|
+
"""Stream an `llm.AsyncContextStreamResponse` without a response format."""
|
|
582
|
+
...
|
|
583
|
+
|
|
584
|
+
@overload
|
|
585
|
+
async def context_stream_async(
|
|
586
|
+
self,
|
|
587
|
+
*,
|
|
588
|
+
ctx: Context[DepsT],
|
|
589
|
+
messages: list[Message],
|
|
590
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
591
|
+
| AsyncContextToolkit[DepsT]
|
|
592
|
+
| None = None,
|
|
593
|
+
format: type[FormattableT] | Format[FormattableT],
|
|
594
|
+
) -> AsyncContextStreamResponse[DepsT, FormattableT]:
|
|
595
|
+
"""Stream an `llm.AsyncContextStreamResponse` with a response format."""
|
|
596
|
+
...
|
|
597
|
+
|
|
598
|
+
@overload
|
|
599
|
+
async def context_stream_async(
|
|
600
|
+
self,
|
|
601
|
+
*,
|
|
602
|
+
ctx: Context[DepsT],
|
|
603
|
+
messages: list[Message],
|
|
604
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
605
|
+
| AsyncContextToolkit[DepsT]
|
|
606
|
+
| None = None,
|
|
607
|
+
format: type[FormattableT] | Format[FormattableT] | None,
|
|
608
|
+
) -> (
|
|
609
|
+
AsyncContextStreamResponse[DepsT, None]
|
|
610
|
+
| AsyncContextStreamResponse[DepsT, FormattableT]
|
|
611
|
+
):
|
|
612
|
+
"""Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
|
|
613
|
+
...
|
|
614
|
+
|
|
615
|
+
async def context_stream_async(
|
|
616
|
+
self,
|
|
617
|
+
*,
|
|
618
|
+
ctx: Context[DepsT],
|
|
619
|
+
messages: list[Message],
|
|
620
|
+
tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
|
|
621
|
+
| AsyncContextToolkit[DepsT]
|
|
622
|
+
| None = None,
|
|
623
|
+
format: type[FormattableT] | Format[FormattableT] | None = None,
|
|
624
|
+
) -> (
|
|
625
|
+
AsyncContextStreamResponse[DepsT, None]
|
|
626
|
+
| AsyncContextStreamResponse[DepsT, FormattableT]
|
|
627
|
+
):
|
|
628
|
+
"""Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this model's LLM provider.
|
|
629
|
+
|
|
630
|
+
Args:
|
|
631
|
+
ctx: Context object with dependencies for tools.
|
|
632
|
+
messages: Messages to send to the LLM.
|
|
633
|
+
tools: Optional tools that the model may invoke.
|
|
634
|
+
format: Optional response format specifier.
|
|
635
|
+
|
|
636
|
+
Returns:
|
|
637
|
+
An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
|
|
638
|
+
"""
|
|
639
|
+
return await get_client(self.provider).context_stream_async(
|
|
640
|
+
ctx=ctx,
|
|
641
|
+
model_id=self.model_id,
|
|
642
|
+
messages=messages,
|
|
643
|
+
tools=tools,
|
|
644
|
+
format=format,
|
|
645
|
+
**self.params,
|
|
646
|
+
)
|
|
647
|
+
|
|
648
|
+
@overload
|
|
649
|
+
def resume(
|
|
650
|
+
self,
|
|
651
|
+
*,
|
|
652
|
+
response: Response,
|
|
653
|
+
content: UserContent,
|
|
654
|
+
) -> Response:
|
|
655
|
+
"""Resume an `llm.Response` without a response format."""
|
|
656
|
+
...
|
|
657
|
+
|
|
658
|
+
@overload
|
|
659
|
+
def resume(
|
|
660
|
+
self,
|
|
661
|
+
*,
|
|
662
|
+
response: Response[FormattableT],
|
|
663
|
+
content: UserContent,
|
|
664
|
+
) -> Response[FormattableT]:
|
|
665
|
+
"""Resume an `llm.Response` with a response format."""
|
|
666
|
+
...
|
|
667
|
+
|
|
668
|
+
@overload
|
|
669
|
+
def resume(
|
|
670
|
+
self,
|
|
671
|
+
*,
|
|
672
|
+
response: Response | Response[FormattableT],
|
|
673
|
+
content: UserContent,
|
|
674
|
+
) -> Response | Response[FormattableT]:
|
|
675
|
+
"""Resume an `llm.Response` with an optional response format."""
|
|
676
|
+
...
|
|
677
|
+
|
|
678
|
+
def resume(
|
|
679
|
+
self,
|
|
680
|
+
*,
|
|
681
|
+
response: Response | Response[FormattableT],
|
|
682
|
+
content: UserContent,
|
|
683
|
+
) -> Response | Response[FormattableT]:
|
|
684
|
+
"""Generate a new `llm.Response` by extending another response's messages with additional user content.
|
|
685
|
+
|
|
686
|
+
Uses the previous response's tools and output format, and this model's params.
|
|
687
|
+
|
|
688
|
+
Depending on the client, this may be a wrapper around using client call methods
|
|
689
|
+
with the response's messages and the new content, or it may use a provider-specific
|
|
690
|
+
API for resuming an existing interaction.
|
|
691
|
+
|
|
692
|
+
Args:
|
|
693
|
+
response: Previous response to extend.
|
|
694
|
+
content: Additional user content to append.
|
|
695
|
+
|
|
696
|
+
Returns:
|
|
697
|
+
A new `llm.Response` object containing the extended conversation.
|
|
698
|
+
"""
|
|
699
|
+
return get_client(self.provider).resume(
|
|
700
|
+
model_id=self.model_id,
|
|
701
|
+
response=response,
|
|
702
|
+
content=content,
|
|
703
|
+
**self.params,
|
|
704
|
+
)
|
|
705
|
+
|
|
706
|
+
@overload
|
|
707
|
+
async def resume_async(
|
|
708
|
+
self,
|
|
709
|
+
*,
|
|
710
|
+
response: AsyncResponse,
|
|
711
|
+
content: UserContent,
|
|
712
|
+
) -> AsyncResponse:
|
|
713
|
+
"""Resume an `llm.AsyncResponse` without a response format."""
|
|
714
|
+
...
|
|
715
|
+
|
|
716
|
+
@overload
|
|
717
|
+
async def resume_async(
|
|
718
|
+
self,
|
|
719
|
+
*,
|
|
720
|
+
response: AsyncResponse[FormattableT],
|
|
721
|
+
content: UserContent,
|
|
722
|
+
) -> AsyncResponse[FormattableT]:
|
|
723
|
+
"""Resume an `llm.AsyncResponse` with a response format."""
|
|
724
|
+
...
|
|
725
|
+
|
|
726
|
+
@overload
|
|
727
|
+
async def resume_async(
|
|
728
|
+
self,
|
|
729
|
+
*,
|
|
730
|
+
response: AsyncResponse | AsyncResponse[FormattableT],
|
|
731
|
+
content: UserContent,
|
|
732
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
733
|
+
"""Resume an `llm.AsyncResponse` with an optional response format."""
|
|
734
|
+
...
|
|
735
|
+
|
|
736
|
+
async def resume_async(
|
|
737
|
+
self,
|
|
738
|
+
*,
|
|
739
|
+
response: AsyncResponse | AsyncResponse[FormattableT],
|
|
740
|
+
content: UserContent,
|
|
741
|
+
) -> AsyncResponse | AsyncResponse[FormattableT]:
|
|
742
|
+
"""Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.
|
|
743
|
+
|
|
744
|
+
Uses the previous response's tools and output format, and this model's params.
|
|
745
|
+
|
|
746
|
+
Depending on the client, this may be a wrapper around using client call methods
|
|
747
|
+
with the response's messages and the new content, or it may use a provider-specific
|
|
748
|
+
API for resuming an existing interaction.
|
|
749
|
+
|
|
750
|
+
Args:
|
|
751
|
+
response: Previous async response to extend.
|
|
752
|
+
content: Additional user content to append.
|
|
753
|
+
|
|
754
|
+
Returns:
|
|
755
|
+
A new `llm.AsyncResponse` object containing the extended conversation.
|
|
756
|
+
"""
|
|
757
|
+
return await get_client(self.provider).resume_async(
|
|
758
|
+
model_id=self.model_id,
|
|
759
|
+
response=response,
|
|
760
|
+
content=content,
|
|
761
|
+
**self.params,
|
|
762
|
+
)
|
|
763
|
+
|
|
764
|
+
@overload
|
|
765
|
+
def context_resume(
|
|
766
|
+
self,
|
|
767
|
+
*,
|
|
768
|
+
ctx: Context[DepsT],
|
|
769
|
+
response: ContextResponse[DepsT, None],
|
|
770
|
+
content: UserContent,
|
|
771
|
+
) -> ContextResponse[DepsT, None]:
|
|
772
|
+
"""Resume an `llm.ContextResponse` without a response format."""
|
|
773
|
+
...
|
|
774
|
+
|
|
775
|
+
@overload
|
|
776
|
+
def context_resume(
|
|
777
|
+
self,
|
|
778
|
+
*,
|
|
779
|
+
ctx: Context[DepsT],
|
|
780
|
+
response: ContextResponse[DepsT, FormattableT],
|
|
781
|
+
content: UserContent,
|
|
782
|
+
) -> ContextResponse[DepsT, FormattableT]:
|
|
783
|
+
"""Resume an `llm.ContextResponse` with a response format."""
|
|
784
|
+
...
|
|
785
|
+
|
|
786
|
+
@overload
|
|
787
|
+
def context_resume(
|
|
788
|
+
self,
|
|
789
|
+
*,
|
|
790
|
+
ctx: Context[DepsT],
|
|
791
|
+
response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
|
|
792
|
+
content: UserContent,
|
|
793
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
794
|
+
"""Resume an `llm.ContextResponse` with an optional response format."""
|
|
795
|
+
...
|
|
796
|
+
|
|
797
|
+
def context_resume(
|
|
798
|
+
self,
|
|
799
|
+
*,
|
|
800
|
+
ctx: Context[DepsT],
|
|
801
|
+
response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
|
|
802
|
+
content: UserContent,
|
|
803
|
+
) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
|
|
804
|
+
"""Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.
|
|
805
|
+
|
|
806
|
+
Uses the previous response's tools and output format, and this model's params.
|
|
807
|
+
|
|
808
|
+
Depending on the client, this may be a wrapper around using client call methods
|
|
809
|
+
with the response's messages and the new content, or it may use a provider-specific
|
|
810
|
+
API for resuming an existing interaction.
|
|
811
|
+
|
|
812
|
+
Args:
|
|
813
|
+
ctx: Context object with dependencies for tools.
|
|
814
|
+
response: Previous context response to extend.
|
|
815
|
+
content: Additional user content to append.
|
|
816
|
+
|
|
817
|
+
Returns:
|
|
818
|
+
A new `llm.ContextResponse` object containing the extended conversation.
|
|
819
|
+
"""
|
|
820
|
+
return get_client(self.provider).context_resume(
|
|
821
|
+
ctx=ctx,
|
|
822
|
+
model_id=self.model_id,
|
|
823
|
+
response=response,
|
|
824
|
+
content=content,
|
|
825
|
+
**self.params,
|
|
826
|
+
)
+
+    @overload
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextResponse[DepsT, None],
+        content: UserContent,
+    ) -> AsyncContextResponse[DepsT, None]:
+        """Resume an `llm.AsyncContextResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> AsyncContextResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextResponse[DepsT, None]
+        | AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextResponse` with an optional response format."""
+        ...
+
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextResponse[DepsT, None]
+        | AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may be a wrapper around using client call methods
+        with the response's messages and the new content, or it may use a provider-specific
+        API for resuming an existing interaction.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            response: Previous async context response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.AsyncContextResponse` object containing the extended conversation.
+        """
+        return await get_client(self.provider).context_resume_async(
+            ctx=ctx,
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse,
+        content: UserContent,
+    ) -> StreamResponse:
+        """Resume an `llm.StreamResponse` without a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse[FormattableT],
+        content: UserContent,
+    ) -> StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with an optional response format."""
+        ...
+
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may be a wrapper around using client call methods
+        with the response's messages and the new content, or it may use a provider-specific
+        API for resuming an existing interaction.
+
+        Args:
+            response: Previous stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.StreamResponse` object for streaming the extended conversation.
+        """
+        return get_client(self.provider).resume_stream(
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
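`resume_stream` follows the same forwarding pattern but hands back an `llm.StreamResponse` for incremental consumption. A sketch under similar assumptions (`model` and a prior `stream` response exist; the `for chunk in ...` iteration style is assumed, since the streaming interface is defined elsewhere in the package):

```python
# Sketch only: `model` is an llm.Model and `stream` is a StreamResponse from
# an earlier streaming call; chunk iteration is an assumed interface.
resumed = model.resume_stream(
    response=stream,
    content="Now summarize that answer in two sentences.",
)
for chunk in resumed:  # consume the extended conversation incrementally
    ...
```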
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse,
+        content: UserContent,
+    ) -> AsyncStreamResponse:
+        """Resume an `llm.AsyncStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse[FormattableT],
+        content: UserContent,
+    ) -> AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with an optional response format."""
+        ...
+
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may be a wrapper around using client call methods
+        with the response's messages and the new content, or it may use a provider-specific
+        API for resuming an existing interaction.
+
+        Args:
+            response: Previous async stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
+        """
+        return await get_client(self.provider).resume_stream_async(
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, None],
+        content: UserContent,
+    ) -> ContextStreamResponse[DepsT, None]:
+        """Resume an `llm.ContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> ContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.ContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.ContextStreamResponse` with an optional response format."""
+        ...
+
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may be a wrapper around using client call methods
+        with the response's messages and the new content, or it may use a provider-specific
+        API for resuming an existing interaction.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            response: Previous context stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.ContextStreamResponse` object for streaming the extended conversation.
+        """
+        return get_client(self.provider).context_resume_stream(
+            ctx=ctx,
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, None],
+        content: UserContent,
+    ) -> AsyncContextStreamResponse[DepsT, None]:
+        """Resume an `llm.AsyncContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
+        ...
+
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may be a wrapper around using client call methods
+        with the response's messages and the new content, or it may use a provider-specific
+        API for resuming an existing interaction.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            response: Previous async context stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
+        """
+        return await get_client(self.provider).context_resume_stream_async(
+            ctx=ctx,
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
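`context_resume_stream_async` combines all three axes: context dependencies, streaming, and async. Note that the method itself is awaited and the returned `AsyncContextStreamResponse` is then consumed. A sketch assuming `model`, `ctx`, and a prior `stream` response exist, and that the response supports `async for` (an assumed interface):

```python
import asyncio

# Sketch only: `model`, `ctx`, and `stream` are assumed to already exist.
async def follow_up() -> None:
    resumed = await model.context_resume_stream_async(
        ctx=ctx,
        response=stream,
        content="Continue from where you left off.",
    )
    async for chunk in resumed:  # assumed async-iterable
        ...

asyncio.run(follow_up())
```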
+
+
+@contextmanager
+def model(
+    *,
+    provider: Provider,
+    model_id: ModelId,
+    **params: Unpack[Params],
+) -> Iterator[None]:
+    """Set a model in context for the duration of the context manager.
+
+    This context manager sets a model that will be used by `llm.use_model()` calls
+    within the context. This allows you to override the default model at runtime.
+
+    Args:
+        provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
+        model_id: The specific model identifier for the chosen provider.
+        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
+
+    Raises:
+        ValueError: If the specified provider is not supported.
+
+    Example:
+
+        ```python
+        import mirascope.llm as llm
+
+        def recommend_book(genre: str) -> llm.Response:
+            model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+            message = llm.messages.user(f"Please recommend a book in {genre}.")
+            return model.call(messages=[message])
+
+        # Override the default model at runtime
+        with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+            response = recommend_book("fantasy")  # Uses Claude instead of GPT
+        ```
+    """
+    token = MODEL_CONTEXT.set(Model(provider, model_id, **params))
+    try:
+        yield
+    finally:
+        MODEL_CONTEXT.reset(token)
+
+
+def use_model(
+    *,
+    provider: Provider,
+    model_id: ModelId,
+    **params: Unpack[Params],
+) -> Model:
+    """Get the model from context if available, otherwise create a new Model.
+
+    This function checks if a model has been set in the context (via `llm.model()`
+    context manager). If a model is found in the context, it returns that model.
+    Otherwise, it creates and returns a new `llm.Model` instance with the provided
+    arguments as defaults.
+
+    This allows you to write functions that work with a default model but can be
+    overridden at runtime using the `llm.model()` context manager.
+
+    Args:
+        provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
+        model_id: The specific model identifier for the chosen provider.
+        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
+
+    Returns:
+        An `llm.Model` instance from context or a new instance with the specified settings.
+
+    Raises:
+        ValueError: If the specified provider is not supported.
+
+    Example:
+
+        ```python
+        import mirascope.llm as llm
+
+        def recommend_book(genre: str) -> llm.Response:
+            model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+            message = llm.messages.user(f"Please recommend a book in {genre}.")
+            return model.call(messages=[message])
+
+        # Uses the default model (gpt-4o-mini)
+        response = recommend_book("fantasy")
+
+        # Override with a different model
+        with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+            response = recommend_book("fantasy")  # Uses Claude instead
+        ```
+    """
+    context_model = get_model_from_context()
+    if context_model is not None:
+        return context_model
+    return Model(provider, model_id, **params)
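Because `**params` flows straight into the `Model` constructor, an `llm.model()` override can change sampling parameters along with the provider. A sketch reusing `recommend_book` from the docstring examples above (`temperature` is the parameter the docstrings themselves cite as an example):

```python
import mirascope.llm as llm

# Inside the block, llm.use_model(...) resolves to this Model, so the call
# below runs against gpt-4o-mini with temperature 0.2 regardless of defaults.
with llm.model(provider="openai", model_id="gpt-4o-mini", temperature=0.2):
    response = recommend_book("mystery")
```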