mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,315 @@
|
|
|
1
|
+
"""The Call module for generating responses using LLMs."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from typing import Generic, overload
|
|
5
|
+
|
|
6
|
+
from ..context import Context, DepsT
|
|
7
|
+
from ..formatting import FormattableT
|
|
8
|
+
from ..prompts import (
|
|
9
|
+
AsyncContextPrompt,
|
|
10
|
+
AsyncPrompt,
|
|
11
|
+
ContextPrompt,
|
|
12
|
+
Prompt,
|
|
13
|
+
)
|
|
14
|
+
from ..responses import (
|
|
15
|
+
AsyncContextResponse,
|
|
16
|
+
AsyncContextStreamResponse,
|
|
17
|
+
AsyncResponse,
|
|
18
|
+
AsyncStreamResponse,
|
|
19
|
+
ContextResponse,
|
|
20
|
+
ContextStreamResponse,
|
|
21
|
+
Response,
|
|
22
|
+
StreamResponse,
|
|
23
|
+
)
|
|
24
|
+
from ..tools import (
|
|
25
|
+
AsyncContextToolkit,
|
|
26
|
+
AsyncToolkit,
|
|
27
|
+
ContextToolkit,
|
|
28
|
+
Toolkit,
|
|
29
|
+
)
|
|
30
|
+
from ..types import P
|
|
31
|
+
from .base_call import BaseCall
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT]):
    """A callable that renders a prompt and generates an LLM response.

    Invoking the instance (or `call`) returns a complete response; `stream`
    returns an incremental streaming response instead.
    """

    @overload
    def __call__(
        self: "Call[P, None]", *args: P.args, **kwargs: P.kwargs
    ) -> Response: ...

    @overload
    def __call__(
        self: "Call[P, FormattableT]", *args: P.args, **kwargs: P.kwargs
    ) -> Response[FormattableT]: ...

    def __call__(
        self, *args: P.args, **kwargs: P.kwargs
    ) -> Response | Response[FormattableT]:
        """Generates a response using the LLM (shorthand for `self.call`)."""
        return self.call(*args, **kwargs)

    @overload
    def call(self: "Call[P, None]", *args: P.args, **kwargs: P.kwargs) -> Response: ...

    @overload
    def call(
        self: "Call[P, FormattableT]", *args: P.args, **kwargs: P.kwargs
    ) -> Response[FormattableT]: ...

    def call(
        self, *args: P.args, **kwargs: P.kwargs
    ) -> Response | Response[FormattableT]:
        """Generates a response using the LLM.

        Renders the prompt with the given arguments, then delegates to the
        model together with this call's toolkit and output format.
        """
        rendered = self.fn(*args, **kwargs)
        return self.model.call(messages=rendered, tools=self.toolkit, format=self.format)

    @overload
    def stream(
        self: "Call[P, None]", *args: P.args, **kwargs: P.kwargs
    ) -> StreamResponse: ...

    @overload
    def stream(
        self: "Call[P, FormattableT]", *args: P.args, **kwargs: P.kwargs
    ) -> StreamResponse[FormattableT]: ...

    def stream(
        self, *args: P.args, **kwargs: P.kwargs
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Generates a streaming response using the LLM.

        Same rendering/delegation as `call`, but asks the model for a
        streaming response.
        """
        rendered = self.fn(*args, **kwargs)
        return self.model.stream(messages=rendered, tools=self.toolkit, format=self.format)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass
class AsyncCall(
    BaseCall[P, AsyncPrompt, AsyncToolkit, FormattableT],
    Generic[P, FormattableT],
):
    """A class for generating responses using LLMs asynchronously.

    Awaiting the instance (or `call`) returns a complete response; `stream`
    returns an asynchronous streaming response instead.
    """

    @overload
    async def __call__(
        self: "AsyncCall[P, None]", *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse: ...

    @overload
    async def __call__(
        self: "AsyncCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse[FormattableT]: ...

    async def __call__(
        self, *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        # Fixed docstring typo: previously read "Generates a Asyncresponse".
        """Generates a response using the LLM asynchronously."""
        return await self.call(*args, **kwargs)

    @overload
    async def call(
        self: "AsyncCall[P, None]", *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse: ...

    @overload
    async def call(
        self: "AsyncCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse[FormattableT]: ...

    async def call(
        self, *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generates a response using the LLM asynchronously.

        Awaits the async prompt to render the messages, then delegates to the
        model's async call with this call's toolkit and output format.
        """
        messages = await self.fn(*args, **kwargs)
        return await self.model.call_async(
            messages=messages, tools=self.toolkit, format=self.format
        )

    @overload
    async def stream(
        self: "AsyncCall[P, None]", *args: P.args, **kwargs: P.kwargs
    ) -> AsyncStreamResponse: ...

    @overload
    async def stream(
        self: "AsyncCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs
    ) -> AsyncStreamResponse[FormattableT]: ...

    async def stream(
        self, *args: P.args, **kwargs: P.kwargs
        # Union order normalized (unformatted | formatted) to match the
        # sibling Call / ContextCall classes; type-equivalent to the original.
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Generates a streaming response using the LLM asynchronously."""
        messages = await self.fn(*args, **kwargs)
        return await self.model.stream_async(
            messages=messages, tools=self.toolkit, format=self.format
        )
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
@dataclass
class ContextCall(
    BaseCall[P, ContextPrompt, ContextToolkit[DepsT], FormattableT],
    Generic[P, DepsT, FormattableT],
):
    """A callable that renders a context-aware prompt and generates an LLM response.

    Unlike `Call`, every invocation takes a `Context[DepsT]` first argument
    that is threaded through both prompt rendering and the model call.
    """

    @overload
    def __call__(
        self: "ContextCall[P, DepsT, None]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> ContextResponse[DepsT, None]: ...

    @overload
    def __call__(
        self: "ContextCall[P, DepsT, FormattableT]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> ContextResponse[DepsT, FormattableT]: ...

    def __call__(
        self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generates a response using the LLM (shorthand for `self.call`)."""
        return self.call(ctx, *args, **kwargs)

    @overload
    def call(
        self: "ContextCall[P, DepsT, None]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> ContextResponse[DepsT, None]: ...

    @overload
    def call(
        self: "ContextCall[P, DepsT, FormattableT]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> ContextResponse[DepsT, FormattableT]: ...

    def call(
        self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generates a response using the LLM.

        Renders the prompt with the context and arguments, then delegates to
        the model's context-aware call with this call's toolkit and format.
        """
        rendered = self.fn(ctx, *args, **kwargs)
        return self.model.context_call(
            ctx=ctx, messages=rendered, tools=self.toolkit, format=self.format
        )

    @overload
    def stream(
        self: "ContextCall[P, DepsT, None]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> ContextStreamResponse[DepsT, None]: ...

    @overload
    def stream(
        self: "ContextCall[P, DepsT, FormattableT]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> ContextStreamResponse[DepsT, FormattableT]: ...

    def stream(
        self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Generates a streaming response using the LLM.

        Same rendering/delegation as `call`, but asks the model for a
        context-aware streaming response.
        """
        rendered = self.fn(ctx, *args, **kwargs)
        return self.model.context_stream(
            ctx=ctx, messages=rendered, tools=self.toolkit, format=self.format
        )
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
@dataclass
class AsyncContextCall(
    BaseCall[P, AsyncContextPrompt, AsyncContextToolkit[DepsT], FormattableT],
    Generic[P, DepsT, FormattableT],
):
    """A class for generating responses using LLMs asynchronously.

    Async counterpart of `ContextCall`: the prompt function is awaited to
    produce messages, which are then sent to the model along with the
    caller-supplied `Context[DepsT]`, toolkit, and output format.
    """

    # NOTE: the overload pairs below only narrow the static format type —
    # `FormattableT` is `None` when the call was created without a `format`.
    @overload
    async def __call__(
        self: "AsyncContextCall[P, DepsT, None]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> AsyncContextResponse[DepsT, None]: ...

    @overload
    async def __call__(
        self: "AsyncContextCall[P, DepsT, FormattableT]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> AsyncContextResponse[DepsT, FormattableT]: ...

    async def __call__(
        self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generates a response using the LLM asynchronously.

        Awaiting the instance directly is equivalent to awaiting `self.call`.
        """
        return await self.call(ctx, *args, **kwargs)

    @overload
    async def call(
        self: "AsyncContextCall[P, DepsT, None]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> AsyncContextResponse[DepsT, None]: ...

    @overload
    async def call(
        self: "AsyncContextCall[P, DepsT, FormattableT]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> AsyncContextResponse[DepsT, FormattableT]: ...

    async def call(
        self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generates a response using the LLM asynchronously.

        Args:
            ctx: Context carrying the dependencies (`DepsT`) for this call.
            *args: Positional arguments forwarded to the prompt function.
            **kwargs: Keyword arguments forwarded to the prompt function.

        Returns:
            The model's response for the rendered messages, parsed per
            `self.format` when one is configured.
        """
        # The prompt function is itself async, so rendering is awaited first.
        messages = await self.fn(ctx, *args, **kwargs)
        return await self.model.context_call_async(
            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
        )

    @overload
    async def stream(
        self: "AsyncContextCall[P, DepsT, None]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> AsyncContextStreamResponse[DepsT, None]: ...

    @overload
    async def stream(
        self: "AsyncContextCall[P, DepsT, FormattableT]",
        ctx: Context[DepsT],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> AsyncContextStreamResponse[DepsT, FormattableT]: ...

    async def stream(
        self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Generates a streaming response using the LLM asynchronously.

        Same contract as `call`, but returns a streaming response object
        instead of a fully materialized one.
        """
        messages = await self.fn(ctx, *args, **kwargs)
        return await self.model.context_stream_async(
            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
        )
|
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
"""The `llm.call` decorator for turning `Prompt` functions into LLM calls."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Sequence
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Generic, Literal, cast, overload
|
|
8
|
+
from typing_extensions import Unpack
|
|
9
|
+
|
|
10
|
+
from ..clients import (
|
|
11
|
+
AnthropicModelId,
|
|
12
|
+
GoogleModelId,
|
|
13
|
+
ModelId,
|
|
14
|
+
OpenAICompletionsModelId,
|
|
15
|
+
OpenAIResponsesModelId,
|
|
16
|
+
Params,
|
|
17
|
+
Provider,
|
|
18
|
+
)
|
|
19
|
+
from ..context import DepsT
|
|
20
|
+
from ..formatting import Format, FormattableT
|
|
21
|
+
from ..models import Model
|
|
22
|
+
from ..prompts import (
|
|
23
|
+
AsyncContextPromptable,
|
|
24
|
+
AsyncPromptable,
|
|
25
|
+
ContextPromptable,
|
|
26
|
+
Promptable,
|
|
27
|
+
_utils as _prompt_utils,
|
|
28
|
+
prompt,
|
|
29
|
+
)
|
|
30
|
+
from ..tools import (
|
|
31
|
+
AsyncContextTool,
|
|
32
|
+
AsyncContextToolkit,
|
|
33
|
+
AsyncTool,
|
|
34
|
+
AsyncToolkit,
|
|
35
|
+
ContextTool,
|
|
36
|
+
ContextToolkit,
|
|
37
|
+
Tool,
|
|
38
|
+
Toolkit,
|
|
39
|
+
ToolT,
|
|
40
|
+
)
|
|
41
|
+
from ..types import P
|
|
42
|
+
from .calls import AsyncCall, AsyncContextCall, Call, ContextCall
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@dataclass(kw_only=True)
class CallDecorator(Generic[ToolT, FormattableT]):
    """A decorator for converting prompts to calls.

    Captures the model, tools, and output format chosen at decoration time;
    applying the instance to a prompt function produces the matching call
    variant (sync/async, with or without a `Context` parameter).
    """

    model: Model
    tools: Sequence[ToolT] | None
    format: type[FormattableT] | Format[FormattableT] | None

    @overload
    def __call__(
        self: CallDecorator[AsyncTool | AsyncContextTool[DepsT], FormattableT],
        fn: AsyncContextPromptable[P, DepsT],
    ) -> AsyncContextCall[P, DepsT, FormattableT]:
        """Overload: async context prompt becomes an `AsyncContextCall`."""
        ...

    @overload
    def __call__(
        self: CallDecorator[Tool | ContextTool[DepsT], FormattableT],
        fn: ContextPromptable[P, DepsT],
    ) -> ContextCall[P, DepsT, FormattableT]:
        """Overload: sync context prompt becomes a `ContextCall`."""
        ...

    @overload
    def __call__(
        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncPromptable[P]
    ) -> AsyncCall[P, FormattableT]:
        """Overload: async prompt becomes an `AsyncCall`."""
        ...

    @overload
    def __call__(
        self: CallDecorator[Tool, FormattableT], fn: Promptable[P]
    ) -> Call[P, FormattableT]:
        """Overload: sync prompt becomes a `Call`."""
        ...

    def __call__(
        self,
        fn: ContextPromptable[P, DepsT]
        | AsyncContextPromptable[P, DepsT]
        | Promptable[P]
        | AsyncPromptable[P],
    ) -> (
        ContextCall[P, DepsT, FormattableT]
        | AsyncContextCall[P, DepsT, FormattableT]
        | Call[P, FormattableT]
        | AsyncCall[P, FormattableT]
    ):
        """Decorates a prompt into a Call or ContextCall.

        Inspects the prompt function once (context-taking? coroutine?) and
        dispatches to the matching call variant. The `cast` calls below only
        refine the static element type of `self.tools`; they are no-ops at
        runtime.
        """
        takes_context = _prompt_utils.is_context_promptable(fn)
        is_coroutine = _prompt_utils.is_async_promptable(fn)
        wrapped = prompt(fn)

        if takes_context:
            if is_coroutine:
                return AsyncContextCall(
                    fn=wrapped,
                    default_model=self.model,
                    format=self.format,
                    toolkit=AsyncContextToolkit(
                        tools=cast(
                            Sequence[AsyncTool | AsyncContextTool[DepsT]] | None,
                            self.tools,
                        )
                    ),
                )
            return ContextCall(
                fn=wrapped,
                default_model=self.model,
                format=self.format,
                toolkit=ContextToolkit(
                    tools=cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
                ),
            )
        if is_coroutine:
            return AsyncCall(
                fn=wrapped,
                default_model=self.model,
                format=self.format,
                toolkit=AsyncToolkit(tools=cast(Sequence[AsyncTool] | None, self.tools)),
            )
        return Call(
            fn=wrapped,
            default_model=self.model,
            format=self.format,
            toolkit=Toolkit(tools=cast(Sequence[Tool] | None, self.tools)),
        )
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
@overload
def call(
    *,
    provider: Literal["anthropic"],
    model_id: AnthropicModelId,
    tools: list[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
    """Decorate a prompt into a Call using Anthropic models."""
    ...


@overload
def call(
    *,
    provider: Literal["google"],
    model_id: GoogleModelId,
    tools: list[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
    """Decorate a prompt into a Call using Google models."""
    ...


@overload
def call(
    *,
    provider: Literal["openai:completions"],
    model_id: OpenAICompletionsModelId,
    tools: list[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
    """Decorate a prompt into a Call using OpenAI models."""
    ...


@overload
def call(
    *,
    provider: Literal["openai:responses", "openai"],
    model_id: OpenAIResponsesModelId,
    tools: list[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
    """Decorate a prompt into a Call using OpenAI models (Responses API)."""
    ...


@overload
def call(
    *,
    provider: Provider,
    model_id: ModelId,
    tools: list[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
    """Decorate a prompt into a Call using a generic provider and model."""
    ...


def call(
    *,
    provider: Provider,
    model_id: ModelId,
    tools: list[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
    """Returns a decorator for turning prompt template functions into generations.

    This decorator creates a `Call` or `ContextCall` that can be used with prompt functions.
    If the first parameter is typed as `llm.Context[T]`, it creates a ContextCall.
    Otherwise, it creates a regular Call.

    Args:
        provider: The LLM provider to route the call through.
        model_id: The provider-specific model identifier.
        tools: Tools to make available to the model, if any.
        format: A type (or `Format`) describing the structured output, if any.
        **params: Additional parameters forwarded to the `Model` constructor.

    Returns:
        A `CallDecorator` that converts a prompt function into a call object.

    Example:

        Regular call:
        ```python
        from mirascope import llm

        @llm.call(
            provider="openai:completions",
            model_id="gpt-4o-mini",
        )
        def answer_question(question: str) -> str:
            return f"Answer this question: {question}"

        response: llm.Response = answer_question("What is the capital of France?")
        print(response)
        ```

    Example:

        Context call:
        ```python
        from dataclasses import dataclass
        from mirascope import llm

        @dataclass
        class Personality:
            vibe: str

        @llm.call(
            provider="openai:completions",
            model_id="gpt-4o-mini",
        )
        def answer_question(ctx: llm.Context[Personality], question: str) -> str:
            return f"Your vibe is {ctx.deps.vibe}. Answer this question: {question}"

        ctx = llm.Context(deps=Personality(vibe="snarky"))
        response = answer_question(ctx, "What is the capital of France?")
        print(response)
        ```
    """
    # Bind the provider, model id, and extra params into a Model once; the
    # returned decorator reuses that same model for every prompt it wraps.
    model = Model(provider=provider, model_id=model_id, **params)
    return CallDecorator(model=model, tools=tools, format=format)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""Client interfaces for LLM providers."""
|
|
2
|
+
|
|
3
|
+
from .anthropic import (
|
|
4
|
+
AnthropicClient,
|
|
5
|
+
AnthropicModelId,
|
|
6
|
+
)
|
|
7
|
+
from .base import BaseClient, ClientT, Params
|
|
8
|
+
from .google import GoogleClient, GoogleModelId
|
|
9
|
+
from .openai import (
|
|
10
|
+
OpenAICompletionsClient,
|
|
11
|
+
OpenAICompletionsModelId,
|
|
12
|
+
OpenAIResponsesClient,
|
|
13
|
+
OpenAIResponsesModelId,
|
|
14
|
+
)
|
|
15
|
+
from .providers import PROVIDERS, ModelId, Provider, client, get_client
|
|
16
|
+
|
|
17
|
+
# Explicit public API of the clients subpackage; mirrors the imports above
# (one entry per re-exported name).
__all__ = [
    "PROVIDERS",
    "AnthropicClient",
    "AnthropicModelId",
    "BaseClient",
    "ClientT",
    "GoogleClient",
    "GoogleModelId",
    "ModelId",
    "OpenAICompletionsClient",
    "OpenAICompletionsModelId",
    "OpenAIResponsesClient",
    "OpenAIResponsesModelId",
    "Params",
    "Provider",
    "client",
    "get_client",
]
|