mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +3 -59
- mirascope/graphs/__init__.py +22 -0
- mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
- mirascope/llm/__init__.py +206 -16
- mirascope/llm/agents/__init__.py +15 -0
- mirascope/llm/agents/agent.py +97 -0
- mirascope/llm/agents/agent_template.py +45 -0
- mirascope/llm/agents/decorator.py +176 -0
- mirascope/llm/calls/__init__.py +16 -0
- mirascope/llm/calls/base_call.py +33 -0
- mirascope/llm/calls/calls.py +315 -0
- mirascope/llm/calls/decorator.py +255 -0
- mirascope/llm/clients/__init__.py +34 -0
- mirascope/llm/clients/anthropic/__init__.py +11 -0
- mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
- mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
- mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
- mirascope/llm/clients/anthropic/clients.py +819 -0
- mirascope/llm/clients/anthropic/model_ids.py +8 -0
- mirascope/llm/clients/base/__init__.py +15 -0
- mirascope/llm/clients/base/_utils.py +192 -0
- mirascope/llm/clients/base/client.py +1256 -0
- mirascope/llm/clients/base/kwargs.py +12 -0
- mirascope/llm/clients/base/params.py +93 -0
- mirascope/llm/clients/google/__init__.py +6 -0
- mirascope/llm/clients/google/_utils/__init__.py +13 -0
- mirascope/llm/clients/google/_utils/decode.py +231 -0
- mirascope/llm/clients/google/_utils/encode.py +279 -0
- mirascope/llm/clients/google/clients.py +853 -0
- mirascope/llm/clients/google/message.py +7 -0
- mirascope/llm/clients/google/model_ids.py +15 -0
- mirascope/llm/clients/openai/__init__.py +25 -0
- mirascope/llm/clients/openai/completions/__init__.py +9 -0
- mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
- mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
- mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
- mirascope/llm/clients/openai/completions/clients.py +833 -0
- mirascope/llm/clients/openai/completions/model_ids.py +8 -0
- mirascope/llm/clients/openai/responses/__init__.py +9 -0
- mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
- mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
- mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
- mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
- mirascope/llm/clients/openai/responses/clients.py +832 -0
- mirascope/llm/clients/openai/responses/model_ids.py +8 -0
- mirascope/llm/clients/openai/shared/__init__.py +7 -0
- mirascope/llm/clients/openai/shared/_utils.py +55 -0
- mirascope/llm/clients/providers.py +175 -0
- mirascope/llm/content/__init__.py +70 -0
- mirascope/llm/content/audio.py +173 -0
- mirascope/llm/content/document.py +94 -0
- mirascope/llm/content/image.py +206 -0
- mirascope/llm/content/text.py +47 -0
- mirascope/llm/content/thought.py +58 -0
- mirascope/llm/content/tool_call.py +63 -0
- mirascope/llm/content/tool_output.py +26 -0
- mirascope/llm/context/__init__.py +6 -0
- mirascope/llm/context/_utils.py +28 -0
- mirascope/llm/context/context.py +24 -0
- mirascope/llm/exceptions.py +105 -0
- mirascope/llm/formatting/__init__.py +22 -0
- mirascope/llm/formatting/_utils.py +74 -0
- mirascope/llm/formatting/format.py +104 -0
- mirascope/llm/formatting/from_call_args.py +30 -0
- mirascope/llm/formatting/partial.py +58 -0
- mirascope/llm/formatting/types.py +109 -0
- mirascope/llm/mcp/__init__.py +5 -0
- mirascope/llm/mcp/client.py +118 -0
- mirascope/llm/messages/__init__.py +32 -0
- mirascope/llm/messages/message.py +182 -0
- mirascope/llm/models/__init__.py +16 -0
- mirascope/llm/models/models.py +1243 -0
- mirascope/llm/prompts/__init__.py +33 -0
- mirascope/llm/prompts/_utils.py +60 -0
- mirascope/llm/prompts/decorator.py +286 -0
- mirascope/llm/prompts/protocols.py +99 -0
- mirascope/llm/responses/__init__.py +57 -0
- mirascope/llm/responses/_utils.py +56 -0
- mirascope/llm/responses/base_response.py +91 -0
- mirascope/llm/responses/base_stream_response.py +697 -0
- mirascope/llm/responses/finish_reason.py +27 -0
- mirascope/llm/responses/response.py +345 -0
- mirascope/llm/responses/root_response.py +177 -0
- mirascope/llm/responses/stream_response.py +572 -0
- mirascope/llm/responses/streams.py +363 -0
- mirascope/llm/tools/__init__.py +40 -0
- mirascope/llm/tools/_utils.py +25 -0
- mirascope/llm/tools/decorator.py +175 -0
- mirascope/llm/tools/protocols.py +96 -0
- mirascope/llm/tools/tool_schema.py +246 -0
- mirascope/llm/tools/toolkit.py +152 -0
- mirascope/llm/tools/tools.py +169 -0
- mirascope/llm/types/__init__.py +22 -0
- mirascope/llm/types/dataclass.py +9 -0
- mirascope/llm/types/jsonable.py +44 -0
- mirascope/llm/types/type_vars.py +19 -0
- mirascope-2.0.0a0.dist-info/METADATA +117 -0
- mirascope-2.0.0a0.dist-info/RECORD +101 -0
- mirascope/beta/__init__.py +0 -3
- mirascope/beta/openai/__init__.py +0 -17
- mirascope/beta/openai/realtime/__init__.py +0 -13
- mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
- mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
- mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
- mirascope/beta/openai/realtime/realtime.py +0 -500
- mirascope/beta/openai/realtime/recording.py +0 -98
- mirascope/beta/openai/realtime/tool.py +0 -113
- mirascope/beta/rag/__init__.py +0 -24
- mirascope/beta/rag/base/__init__.py +0 -22
- mirascope/beta/rag/base/chunkers/__init__.py +0 -2
- mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
- mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
- mirascope/beta/rag/base/config.py +0 -8
- mirascope/beta/rag/base/document.py +0 -11
- mirascope/beta/rag/base/embedders.py +0 -35
- mirascope/beta/rag/base/embedding_params.py +0 -18
- mirascope/beta/rag/base/embedding_response.py +0 -30
- mirascope/beta/rag/base/query_results.py +0 -7
- mirascope/beta/rag/base/vectorstore_params.py +0 -18
- mirascope/beta/rag/base/vectorstores.py +0 -37
- mirascope/beta/rag/chroma/__init__.py +0 -11
- mirascope/beta/rag/chroma/types.py +0 -62
- mirascope/beta/rag/chroma/vectorstores.py +0 -121
- mirascope/beta/rag/cohere/__init__.py +0 -11
- mirascope/beta/rag/cohere/embedders.py +0 -87
- mirascope/beta/rag/cohere/embedding_params.py +0 -29
- mirascope/beta/rag/cohere/embedding_response.py +0 -29
- mirascope/beta/rag/cohere/py.typed +0 -0
- mirascope/beta/rag/openai/__init__.py +0 -11
- mirascope/beta/rag/openai/embedders.py +0 -144
- mirascope/beta/rag/openai/embedding_params.py +0 -18
- mirascope/beta/rag/openai/embedding_response.py +0 -14
- mirascope/beta/rag/openai/py.typed +0 -0
- mirascope/beta/rag/pinecone/__init__.py +0 -19
- mirascope/beta/rag/pinecone/types.py +0 -143
- mirascope/beta/rag/pinecone/vectorstores.py +0 -148
- mirascope/beta/rag/weaviate/__init__.py +0 -6
- mirascope/beta/rag/weaviate/types.py +0 -92
- mirascope/beta/rag/weaviate/vectorstores.py +0 -103
- mirascope/core/__init__.py +0 -109
- mirascope/core/anthropic/__init__.py +0 -31
- mirascope/core/anthropic/_call.py +0 -67
- mirascope/core/anthropic/_call_kwargs.py +0 -13
- mirascope/core/anthropic/_thinking.py +0 -70
- mirascope/core/anthropic/_utils/__init__.py +0 -16
- mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
- mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
- mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
- mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
- mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
- mirascope/core/anthropic/_utils/_setup_call.py +0 -146
- mirascope/core/anthropic/call_params.py +0 -44
- mirascope/core/anthropic/call_response.py +0 -226
- mirascope/core/anthropic/call_response_chunk.py +0 -152
- mirascope/core/anthropic/dynamic_config.py +0 -40
- mirascope/core/anthropic/py.typed +0 -0
- mirascope/core/anthropic/stream.py +0 -204
- mirascope/core/anthropic/tool.py +0 -101
- mirascope/core/azure/__init__.py +0 -31
- mirascope/core/azure/_call.py +0 -67
- mirascope/core/azure/_call_kwargs.py +0 -13
- mirascope/core/azure/_utils/__init__.py +0 -14
- mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
- mirascope/core/azure/_utils/_convert_message_params.py +0 -121
- mirascope/core/azure/_utils/_get_credential.py +0 -33
- mirascope/core/azure/_utils/_get_json_output.py +0 -27
- mirascope/core/azure/_utils/_handle_stream.py +0 -130
- mirascope/core/azure/_utils/_message_param_converter.py +0 -117
- mirascope/core/azure/_utils/_setup_call.py +0 -183
- mirascope/core/azure/call_params.py +0 -59
- mirascope/core/azure/call_response.py +0 -215
- mirascope/core/azure/call_response_chunk.py +0 -105
- mirascope/core/azure/dynamic_config.py +0 -30
- mirascope/core/azure/py.typed +0 -0
- mirascope/core/azure/stream.py +0 -147
- mirascope/core/azure/tool.py +0 -93
- mirascope/core/base/__init__.py +0 -86
- mirascope/core/base/_call_factory.py +0 -256
- mirascope/core/base/_create.py +0 -253
- mirascope/core/base/_extract.py +0 -175
- mirascope/core/base/_extract_with_tools.py +0 -189
- mirascope/core/base/_partial.py +0 -95
- mirascope/core/base/_utils/__init__.py +0 -92
- mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
- mirascope/core/base/_utils/_base_type.py +0 -26
- mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
- mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
- mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
- mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
- mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
- mirascope/core/base/_utils/_extract_tool_return.py +0 -42
- mirascope/core/base/_utils/_fn_is_async.py +0 -24
- mirascope/core/base/_utils/_format_template.py +0 -32
- mirascope/core/base/_utils/_get_audio_type.py +0 -18
- mirascope/core/base/_utils/_get_common_usage.py +0 -20
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
- mirascope/core/base/_utils/_get_document_type.py +0 -7
- mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
- mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
- mirascope/core/base/_utils/_get_fn_args.py +0 -23
- mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
- mirascope/core/base/_utils/_get_image_type.py +0 -26
- mirascope/core/base/_utils/_get_metadata.py +0 -17
- mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
- mirascope/core/base/_utils/_get_prompt_template.py +0 -28
- mirascope/core/base/_utils/_get_template_values.py +0 -51
- mirascope/core/base/_utils/_get_template_variables.py +0 -38
- mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
- mirascope/core/base/_utils/_is_prompt_template.py +0 -24
- mirascope/core/base/_utils/_json_mode_content.py +0 -17
- mirascope/core/base/_utils/_messages_decorator.py +0 -121
- mirascope/core/base/_utils/_parse_content_template.py +0 -323
- mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
- mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
- mirascope/core/base/_utils/_protocols.py +0 -901
- mirascope/core/base/_utils/_setup_call.py +0 -79
- mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
- mirascope/core/base/call_kwargs.py +0 -13
- mirascope/core/base/call_params.py +0 -36
- mirascope/core/base/call_response.py +0 -338
- mirascope/core/base/call_response_chunk.py +0 -130
- mirascope/core/base/dynamic_config.py +0 -82
- mirascope/core/base/from_call_args.py +0 -30
- mirascope/core/base/merge_decorators.py +0 -59
- mirascope/core/base/message_param.py +0 -175
- mirascope/core/base/messages.py +0 -116
- mirascope/core/base/metadata.py +0 -13
- mirascope/core/base/prompt.py +0 -497
- mirascope/core/base/response_model_config_dict.py +0 -9
- mirascope/core/base/stream.py +0 -479
- mirascope/core/base/stream_config.py +0 -11
- mirascope/core/base/structured_stream.py +0 -296
- mirascope/core/base/tool.py +0 -214
- mirascope/core/base/toolkit.py +0 -176
- mirascope/core/base/types.py +0 -344
- mirascope/core/bedrock/__init__.py +0 -34
- mirascope/core/bedrock/_call.py +0 -68
- mirascope/core/bedrock/_call_kwargs.py +0 -12
- mirascope/core/bedrock/_types.py +0 -104
- mirascope/core/bedrock/_utils/__init__.py +0 -14
- mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
- mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
- mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
- mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
- mirascope/core/bedrock/_utils/_setup_call.py +0 -258
- mirascope/core/bedrock/call_params.py +0 -38
- mirascope/core/bedrock/call_response.py +0 -248
- mirascope/core/bedrock/call_response_chunk.py +0 -111
- mirascope/core/bedrock/dynamic_config.py +0 -37
- mirascope/core/bedrock/py.typed +0 -0
- mirascope/core/bedrock/stream.py +0 -154
- mirascope/core/bedrock/tool.py +0 -100
- mirascope/core/cohere/__init__.py +0 -30
- mirascope/core/cohere/_call.py +0 -67
- mirascope/core/cohere/_call_kwargs.py +0 -11
- mirascope/core/cohere/_types.py +0 -20
- mirascope/core/cohere/_utils/__init__.py +0 -14
- mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
- mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
- mirascope/core/cohere/_utils/_get_json_output.py +0 -30
- mirascope/core/cohere/_utils/_handle_stream.py +0 -35
- mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
- mirascope/core/cohere/_utils/_setup_call.py +0 -150
- mirascope/core/cohere/call_params.py +0 -62
- mirascope/core/cohere/call_response.py +0 -205
- mirascope/core/cohere/call_response_chunk.py +0 -125
- mirascope/core/cohere/dynamic_config.py +0 -32
- mirascope/core/cohere/py.typed +0 -0
- mirascope/core/cohere/stream.py +0 -113
- mirascope/core/cohere/tool.py +0 -93
- mirascope/core/costs/__init__.py +0 -5
- mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
- mirascope/core/costs/_azure_calculate_cost.py +0 -11
- mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
- mirascope/core/costs/_cohere_calculate_cost.py +0 -44
- mirascope/core/costs/_gemini_calculate_cost.py +0 -67
- mirascope/core/costs/_google_calculate_cost.py +0 -427
- mirascope/core/costs/_groq_calculate_cost.py +0 -156
- mirascope/core/costs/_litellm_calculate_cost.py +0 -11
- mirascope/core/costs/_mistral_calculate_cost.py +0 -64
- mirascope/core/costs/_openai_calculate_cost.py +0 -416
- mirascope/core/costs/_vertex_calculate_cost.py +0 -67
- mirascope/core/costs/_xai_calculate_cost.py +0 -104
- mirascope/core/costs/calculate_cost.py +0 -86
- mirascope/core/gemini/__init__.py +0 -40
- mirascope/core/gemini/_call.py +0 -67
- mirascope/core/gemini/_call_kwargs.py +0 -12
- mirascope/core/gemini/_utils/__init__.py +0 -14
- mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
- mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
- mirascope/core/gemini/_utils/_get_json_output.py +0 -35
- mirascope/core/gemini/_utils/_handle_stream.py +0 -33
- mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
- mirascope/core/gemini/_utils/_setup_call.py +0 -149
- mirascope/core/gemini/call_params.py +0 -52
- mirascope/core/gemini/call_response.py +0 -216
- mirascope/core/gemini/call_response_chunk.py +0 -100
- mirascope/core/gemini/dynamic_config.py +0 -26
- mirascope/core/gemini/stream.py +0 -120
- mirascope/core/gemini/tool.py +0 -104
- mirascope/core/google/__init__.py +0 -29
- mirascope/core/google/_call.py +0 -67
- mirascope/core/google/_call_kwargs.py +0 -13
- mirascope/core/google/_utils/__init__.py +0 -14
- mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
- mirascope/core/google/_utils/_convert_message_params.py +0 -297
- mirascope/core/google/_utils/_get_json_output.py +0 -37
- mirascope/core/google/_utils/_handle_stream.py +0 -58
- mirascope/core/google/_utils/_message_param_converter.py +0 -200
- mirascope/core/google/_utils/_setup_call.py +0 -201
- mirascope/core/google/_utils/_validate_media_type.py +0 -58
- mirascope/core/google/call_params.py +0 -22
- mirascope/core/google/call_response.py +0 -255
- mirascope/core/google/call_response_chunk.py +0 -135
- mirascope/core/google/dynamic_config.py +0 -26
- mirascope/core/google/stream.py +0 -199
- mirascope/core/google/tool.py +0 -146
- mirascope/core/groq/__init__.py +0 -30
- mirascope/core/groq/_call.py +0 -67
- mirascope/core/groq/_call_kwargs.py +0 -13
- mirascope/core/groq/_utils/__init__.py +0 -14
- mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/groq/_utils/_convert_message_params.py +0 -112
- mirascope/core/groq/_utils/_get_json_output.py +0 -27
- mirascope/core/groq/_utils/_handle_stream.py +0 -123
- mirascope/core/groq/_utils/_message_param_converter.py +0 -89
- mirascope/core/groq/_utils/_setup_call.py +0 -132
- mirascope/core/groq/call_params.py +0 -52
- mirascope/core/groq/call_response.py +0 -213
- mirascope/core/groq/call_response_chunk.py +0 -104
- mirascope/core/groq/dynamic_config.py +0 -29
- mirascope/core/groq/py.typed +0 -0
- mirascope/core/groq/stream.py +0 -135
- mirascope/core/groq/tool.py +0 -80
- mirascope/core/litellm/__init__.py +0 -28
- mirascope/core/litellm/_call.py +0 -67
- mirascope/core/litellm/_utils/__init__.py +0 -5
- mirascope/core/litellm/_utils/_setup_call.py +0 -109
- mirascope/core/litellm/call_params.py +0 -10
- mirascope/core/litellm/call_response.py +0 -24
- mirascope/core/litellm/call_response_chunk.py +0 -14
- mirascope/core/litellm/dynamic_config.py +0 -8
- mirascope/core/litellm/py.typed +0 -0
- mirascope/core/litellm/stream.py +0 -86
- mirascope/core/litellm/tool.py +0 -13
- mirascope/core/mistral/__init__.py +0 -36
- mirascope/core/mistral/_call.py +0 -65
- mirascope/core/mistral/_call_kwargs.py +0 -19
- mirascope/core/mistral/_utils/__init__.py +0 -14
- mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
- mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
- mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
- mirascope/core/mistral/_utils/_get_json_output.py +0 -34
- mirascope/core/mistral/_utils/_handle_stream.py +0 -139
- mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
- mirascope/core/mistral/_utils/_setup_call.py +0 -164
- mirascope/core/mistral/call_params.py +0 -36
- mirascope/core/mistral/call_response.py +0 -205
- mirascope/core/mistral/call_response_chunk.py +0 -105
- mirascope/core/mistral/dynamic_config.py +0 -33
- mirascope/core/mistral/py.typed +0 -0
- mirascope/core/mistral/stream.py +0 -120
- mirascope/core/mistral/tool.py +0 -81
- mirascope/core/openai/__init__.py +0 -31
- mirascope/core/openai/_call.py +0 -67
- mirascope/core/openai/_call_kwargs.py +0 -13
- mirascope/core/openai/_utils/__init__.py +0 -14
- mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
- mirascope/core/openai/_utils/_convert_message_params.py +0 -148
- mirascope/core/openai/_utils/_get_json_output.py +0 -31
- mirascope/core/openai/_utils/_handle_stream.py +0 -138
- mirascope/core/openai/_utils/_message_param_converter.py +0 -105
- mirascope/core/openai/_utils/_setup_call.py +0 -155
- mirascope/core/openai/call_params.py +0 -92
- mirascope/core/openai/call_response.py +0 -273
- mirascope/core/openai/call_response_chunk.py +0 -139
- mirascope/core/openai/dynamic_config.py +0 -34
- mirascope/core/openai/py.typed +0 -0
- mirascope/core/openai/stream.py +0 -185
- mirascope/core/openai/tool.py +0 -101
- mirascope/core/py.typed +0 -0
- mirascope/core/vertex/__init__.py +0 -45
- mirascope/core/vertex/_call.py +0 -62
- mirascope/core/vertex/_call_kwargs.py +0 -12
- mirascope/core/vertex/_utils/__init__.py +0 -14
- mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
- mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
- mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
- mirascope/core/vertex/_utils/_get_json_output.py +0 -36
- mirascope/core/vertex/_utils/_handle_stream.py +0 -33
- mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
- mirascope/core/vertex/_utils/_setup_call.py +0 -160
- mirascope/core/vertex/call_params.py +0 -24
- mirascope/core/vertex/call_response.py +0 -206
- mirascope/core/vertex/call_response_chunk.py +0 -99
- mirascope/core/vertex/dynamic_config.py +0 -28
- mirascope/core/vertex/stream.py +0 -119
- mirascope/core/vertex/tool.py +0 -101
- mirascope/core/xai/__init__.py +0 -28
- mirascope/core/xai/_call.py +0 -67
- mirascope/core/xai/_utils/__init__.py +0 -5
- mirascope/core/xai/_utils/_setup_call.py +0 -113
- mirascope/core/xai/call_params.py +0 -10
- mirascope/core/xai/call_response.py +0 -16
- mirascope/core/xai/call_response_chunk.py +0 -14
- mirascope/core/xai/dynamic_config.py +0 -8
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +0 -57
- mirascope/core/xai/tool.py +0 -13
- mirascope/experimental/graphs/__init__.py +0 -5
- mirascope/integrations/__init__.py +0 -16
- mirascope/integrations/_middleware_factory.py +0 -403
- mirascope/integrations/langfuse/__init__.py +0 -3
- mirascope/integrations/langfuse/_utils.py +0 -114
- mirascope/integrations/langfuse/_with_langfuse.py +0 -70
- mirascope/integrations/logfire/__init__.py +0 -3
- mirascope/integrations/logfire/_utils.py +0 -225
- mirascope/integrations/logfire/_with_logfire.py +0 -63
- mirascope/integrations/otel/__init__.py +0 -10
- mirascope/integrations/otel/_utils.py +0 -270
- mirascope/integrations/otel/_with_hyperdx.py +0 -60
- mirascope/integrations/otel/_with_otel.py +0 -59
- mirascope/integrations/tenacity.py +0 -14
- mirascope/llm/_call.py +0 -401
- mirascope/llm/_context.py +0 -384
- mirascope/llm/_override.py +0 -3639
- mirascope/llm/_protocols.py +0 -500
- mirascope/llm/_response_metaclass.py +0 -31
- mirascope/llm/call_response.py +0 -158
- mirascope/llm/call_response_chunk.py +0 -66
- mirascope/llm/stream.py +0 -162
- mirascope/llm/tool.py +0 -64
- mirascope/mcp/__init__.py +0 -7
- mirascope/mcp/_utils.py +0 -288
- mirascope/mcp/client.py +0 -167
- mirascope/mcp/server.py +0 -356
- mirascope/mcp/tools.py +0 -110
- mirascope/py.typed +0 -0
- mirascope/retries/__init__.py +0 -11
- mirascope/retries/fallback.py +0 -131
- mirascope/retries/tenacity.py +0 -50
- mirascope/tools/__init__.py +0 -37
- mirascope/tools/base.py +0 -98
- mirascope/tools/system/__init__.py +0 -0
- mirascope/tools/system/_docker_operation.py +0 -166
- mirascope/tools/system/_file_system.py +0 -267
- mirascope/tools/web/__init__.py +0 -0
- mirascope/tools/web/_duckduckgo.py +0 -111
- mirascope/tools/web/_httpx.py +0 -125
- mirascope/tools/web/_parse_url_content.py +0 -94
- mirascope/tools/web/_requests.py +0 -54
- mirascope/v0/__init__.py +0 -43
- mirascope/v0/anthropic.py +0 -54
- mirascope/v0/base/__init__.py +0 -12
- mirascope/v0/base/calls.py +0 -118
- mirascope/v0/base/extractors.py +0 -122
- mirascope/v0/base/ops_utils.py +0 -207
- mirascope/v0/base/prompts.py +0 -48
- mirascope/v0/base/types.py +0 -14
- mirascope/v0/base/utils.py +0 -21
- mirascope/v0/openai.py +0 -54
- mirascope-1.25.7.dist-info/METADATA +0 -169
- mirascope-1.25.7.dist-info/RECORD +0 -378
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
- {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,697 @@
|
|
|
1
|
+
"""Base class for StreamResponse and AsyncStreamResponse."""
|
|
2
|
+
|
|
3
|
+
from collections.abc import AsyncIterator, Iterator, Sequence
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from typing import TYPE_CHECKING, Any, Generic, Literal, TypeAlias, TypeVar
|
|
6
|
+
|
|
7
|
+
from ..content import (
|
|
8
|
+
AssistantContentChunk,
|
|
9
|
+
AssistantContentPart,
|
|
10
|
+
Text,
|
|
11
|
+
TextChunk,
|
|
12
|
+
TextEndChunk,
|
|
13
|
+
TextStartChunk,
|
|
14
|
+
Thought,
|
|
15
|
+
ThoughtChunk,
|
|
16
|
+
ThoughtEndChunk,
|
|
17
|
+
ThoughtStartChunk,
|
|
18
|
+
ToolCall,
|
|
19
|
+
ToolCallChunk,
|
|
20
|
+
ToolCallEndChunk,
|
|
21
|
+
ToolCallStartChunk,
|
|
22
|
+
)
|
|
23
|
+
from ..formatting import Format, FormattableT, Partial
|
|
24
|
+
from ..messages import AssistantMessage, Message
|
|
25
|
+
from ..tools import FORMAT_TOOL_NAME, ToolkitT
|
|
26
|
+
from ..types import Jsonable
|
|
27
|
+
from .finish_reason import FinishReasonChunk
|
|
28
|
+
from .root_response import RootResponse
|
|
29
|
+
from .streams import (
|
|
30
|
+
AsyncStream,
|
|
31
|
+
AsyncTextStream,
|
|
32
|
+
AsyncThoughtStream,
|
|
33
|
+
AsyncToolCallStream,
|
|
34
|
+
Stream,
|
|
35
|
+
TextStream,
|
|
36
|
+
ThoughtStream,
|
|
37
|
+
ToolCallStream,
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
if TYPE_CHECKING:
|
|
41
|
+
from ..clients import ModelId, Params, Provider
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass(kw_only=True)
|
|
45
|
+
class RawStreamEventChunk:
|
|
46
|
+
"""A chunk containing a raw stream event from the underlying provider.
|
|
47
|
+
|
|
48
|
+
Will be accumulated on `StreamResponse.raw` for debugging purposes.
|
|
49
|
+
"""
|
|
50
|
+
|
|
51
|
+
type: Literal["raw_stream_event_chunk"] = "raw_stream_event_chunk"
|
|
52
|
+
|
|
53
|
+
raw_stream_event: Any
|
|
54
|
+
"""The raw stream event from the underlying provider."""
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@dataclass(kw_only=True)
class RawMessageChunk:
    """A chunk of provider-specific raw content bound for the `AssistantMessage`.

    Carries a provider-native representation of one piece of content that the
    containing stream folds into the `AssistantMessage` it reconstructs. The
    payload should be a Jsonable Python object so that it can be serialized.

    The intent is that this content may be handed back to the provider verbatim
    whenever the generated `AssistantMessage` is reused in a conversation.
    """

    type: Literal["raw_message_chunk"] = "raw_message_chunk"

    raw_message: Jsonable
    """The provider-specific raw content; should be a Jsonable object."""
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
# Every chunk variant a stream response iterator may yield: assistant content,
# finish reasons, raw provider events, and raw message content.
StreamResponseChunk: TypeAlias = (
    AssistantContentChunk | FinishReasonChunk | RawStreamEventChunk | RawMessageChunk
)

ChunkIterator: TypeAlias = Iterator[StreamResponseChunk]
"""Synchronous iterator yielding chunks with raw data."""

AsyncChunkIterator: TypeAlias = AsyncIterator[StreamResponseChunk]
"""Asynchronous iterator yielding chunks with raw data."""

# Type variable ranging over both the sync and async chunk iterator forms, so
# BaseStreamResponse can be parameterized by whichever one it consumes.
ChunkIteratorT = TypeVar("ChunkIteratorT", bound=ChunkIterator | AsyncChunkIterator)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
class BaseStreamResponse(
|
|
92
|
+
RootResponse[ToolkitT, FormattableT],
|
|
93
|
+
Generic[ChunkIteratorT, ToolkitT, FormattableT],
|
|
94
|
+
):
|
|
95
|
+
"""Base class underpinning StreamResponse and AsyncStreamResponse.
|
|
96
|
+
|
|
97
|
+
Manages chunk handling logic for both.
|
|
98
|
+
"""
|
|
99
|
+
|
|
100
|
+
raw_stream_events: Sequence[Any]
|
|
101
|
+
"""The raw stream event chunks from the LLM. Provider-specific."""
|
|
102
|
+
|
|
103
|
+
chunks: Sequence[AssistantContentChunk]
|
|
104
|
+
"""All of the Mirascope chunks consumed from the stream."""
|
|
105
|
+
|
|
106
|
+
content: Sequence[AssistantContentPart]
|
|
107
|
+
"""The content generated by the LLM.
|
|
108
|
+
|
|
109
|
+
Content is updated in this array as it is consumed by the stream. Text content will
|
|
110
|
+
update with each text chunk (this will mutate the Text object that is returned
|
|
111
|
+
rather than creating a new one). Other content will be added once each part
|
|
112
|
+
is fully streamed.
|
|
113
|
+
"""
|
|
114
|
+
|
|
115
|
+
messages: list[Message]
|
|
116
|
+
"""The message history, including the most recent assistant message.
|
|
117
|
+
|
|
118
|
+
The most recent assistant message will have all of the completed content that has
|
|
119
|
+
already been consumed from the stream. Text content will be included as each chunk
|
|
120
|
+
is processed; other content will be included only when its corresponding part is
|
|
121
|
+
completed (to avoid partial tool calls and the like). If no content has been
|
|
122
|
+
streamed, then the final assistant message will be present (to maintain turn order
|
|
123
|
+
expectations), but will be empty.
|
|
124
|
+
"""
|
|
125
|
+
|
|
126
|
+
texts: Sequence[Text]
|
|
127
|
+
"""The text content in the generated response, if any.
|
|
128
|
+
|
|
129
|
+
Text content updates with each text chunk as it streams. The `Text` objects are
|
|
130
|
+
mutated in place rather than creating new ones for each chunk.
|
|
131
|
+
"""
|
|
132
|
+
|
|
133
|
+
tool_calls: Sequence[ToolCall]
|
|
134
|
+
"""The tools the LLM wants called on its behalf, if any.
|
|
135
|
+
|
|
136
|
+
Tool calls are only added to this sequence once they have been fully streamed
|
|
137
|
+
to avoid partial tool calls in the response.
|
|
138
|
+
"""
|
|
139
|
+
|
|
140
|
+
thoughts: Sequence[Thought]
|
|
141
|
+
"""The readable thoughts from the model's thinking process, if any.
|
|
142
|
+
|
|
143
|
+
The thoughts may be direct output from the model thinking process, or may be a
|
|
144
|
+
generated summary. (This depends on the provider; newer models tend to summarize.)
|
|
145
|
+
|
|
146
|
+
Thoughts are added to the sequence as they are streamed. The `Thought` objects are
|
|
147
|
+
mutated in place rather than creating new ones for each chunk.
|
|
148
|
+
"""
|
|
149
|
+
|
|
150
|
+
consumed: bool = False
|
|
151
|
+
"""Whether the stream has been fully consumed.
|
|
152
|
+
|
|
153
|
+
This is True after all chunks have been processed from the underlying iterator.
|
|
154
|
+
When False, more content may be available by calling the stream methods.
|
|
155
|
+
"""
|
|
156
|
+
|
|
157
|
+
def __init__(
    self,
    *,
    provider: "Provider",
    model_id: "ModelId",
    params: "Params",
    toolkit: ToolkitT,
    format: Format[FormattableT] | None = None,
    input_messages: Sequence[Message],
    chunk_iterator: ChunkIteratorT,
) -> None:
    """Initialize the BaseStreamResponse.

    Args:
        provider: The provider name (e.g. "anthropic", "openai:completions").
        model_id: The model identifier that generated the response.
        params: The params used to generate the response (or None).
        toolkit: Toolkit containing all the tools used to generate the response.
        format: The `Format` for the expected structured output format (or None).
        input_messages: The input messages that were sent to the LLM
        chunk_iterator: The underlying (sync or async) iterator of chunks to consume.

    The BaseStreamResponse will process the tuples to build the chunks and raw lists
    as the stream is consumed.
    """

    self.provider = provider
    self.model_id = model_id
    self.params = params
    self.toolkit = toolkit
    self.format = format

    # Internal-only lists which we mutate (append) during chunk processing
    self._chunks: list[AssistantContentChunk] = []
    self._content: list[AssistantContentPart] = []
    self._texts: list[Text] = []
    self._thoughts: list[Thought] = []
    self._tool_calls: list[ToolCall] = []
    self._raw_stream_events: list[Any] = []
    self._last_raw_stream_event_chunk: Any | None = None

    # Externally-facing references typed as immutable Sequences.
    # NOTE: these deliberately alias the private lists above, so appends made
    # during streaming are immediately visible through the public attributes.
    self.chunks = self._chunks
    self.content = self._content
    self.texts = self._texts
    self.thoughts = self._thoughts
    self.tool_calls = self._tool_calls
    self.raw_stream_events = self._raw_stream_events

    # No finish reason until the provider reports one at the end of the stream.
    self.finish_reason = None

    # The assistant message shares the same mutable content list, so it stays
    # current as content parts are appended while the stream is consumed.
    self._assistant_message = AssistantMessage(
        content=self._content,
        provider=provider,
        model_id=model_id,
        raw_message=None,
    )

    # The final assistant message is present from the start (possibly empty)
    # to maintain turn-order expectations.
    self.messages = list(input_messages) + [self._assistant_message]

    self._chunk_iterator = chunk_iterator
    # The content part currently being assembled from delta chunks, if any.
    self._current_content: Text | Thought | ToolCall | None = None

    # True while rewriting the synthetic format tool's chunks into text chunks.
    self._processing_format_tool: bool = False
|
|
220
|
+
|
|
221
|
+
def _transform_format_tool_chunks(
    self, chunk: AssistantContentChunk
) -> AssistantContentChunk:
    """Rewrite chunks from the synthetic format tool into plain text chunks.

    When structured output is driven by an internal "format" tool, its
    tool-call chunks are surfaced to consumers as ordinary text content.
    All other chunks pass through unchanged.
    """
    kind = chunk.type
    if kind == "tool_call_start_chunk":
        if chunk.name.startswith(FORMAT_TOOL_NAME):
            self._processing_format_tool = True
            return TextStartChunk()
        return chunk
    if not self._processing_format_tool:
        return chunk
    if kind == "tool_call_chunk":
        return TextChunk(delta=chunk.delta)
    if kind == "tool_call_end_chunk":
        self._processing_format_tool = False
        return TextEndChunk()
    return chunk
|
|
235
|
+
|
|
236
|
+
def _handle_chunk(self, chunk: AssistantContentChunk) -> AssistantContentChunk:
|
|
237
|
+
if self.finish_reason:
|
|
238
|
+
raise RuntimeError(
|
|
239
|
+
f"Stream already finished with reason: {self.finish_reason}"
|
|
240
|
+
)
|
|
241
|
+
chunk = self._transform_format_tool_chunks(chunk)
|
|
242
|
+
|
|
243
|
+
if chunk.content_type == "text":
|
|
244
|
+
self._handle_text_chunk(chunk)
|
|
245
|
+
elif chunk.content_type == "tool_call":
|
|
246
|
+
self._handle_tool_call_chunk(chunk)
|
|
247
|
+
elif chunk.content_type == "thought":
|
|
248
|
+
self._handle_thought_chunk(chunk)
|
|
249
|
+
else:
|
|
250
|
+
raise NotImplementedError
|
|
251
|
+
|
|
252
|
+
self._chunks.append(chunk)
|
|
253
|
+
return chunk
|
|
254
|
+
|
|
255
|
+
def _handle_text_chunk(
|
|
256
|
+
self, chunk: TextStartChunk | TextChunk | TextEndChunk
|
|
257
|
+
) -> None:
|
|
258
|
+
if chunk.type == "text_start_chunk":
|
|
259
|
+
if self._current_content:
|
|
260
|
+
raise RuntimeError(
|
|
261
|
+
"Received text_start_chunk while processing another chunk"
|
|
262
|
+
)
|
|
263
|
+
self._current_content = Text(text="")
|
|
264
|
+
# Text gets included in content even when unfinished.
|
|
265
|
+
self._content.append(self._current_content)
|
|
266
|
+
self._texts.append(self._current_content)
|
|
267
|
+
|
|
268
|
+
elif chunk.type == "text_chunk":
|
|
269
|
+
if self._current_content is None or self._current_content.type != "text":
|
|
270
|
+
raise RuntimeError("Received text_chunk while not processing text.")
|
|
271
|
+
self._current_content.text += chunk.delta
|
|
272
|
+
|
|
273
|
+
elif chunk.type == "text_end_chunk":
|
|
274
|
+
if self._current_content is None or self._current_content.type != "text":
|
|
275
|
+
raise RuntimeError("Received text_end_chunk while not processing text.")
|
|
276
|
+
self._current_content = None
|
|
277
|
+
|
|
278
|
+
def _handle_thought_chunk(
|
|
279
|
+
self, chunk: ThoughtStartChunk | ThoughtChunk | ThoughtEndChunk
|
|
280
|
+
) -> None:
|
|
281
|
+
if chunk.type == "thought_start_chunk":
|
|
282
|
+
if self._current_content:
|
|
283
|
+
raise RuntimeError(
|
|
284
|
+
"Received thought_start_chunk while processing another chunk"
|
|
285
|
+
)
|
|
286
|
+
self._current_content = Thought(thought="")
|
|
287
|
+
# Thoughts get included even when unfinished.
|
|
288
|
+
self._content.append(self._current_content)
|
|
289
|
+
self._thoughts.append(self._current_content)
|
|
290
|
+
|
|
291
|
+
elif chunk.type == "thought_chunk":
|
|
292
|
+
if self._current_content is None or self._current_content.type != "thought":
|
|
293
|
+
raise RuntimeError(
|
|
294
|
+
"Received thought_chunk while not processing thought."
|
|
295
|
+
)
|
|
296
|
+
self._current_content.thought += chunk.delta
|
|
297
|
+
|
|
298
|
+
elif chunk.type == "thought_end_chunk":
|
|
299
|
+
if self._current_content is None or self._current_content.type != "thought":
|
|
300
|
+
raise RuntimeError(
|
|
301
|
+
"Received thought_end_chunk while not processing thought."
|
|
302
|
+
)
|
|
303
|
+
self._current_content = None
|
|
304
|
+
|
|
305
|
+
def _handle_tool_call_chunk(
|
|
306
|
+
self, chunk: ToolCallStartChunk | ToolCallChunk | ToolCallEndChunk
|
|
307
|
+
) -> None:
|
|
308
|
+
if chunk.type == "tool_call_start_chunk":
|
|
309
|
+
if self._current_content:
|
|
310
|
+
raise RuntimeError(
|
|
311
|
+
"Received tool_call_start_chunk while processing another chunk"
|
|
312
|
+
)
|
|
313
|
+
self._current_content = ToolCall(
|
|
314
|
+
id=chunk.id,
|
|
315
|
+
name=chunk.name,
|
|
316
|
+
args="",
|
|
317
|
+
)
|
|
318
|
+
|
|
319
|
+
elif chunk.type == "tool_call_chunk":
|
|
320
|
+
if (
|
|
321
|
+
self._current_content is None
|
|
322
|
+
or self._current_content.type != "tool_call"
|
|
323
|
+
):
|
|
324
|
+
raise RuntimeError(
|
|
325
|
+
"Received tool_call_chunk while not processing tool call."
|
|
326
|
+
)
|
|
327
|
+
self._current_content.args += chunk.delta
|
|
328
|
+
|
|
329
|
+
elif chunk.type == "tool_call_end_chunk":
|
|
330
|
+
if (
|
|
331
|
+
self._current_content is None
|
|
332
|
+
or self._current_content.type != "tool_call"
|
|
333
|
+
):
|
|
334
|
+
raise RuntimeError(
|
|
335
|
+
"Received tool_call_end_chunk while not processing tool call."
|
|
336
|
+
)
|
|
337
|
+
if not self._current_content.args:
|
|
338
|
+
self._current_content.args = "{}"
|
|
339
|
+
self._content.append(self._current_content)
|
|
340
|
+
self._tool_calls.append(self._current_content)
|
|
341
|
+
self._current_content = None
|
|
342
|
+
|
|
343
|
+
def _pretty_chunk(self, chunk: AssistantContentChunk, spacer: str) -> str:
|
|
344
|
+
match chunk.type:
|
|
345
|
+
case "text_start_chunk":
|
|
346
|
+
return spacer
|
|
347
|
+
case "text_chunk":
|
|
348
|
+
return chunk.delta
|
|
349
|
+
case "tool_call_start_chunk":
|
|
350
|
+
return spacer + f"**ToolCall ({chunk.name}):** "
|
|
351
|
+
case "tool_call_chunk":
|
|
352
|
+
return chunk.delta
|
|
353
|
+
case "thought_start_chunk":
|
|
354
|
+
return spacer + "**Thinking:**\n "
|
|
355
|
+
case "thought_chunk":
|
|
356
|
+
return chunk.delta.replace("\n", "\n ") # Indent every line
|
|
357
|
+
case _:
|
|
358
|
+
return ""
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
class BaseSyncStreamResponse(BaseStreamResponse[ChunkIterator, ToolkitT, FormattableT]):
    """A base class for synchronous Stream Responses."""

    def streams(self) -> Iterator[Stream]:
        """Returns an iterator that yields streams for each content part in the response.

        Returns:
            Iterator[Stream]: Synchronous iterator yielding Stream objects

        Each content part in the response will correspond to one stream, which will yield
        chunks of content as they come in from the underlying LLM.

        Fully iterating through this iterator will fully consume the underlying stream,
        updating the Response with all collected content.

        As content is consumed, it is cached on the StreamResponse. If a new iterator
        is constructed via calling `streams()`, it will start by replaying the cached
        content from the response, and (if there is still more content to consume from
        the LLM), it will proceed to consume it once it has iterated through all the
        cached chunks.
        """
        # All per-part iterators below share this single chunk iterator, so
        # advancing one advances the shared position in the stream.
        chunk_iter = self.chunk_stream()

        for start_chunk in chunk_iter:
            # At the start of this loop, we always expect to find a start chunk. Then,
            # before proceeding, we will collect from the stream we create (in case the
            # user did not exhaust it), which ensures we will be expecting a start chunk
            # again on the next iteration
            match start_chunk.type:
                case "text_start_chunk":

                    def text_stream_iterator() -> Iterator[TextChunk]:
                        # Yield deltas until a non-delta (end) chunk appears.
                        for chunk in chunk_iter:
                            if chunk.type == "text_chunk":
                                yield chunk
                            else:
                                return  # Stream finished

                    stream = TextStream(chunk_iterator=text_stream_iterator())
                    yield stream

                case "thought_start_chunk":

                    def thought_stream_iterator() -> Iterator[ThoughtChunk]:
                        # Yield deltas until a non-delta (end) chunk appears.
                        for chunk in chunk_iter:
                            if chunk.type == "thought_chunk":
                                yield chunk
                            else:
                                return  # Stream finished

                    stream = ThoughtStream(chunk_iterator=thought_stream_iterator())
                    yield stream

                case "tool_call_start_chunk":
                    # Capture id/name now; later chunks carry only arg deltas.
                    tool_id = start_chunk.id
                    tool_name = start_chunk.name

                    def tool_call_stream_iterator() -> Iterator[ToolCallChunk]:
                        # Yield deltas until a non-delta (end) chunk appears.
                        for chunk in chunk_iter:
                            if chunk.type == "tool_call_chunk":
                                yield chunk
                            else:
                                return  # Stream finished

                    stream = ToolCallStream(
                        tool_id=tool_id,
                        tool_name=tool_name,
                        chunk_iterator=tool_call_stream_iterator(),
                    )
                    yield stream

                case _:  # pragma: no cover
                    raise RuntimeError(f"Expected start chunk, got: {start_chunk.type}")

            # Before continuing to the next stream, make sure the last stream is consumed
            # (If the user did not do so when we yielded it)
            stream.collect()

    def chunk_stream(
        self,
    ) -> Iterator[AssistantContentChunk]:
        """Returns an iterator that yields content chunks as they are received.

        Returns:
            Iterator[AssistantContentChunk]: Synchronous iterator yielding chunks

        This provides access to the Mirascope chunk data including start, delta, and end chunks
        for each content type (text, thought, tool_call). Unlike the streams() method
        that groups chunks by content part, this yields individual chunks as they arrive.

        Fully iterating through this iterator will fully consume the underlying stream,
        updating the Response with all collected content.

        As chunks are consumed, they are cached on the StreamResponse. If a new iterator
        is constructed via calling `chunk_stream()`, it will start by replaying the cached
        chunks from the response, and (if there is still more content to consume from
        the LLM), it will proceed to consume it once it has iterated through all the
        cached chunks.
        """
        # Replay chunks an earlier iterator already consumed and cached.
        for chunk in self.chunks:
            yield chunk

        if self.consumed:
            return

        for chunk in self._chunk_iterator:
            # Bookkeeping chunks update response state but are not yielded;
            # only content chunks reach the caller (via _handle_chunk).
            if chunk.type == "raw_stream_event_chunk":
                self._raw_stream_events.append(chunk.raw_stream_event)
            elif chunk.type == "raw_message_chunk":
                self._assistant_message.raw_message = chunk.raw_message
            elif chunk.type == "finish_reason_chunk":
                self.finish_reason = chunk.finish_reason
            else:
                yield self._handle_chunk(chunk)

        self.consumed = True

    def finish(self) -> None:
        """Finish streaming all of this response's content."""
        for _ in self.chunk_stream():
            pass

    def pretty_stream(self) -> Iterator[str]:
        """Stream a readable representation of the stream_response as text.

        Returns:
            Iterator[str]: Iterator yielding string chunks depicting the content

        Iterating through the pretty stream will populate the stream response by consuming
        the underlying iterator (if it hasn't been consumed already). Calling `.pretty_stream()`
        will always return a fresh iterator that begins from the start of the stream.

        If you concatenate the text from `.pretty_stream()`, it will be equivalent to the
        text generated by calling `.pretty()` (assuming the response was fully consumed
        at the time when you call `.pretty()`).
        """
        # Tracks whether anything was emitted, both to place spacers between
        # parts and to emit the no-content fallback at the end.
        printed = False

        for chunk in self.chunk_stream():
            pretty = self._pretty_chunk(chunk, "\n\n" if printed else "")
            if pretty != "":
                printed = True
                yield pretty

        if not printed:
            yield "**[No Content]**"

    def structured_stream(
        self,
    ) -> Iterator[Partial[FormattableT]]:
        """Returns an iterator that yields partial structured objects as content streams.

        Returns:
            Iterator[Partial[FormatT]]: Synchronous iterator yielding partial structured objects

        This method yields Partial[FormatT] objects as the response content is streamed,
        allowing you to access partial structured data before the response is fully complete.
        Each yielded object represents the current state of the parsed structure with all
        fields optional.

        Fully iterating through this iterator will fully consume the underlying stream,
        updating the Response with all collected content.
        """
        raise NotImplementedError()
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
class BaseAsyncStreamResponse(
    BaseStreamResponse[AsyncChunkIterator, ToolkitT, FormattableT]
):
    """A base class for asynchronous Stream Responses."""

    async def streams(self) -> AsyncIterator[AsyncStream]:
        """Returns an async iterator that yields streams for each content part in the response.

        Returns:
            AsyncIterator[AsyncStream]: Async iterator yielding AsyncStream objects

        Each content part in the response will correspond to one stream, which will yield
        chunks of content as they come in from the underlying LLM.

        Fully iterating through this iterator will fully consume the underlying stream,
        updating the Response with all collected content.

        As content is consumed, it is cached on the AsyncStreamResponse. If a new iterator
        is constructed via calling `streams()`, it will start by replaying the cached
        content from the response, and (if there is still more content to consume from
        the LLM), it will proceed to consume it once it has iterated through all the
        cached chunks.
        """
        # All per-part iterators below share this single chunk iterator, so
        # advancing one advances the shared position in the stream.
        chunk_iter = self.chunk_stream()

        async for start_chunk in chunk_iter:
            # At the start of this loop, we always expect to find a start chunk. Then,
            # before proceeding, we will collect from the stream we create (in case the
            # user did not exhaust it), which ensures we will be expecting a start chunk
            # again on the next iteration
            match start_chunk.type:
                case "text_start_chunk":

                    async def text_stream_iterator() -> AsyncIterator[TextChunk]:
                        # Yield deltas until a non-delta (end) chunk appears.
                        async for chunk in chunk_iter:
                            if chunk.type == "text_chunk":
                                yield chunk
                            else:
                                return  # Stream finished

                    stream = AsyncTextStream(chunk_iterator=text_stream_iterator())
                    yield stream

                case "thought_start_chunk":

                    async def thought_stream_iterator() -> AsyncIterator[ThoughtChunk]:
                        # Yield deltas until a non-delta (end) chunk appears.
                        async for chunk in chunk_iter:
                            if chunk.type == "thought_chunk":
                                yield chunk
                            else:
                                return  # Stream finished

                    stream = AsyncThoughtStream(
                        chunk_iterator=thought_stream_iterator()
                    )
                    yield stream

                case "tool_call_start_chunk":
                    # Capture id/name now; later chunks carry only arg deltas.
                    tool_id = start_chunk.id
                    tool_name = start_chunk.name

                    async def tool_call_stream_iterator() -> AsyncIterator[
                        ToolCallChunk
                    ]:
                        # Yield deltas until a non-delta (end) chunk appears.
                        async for chunk in chunk_iter:
                            if chunk.type == "tool_call_chunk":
                                yield chunk
                            else:
                                return  # Stream finished

                    stream = AsyncToolCallStream(
                        tool_id=tool_id,
                        tool_name=tool_name,
                        chunk_iterator=tool_call_stream_iterator(),
                    )
                    yield stream

                case _:  # pragma: no cover
                    raise RuntimeError(f"Expected start chunk, got: {start_chunk.type}")

            # Before continuing to the next stream, make sure the last stream is consumed
            # (If the user did not do so when we yielded it)
            await stream.collect()

    async def chunk_stream(
        self,
    ) -> AsyncIterator[AssistantContentChunk]:
        """Returns an async iterator that yields content chunks as they are received.

        Returns:
            AsyncIterator[AssistantContentChunk]: Async iterator yielding chunks

        This provides access to the Mirascope chunk data including start, delta, and end chunks
        for each content type (text, thought, tool_call). Unlike the streams() method
        that groups chunks by content part, this yields individual chunks as they arrive.

        Fully iterating through this iterator will fully consume the underlying stream,
        updating the Response with all collected content.

        As chunks are consumed, they are cached on the AsyncStreamResponse. If a new iterator
        is constructed via calling `chunk_stream()`, it will start by replaying the cached
        chunks from the response, and (if there is still more content to consume from
        the LLM), it will proceed to consume it once it has iterated through all the
        cached chunks.
        """

        # Replay chunks an earlier iterator already consumed and cached.
        for chunk in self.chunks:
            yield chunk

        if self.consumed:
            return

        async for chunk in self._chunk_iterator:
            # Bookkeeping chunks update response state but are not yielded;
            # only content chunks reach the caller (via _handle_chunk).
            if chunk.type == "raw_stream_event_chunk":
                self._raw_stream_events.append(chunk.raw_stream_event)
            elif chunk.type == "raw_message_chunk":
                self._assistant_message.raw_message = chunk.raw_message
            elif chunk.type == "finish_reason_chunk":
                self.finish_reason = chunk.finish_reason
            else:
                yield self._handle_chunk(chunk)

        self.consumed = True

    async def finish(self) -> None:
        """Finish streaming all of this response's content."""
        async for _ in self.chunk_stream():
            pass

    async def pretty_stream(self) -> AsyncIterator[str]:
        """Stream a readable representation of the stream_response as text.

        Returns:
            AsyncIterator[str]: Async iterator yielding string chunks depicting the content

        Iterating through the pretty stream will populate the stream response by consuming
        the underlying iterator (if it hasn't been consumed already). Calling `.pretty_stream()`
        will always return a fresh iterator that begins from the start of the stream.

        If you concatenate the text from `.pretty_stream()`, it will be equivalent to the
        text generated by calling `.pretty()` (assuming the response was fully consumed
        at the time when you call `.pretty()`).
        """
        # Tracks whether anything was emitted, both to place spacers between
        # parts and to emit the no-content fallback at the end.
        printed = False

        async for chunk in self.chunk_stream():
            pretty = self._pretty_chunk(chunk, "\n\n" if printed else "")
            if pretty != "":
                printed = True
                yield pretty

        if not printed:
            yield "**[No Content]**"

    def structured_stream(
        self,
    ) -> AsyncIterator[Partial[FormattableT]]:
        """Returns an async iterator that yields partial structured objects as content streams.

        Returns:
            AsyncIterator[Partial[FormatT]]: Async iterator yielding partial structured objects

        This method yields Partial[FormatT] objects as the response content is streamed,
        allowing you to access partial structured data before the response is fully complete.
        Each yielded object represents the current state of the parsed structure with all
        fields optional.

        Fully iterating through this iterator will fully consume the underlying stream,
        updating the Response with all collected content.
        """
        raise NotImplementedError()
|