ag2 0.9.1a1__py3-none-any.whl → 0.9.1.post0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of ag2 might be problematic.
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/METADATA +264 -73
- ag2-0.9.1.post0.dist-info/RECORD +392 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4020 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1010 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +113 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +379 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1435 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +990 -0
- autogen/oai/gemini_types.py +129 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +43 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/NOTICE.md +0 -0
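
Two of the added source files are rendered below. Note that every `autogen/` module in the listing is newly added (+N −0) and the old `ag2-0.9.1a1` RECORD carried only 6 entries against 392 in the new one, which suggests the 0.9.1a1 wheel shipped essentially no code and 0.9.1.post0 restores the full package. A quick smoke test of the restored layout, assuming the wheel is installed, might look like:

```python
# Hypothetical smoke test (not part of the diff). The imports mirror the file
# listing above; the exact re-exports are assumptions based on the package layout.
import autogen
from autogen.agentchat import ConversableAgent, GroupChat  # autogen/agentchat/*
from autogen.tools import Tool                             # autogen/tools/tool.py

print(autogen.__version__)  # expected to report the installed 0.9.1.post0 build
```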
autogen/agentchat/contrib/llava_agent.py

@@ -0,0 +1,187 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+import json
+import logging
+from typing import Any, Optional, Union
+
+import requests
+
+from ...code_utils import content_str
+from ...formatting_utils import colored
+from ...import_utils import optional_import_block, require_optional_import
+from ...llm_config import LLMConfig
+from ..agent import Agent
+from .img_utils import get_image_data, llava_formatter
+from .multimodal_conversable_agent import MultimodalConversableAgent
+
+with optional_import_block():
+    import replicate
+
+logger = logging.getLogger(__name__)
+
+# we will override the following variables later.
+SEP = "###"
+
+DEFAULT_LLAVA_SYS_MSG = "You are an AI agent and you can view images."
+
+
+class LLaVAAgent(MultimodalConversableAgent):
+    def __init__(
+        self,
+        name: str,
+        system_message: Optional[tuple[str, list]] = DEFAULT_LLAVA_SYS_MSG,
+        *args,
+        **kwargs: Any,
+    ):
+        """Args:
+        name (str): agent name.
+        system_message (str): system message for the ChatCompletion inference.
+            Please override this attribute if you want to reprogram the agent.
+        **kwargs (dict): Please refer to other kwargs in
+            [ConversableAgent](/docs/api-reference/autogen/ConversableAgent#conversableagent).
+        """
+        super().__init__(
+            name,
+            system_message=system_message,
+            *args,
+            **kwargs,
+        )
+
+        assert self.llm_config is not None, "llm_config must be provided."
+        self.register_reply([Agent, None], reply_func=LLaVAAgent._image_reply, position=2)
+
+    def _image_reply(self, messages=None, sender=None, config=None):
+        # Note: we did not use "llm_config" yet.
+
+        if all((messages is None, sender is None)):
+            error_msg = f"Either {messages=} or {sender=} must be provided."
+            logger.error(error_msg)
+            raise AssertionError(error_msg)
+
+        if messages is None:
+            messages = self._oai_messages[sender]
+
+        # The formats for LLaVA and GPT are different. So, we manually handle them here.
+        images = []
+        prompt = content_str(self.system_message) + "\n"
+        for msg in messages:
+            role = "Human" if msg["role"] == "user" else "Assistant"
+            # pdb.set_trace()
+            images += [d["image_url"]["url"] for d in msg["content"] if d["type"] == "image_url"]
+            content_prompt = content_str(msg["content"])
+            prompt += f"{SEP}{role}: {content_prompt}\n"
+        prompt += "\n" + SEP + "Assistant: "
+
+        # TODO: PIL to base64
+        images = [get_image_data(im) for im in images]
+        print(colored(prompt, "blue"))
+
+        out = ""
+        retry = 10
+        while len(out) == 0 and retry > 0:
+            # image names will be inferred automatically from llava_call
+            out = llava_call_binary(
+                prompt=prompt,
+                images=images,
+                config_list=self.llm_config["config_list"],
+                temperature=self.llm_config.get("temperature", 0.5),
+                max_new_tokens=self.llm_config.get("max_new_tokens", 2000),
+            )
+            retry -= 1
+
+        assert out != "", "Empty response from LLaVA."
+
+        return True, out
+
+
+@require_optional_import("replicate", "lmm")
+def _llava_call_binary_with_config(
+    prompt: str,
+    images: list[Any],
+    config: dict[str, Any],
+    max_new_tokens: int = 1000,
+    temperature: float = 0.5,
+    seed: int = 1,
+):
+    if config["base_url"].find("0.0.0.0") >= 0 or config["base_url"].find("localhost") >= 0:
+        llava_mode = "local"
+    else:
+        llava_mode = "remote"
+
+    if llava_mode == "local":
+        headers = {"User-Agent": "LLaVA Client"}
+        pload = {
+            "model": config["model"],
+            "prompt": prompt,
+            "max_new_tokens": max_new_tokens,
+            "temperature": temperature,
+            "stop": SEP,
+            "images": images,
+        }
+
+        response = requests.post(
+            config["base_url"].rstrip("/") + "/worker_generate_stream", headers=headers, json=pload, stream=False
+        )
+
+        for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
+            if chunk:
+                data = json.loads(chunk.decode("utf-8"))
+                output = data["text"].split(SEP)[-1]
+    elif llava_mode == "remote":
+        # The Replicate version of the model only support 1 image for now.
+        img = "data:image/jpeg;base64," + images[0]
+        response = replicate.run(
+            config["base_url"], input={"image": img, "prompt": prompt.replace("<image>", " "), "seed": seed}
+        )
+        # The yorickvp/llava-13b model can stream output as it's running.
+        # The predict method returns an iterator, and you can iterate over that output.
+        output = ""
+        for item in response:
+            # https://replicate.com/yorickvp/llava-13b/versions/2facb4a474a0462c15041b78b1ad70952ea46b5ec6ad29583c0b29dbd4249591/api#output-schema
+            output += item
+
+    # Remove the prompt and the space.
+    output = output.replace(prompt, "").strip().rstrip()
+    return output
+
+
+@require_optional_import("replicate", "lmm")
+def llava_call_binary(
+    prompt: str,
+    images: list[Any],
+    config_list: list[dict[str, Any]],
+    max_new_tokens: int = 1000,
+    temperature: float = 0.5,
+    seed: int = 1,
+):
+    # TODO 1: add caching around the LLaVA call to save compute and cost
+    # TODO 2: add `seed` to ensure reproducibility. The seed is not working now.
+
+    for config in config_list:
+        try:
+            return _llava_call_binary_with_config(prompt, images, config, max_new_tokens, temperature, seed)
+        except Exception as e:
+            print(f"Error: {e}")
+            continue
+
+
+def llava_call(prompt: str, llm_config: Union[LLMConfig, dict]) -> str:
+    """Makes a call to the LLaVA service to generate text based on a given prompt"""
+    prompt, images = llava_formatter(prompt, order_image_tokens=False)
+
+    for im in images:
+        if len(im) == 0:
+            raise RuntimeError("An image is empty!")
+
+    return llava_call_binary(
+        prompt,
+        images,
+        config_list=llm_config["config_list"],
+        max_new_tokens=llm_config.get("max_new_tokens", 2000),
+        temperature=llm_config.get("temperature", 0.5),
+        seed=llm_config.get("seed"),
+    )
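
For context, a hedged usage sketch of the restored `LLaVAAgent` follows; it is not part of the diff. The endpoint, model name, and image URL are placeholders, and local mode is assumed (a `base_url` containing `localhost` routes the call to the `/worker_generate_stream` branch of `_llava_call_binary_with_config` above).

```python
# Hypothetical usage sketch for LLaVAAgent; all config values are placeholders.
from autogen import UserProxyAgent
from autogen.agentchat.contrib.llava_agent import LLaVAAgent

llava_config_list = [
    {
        "model": "llava-v1.5-13b",            # placeholder model name
        "api_key": "None",                    # a local LLaVA worker ignores the key
        "base_url": "http://localhost:8000",  # "localhost" selects local mode
    }
]

image_agent = LLaVAAgent(
    name="image_agent",
    llm_config={"config_list": llava_config_list, "temperature": 0.5, "max_new_tokens": 1000},
)
user = UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config=False)

# llava_formatter / get_image_data turn <img ...> tags into base64 image payloads.
user.initiate_chat(
    image_agent,
    message="What is shown in this picture? <img https://example.com/photo.jpg>",
)
```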
autogen/agentchat/contrib/math_user_proxy_agent.py

@@ -0,0 +1,464 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+import os
+import re
+from time import sleep
+from typing import Any, Callable, Dict, Literal, Optional, Union
+
+from pydantic import BaseModel, root_validator
+
+from ...code_utils import UNKNOWN, execute_code, extract_code, infer_lang
+from ...import_utils import optional_import_block, require_optional_import
+from ...math_utils import get_answer
+from .. import Agent, UserProxyAgent
+
+with optional_import_block() as result:
+    import wolframalpha
+
+PROMPTS = {
+    # default
+    "default": """Let's use Python to solve a math problem.
+
+Query requirements:
+You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.
+You can use packages like sympy to help you.
+You must follow the formats below to write your code:
+```python
+# your code
+```
+
+First state the key idea to solve the problem. You may choose from three ways to solve the problem:
+Case 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.
+Case 2: If the problem is mostly reasoning, you can solve it by yourself directly.
+Case 3: If the problem cannot be handled in the above two ways, please follow this process:
+1. Solve the problem step by step (do not over-divide the steps).
+2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).
+3. Wait for me to give the results.
+4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.
+
+After all the queries are run and you get the answer, put the answer in \\boxed{}.
+
+Problem:
+""",
+    # select python or wolfram
+    "two_tools": """Let's use two tools (Python and Wolfram alpha) to solve a math problem.
+
+Query requirements:
+You must follow the formats below to write your query:
+For Wolfram Alpha:
+```wolfram
+# one wolfram query
+```
+For Python:
+```python
+# your code
+```
+When using Python, you should always use the 'print' function for the output and use fractions/radical forms instead of decimals. You can use packages like sympy to help you.
+When using wolfram, give one query in each code block.
+
+Please follow this process:
+1. Solve the problem step by step (do not over-divide the steps).
+2. Take out any queries that can be asked through Python or Wolfram Alpha, select the most suitable tool to be used (for example, any calculations or equations that can be calculated).
+3. Wait for me to give the results.
+4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.
+
+After all the queries are run and you get the answer, put the final answer in \\boxed{}.
+
+Problem: """,
+    # use python step by step
+    "python": """Let's use Python to solve a math problem.
+
+Query requirements:
+You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.
+You can use packages like sympy to help you.
+You must follow the formats below to write your code:
+```python
+# your code
+```
+
+Please follow this process:
+1. Solve the problem step by step (do not over-divide the steps).
+2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).
+3. Wait for me to give the results.
+4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.
+
+After all the queries are run and you get the answer, put the answer in \\boxed{}.
+
+Problem: """,
+}
+
+
+def _is_termination_msg_mathchat(message):
+    """Check if a message is a termination message."""
+    if isinstance(message, dict):
+        message = message.get("content")
+        if message is None:
+            return False
+    cb = extract_code(message)
+    contain_code = False
+    for c in cb:
+        if c[0] == "python" or c[0] == "wolfram":
+            contain_code = True
+            break
+    return not contain_code and get_answer(message) is not None and get_answer(message) != ""
+
+
+def _add_print_to_last_line(code):
+    """Add print() to the last line of a string."""
+    # 1. check if there is already a print statement
+    if "print(" in code:
+        return code
+    # 2. extract the last line, enclose it in print() and return the new string
+    lines = code.splitlines()
+    last_line = lines[-1]
+    if "\t" in last_line or "=" in last_line:
+        return code
+    if "=" in last_line:
+        last_line = "print(" + last_line.split(" = ")[0] + ")"
+        lines.append(last_line)
+    else:
+        lines[-1] = "print(" + last_line + ")"
+    # 3. join the lines back together
+    return "\n".join(lines)
+
+
+def _remove_print(code):
+    """Remove all print statements from a string."""
+    lines = code.splitlines()
+    lines = [line for line in lines if not line.startswith("print(")]
+    return "\n".join(lines)
+
+
+class MathUserProxyAgent(UserProxyAgent):
+    """(Experimental) A MathChat agent that can handle math problems."""
+
+    MAX_CONSECUTIVE_AUTO_REPLY = 15  # maximum number of consecutive auto replies (subject to future change)
+    DEFAULT_REPLY = "Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\boxed{}.)"
+
+    def __init__(
+        self,
+        name: Optional[str] = "MathChatAgent",  # default set to MathChatAgent
+        is_termination_msg: Optional[
+            Callable[[Dict[str, Any]], bool]
+        ] = _is_termination_msg_mathchat,  # terminate if \boxed{} in message
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",  # Fully automated
+        default_auto_reply: Optional[Union[str, dict[str, Any]]] = DEFAULT_REPLY,
+        max_invalid_q_per_step=3,  # a parameter needed in MathChat
+        **kwargs: Any,
+    ):
+        """Args:
+        name (str): name of the agent
+        is_termination_msg (function): a function that takes a message in the form of a dictionary and returns a boolean value indicating if this received message is a termination message.
+            The dict can contain the following keys: "content", "role", "name", "function_call".
+        human_input_mode (str): whether to ask for human inputs every time a message is received.
+            Possible values are "ALWAYS", "TERMINATE", "NEVER".
+            (1) When "ALWAYS", the agent prompts for human input every time a message is received.
+                Under this mode, the conversation stops when the human input is "exit",
+                or when is_termination_msg is True and there is no human input.
+            (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
+                the number of auto reply reaches the max_consecutive_auto_reply.
+            (3) (Default) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
+                when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
+        default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
+        max_invalid_q_per_step (int): (ADDED) the maximum number of invalid queries per step.
+        **kwargs (dict): other kwargs in [UserProxyAgent](/docs/api-reference/autogen/UserProxyAgent#userproxyagent).
+        """
+        super().__init__(
+            name=name,
+            is_termination_msg=is_termination_msg,
+            human_input_mode=human_input_mode,
+            default_auto_reply=default_auto_reply,
+            **kwargs,
+        )
+        self.register_reply([Agent, None], MathUserProxyAgent._generate_math_reply, position=2)
+        # fixed var
+        self._max_invalid_q_per_step = max_invalid_q_per_step
+
+        # mutable
+        self._valid_q_count = 0
+        self._total_q_count = 0
+        self._accum_invalid_q_per_step = 0
+        self._previous_code = ""
+        self.last_reply = None
+
+    @staticmethod
+    def message_generator(sender, recipient, context):
+        """Generate a prompt for the assistant agent with the given problem and prompt.
+
+        Args:
+            sender (Agent): the sender of the message.
+            recipient (Agent): the recipient of the message.
+            context (dict): a dictionary with the following fields:
+                problem (str): the problem to be solved.
+                prompt_type (str, Optional): the type of the prompt. Possible values are "default", "python", "wolfram".
+                    (1) "default": the prompt that allows the agent to choose between 3 ways to solve a problem:
+                        1. write a python program to solve it directly.
+                        2. solve it directly without python.
+                        3. solve it step by step with python.
+                    (2) "python":
+                        a simplified prompt from the third way of the "default" prompt, that asks the assistant
+                        to solve the problem step by step with python.
+                    (3) "two_tools":
+                        a simplified prompt similar to the "python" prompt, but allows the model to choose between
+                        Python and Wolfram Alpha to solve the problem.
+                customized_prompt (str, Optional): a customized prompt to be used. If it is not None, the prompt_type will be ignored.
+
+        Returns:
+            str: the generated prompt ready to be sent to the assistant agent.
+        """
+        sender._reset()
+        problem = context.get("problem")
+        prompt_type = context.get("prompt_type", "default")
+        customized_prompt = context.get("customized_prompt", None)
+        if customized_prompt is not None:
+            return customized_prompt + problem
+        return PROMPTS[prompt_type] + problem
+
+    def _reset(self):
+        # super().reset()
+        self._valid_q_count = 0
+        self._total_q_count = 0
+        self._accum_invalid_q_per_step = 0
+        self._previous_code = ""
+        self.last_reply = None
+
+    def execute_one_python_code(self, pycode):
+        """Execute python code blocks.
+
+        Previous python code will be saved and executed together with the new code.
+        the "print" function will also be added to the last line of the code if needed
+        """
+        # Need to replace all "; " with "\n" to avoid syntax error when adding `print` to the last line
+        pycode = pycode.replace("; ", "\n").replace(";", "\n")
+        pycode = self._previous_code + _add_print_to_last_line(pycode)
+
+        return_code, output, _ = execute_code(pycode, **self._code_execution_config, timeout=5)
+        is_success = return_code == 0
+
+        if not is_success:
+            # Remove the file information from the error string
+            pattern = r'File "/[^"]+\.py", line \d+, in .+\n'
+            if isinstance(output, str):
+                output = re.sub(pattern, "", output)
+            output = "Error: " + output
+        elif output == "":
+            # Check if there is any print statement
+            if "print" not in pycode:
+                output = "No output found. Make sure you print the results."
+                is_success = False
+            else:
+                output = "No output found."
+                is_success = True
+
+        if len(output) > 2000:
+            output = "Your requested query response is too long. You might have made a mistake. Please revise your reasoning and query."
+            is_success = False
+
+        if is_success:
+            # remove print and check if it still works
+            tmp = self._previous_code + "\n" + _remove_print(pycode) + "\n"
+            rcode, _, _ = execute_code(tmp, **self._code_execution_config)
+        else:
+            # only add imports and check if it works
+            tmp = self._previous_code + "\n"
+            for line in pycode.split("\n"):
+                if "import" in line:
+                    tmp += line + "\n"
+            rcode, _, _ = execute_code(tmp, **self._code_execution_config)
+
+        if rcode == 0:
+            self._previous_code = tmp
+        return output, is_success
+
+    def execute_one_wolfram_query(self, query: str):
+        """Run one wolfram query and return the output.
+
+        Args:
+            query: string of the query.
+
+        Returns:
+            output: string with the output of the query.
+            is_success: boolean indicating whether the query was successful.
+        """
+        # wolfram query handler
+        wolfram = WolframAlphaAPIWrapper()
+        output, is_success = wolfram.run(query)
+        if output == "":
+            output = "Error: The wolfram query is invalid."
+            is_success = False
+        return output, is_success
+
+    def _generate_math_reply(
+        self,
+        messages: Optional[list[dict]] = None,
+        sender: Optional[Agent] = None,
+        config: Optional[Any] = None,
+    ):
+        """Generate an auto reply."""
+        if messages is None:
+            messages = self._oai_messages[sender]
+        message = messages[-1]
+        message = message.get("content", "")
+        code_blocks = extract_code(message)
+
+        if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
+            # no code block is found, lang should be `UNKNOWN``
+            return True, self._default_auto_reply
+        is_success, all_success = True, True
+        reply = ""
+        for code_block in code_blocks:
+            lang, code = code_block
+            if not lang:
+                lang = infer_lang(code)
+            if lang == "python":
+                output, is_success = self.execute_one_python_code(code)
+            elif lang == "wolfram":
+                output, is_success = self.execute_one_wolfram_query(code)
+            else:
+                output = "Error: Unknown language."
+                is_success = False
+
+            reply += output + "\n"
+            if not is_success:
+                all_success = False
+                self._valid_q_count -= 1  # count invalid queries
+
+        reply = reply.strip()
+
+        if self.last_reply == reply:
+            return True, reply + "\nYour query or result is same from the last, please try a new approach."
+        self.last_reply = reply
+
+        if not all_success:
+            self._accum_invalid_q_per_step += 1
+            if self._accum_invalid_q_per_step > self._max_invalid_q_per_step:
+                self._accum_invalid_q_per_step = 0
+                reply = "Please revisit the problem statement and your reasoning. If you think this step is correct, solve it yourself and continue the next step. Otherwise, correct this step."
+
+        return True, reply
+
+
+# Modified based on langchain. Langchain is licensed under MIT License:
+# The MIT License
+
+# Copyright (c) Harrison Chase
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:

+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+
+def get_from_dict_or_env(data: dict[str, Any], key: str, env_key: str, default: Optional[str] = None) -> str:
+    """Get a value from a dictionary or an environment variable."""
+    if data.get(key):
+        return data[key]
+    elif os.environ.get(env_key):
+        return os.environ[env_key]
+    elif default is not None:
+        return default
+    else:
+        raise ValueError(
+            f"Did not find {key}, please add an environment variable"
+            f" `{env_key}` which contains it, or pass"
+            f" `{key}` as a named parameter."
+        )
+
+
+class WolframAlphaAPIWrapper(BaseModel):
+    """Wrapper for Wolfram Alpha.
+
+    Docs for using:
+
+    1. Go to wolfram alpha and sign up for a developer account
+    2. Create an app and get your APP ID
+    3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
+    4. pip install wolframalpha
+
+    """
+
+    wolfram_client: Any  #: :meta private:
+    wolfram_alpha_appid: Optional[str] = None
+
+    @root_validator(skip_on_failure=True)
+    @classmethod
+    @require_optional_import("wolframalpha", "mathchat")
+    def validate_environment(cls, values: dict) -> dict:
+        """Validate that api key and python package exists in environment."""
+        wolfram_alpha_appid = get_from_dict_or_env(values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID")
+        values["wolfram_alpha_appid"] = wolfram_alpha_appid
+
+        client = wolframalpha.Client(wolfram_alpha_appid)
+        values["wolfram_client"] = client
+
+        return values
+
+    def run(self, query: str) -> tuple[str, bool]:
+        """Run query through WolframAlpha and parse result."""
+        from urllib.error import HTTPError
+
+        is_success = False  # added
+        res = None
+        for _ in range(20):
+            try:
+                res = self.wolfram_client.query(query)
+                break
+            except HTTPError:
+                sleep(1)
+            except Exception:
+                return (
+                    "Wolfram Alpha wasn't able to answer it. Please try a new query for wolfram or use python.",
+                    is_success,
+                )
+        if res is None:
+            return (
+                "Wolfram Alpha wasn't able to answer it (may due to web error), you can try again or use python.",
+                is_success,
+            )
+
+        try:
+            if not res["@success"]:
+                return (
+                    "Your Wolfram query is invalid. Please try a new query for wolfram or use python.",
+                    is_success,
+                )
+            assumption = next(res.pods).text
+            answer = ""
+            for result in res["pod"]:
+                if result["@title"] == "Solution":
+                    answer = result["subpod"]["plaintext"]
+                if result["@title"] == "Results" or result["@title"] == "Solutions":
+                    for i, sub in enumerate(result["subpod"]):
+                        answer += f"ans {i}: " + sub["plaintext"] + "\n"
+                    break
+            if answer == "":
+                answer = next(res.results).text
+
+        except Exception:
+            return (
+                "Wolfram Alpha wasn't able to answer it. Please try a new query for wolfram or use python.",
+                is_success,
+            )
+
+        if answer is None or answer == "":
+            # We don't want to return the assumption alone if answer is empty
+            return "No good Wolfram Alpha Result was found", is_success
+        is_success = True
+        return f"Assumption: {assumption} \nAnswer: {answer}", is_success
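
The MathChat pieces above are typically wired together as sketched below; this is illustrative and not part of the diff. The model config and problem text are placeholders, and the `two_tools` prompt additionally needs the optional `wolframalpha` package plus a `WOLFRAM_ALPHA_APPID` environment variable, per `WolframAlphaAPIWrapper`.

```python
# Hypothetical MathChat wiring; the model name, API key, and problem are placeholders.
from autogen import AssistantAgent
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent

assistant = AssistantAgent(
    name="assistant",
    llm_config={"config_list": [{"model": "gpt-4o", "api_key": "sk-placeholder"}]},
)
mathproxyagent = MathUserProxyAgent(
    name="mathproxyagent",
    human_input_mode="NEVER",
    code_execution_config={"use_docker": False},  # run the generated python blocks locally
)

# message_generator prepends the selected PROMPTS template to the problem;
# _generate_math_reply then executes each code block the assistant returns.
mathproxyagent.initiate_chat(
    assistant,
    message=MathUserProxyAgent.message_generator,
    problem="Find all x that satisfy x**2 - 5*x + 6 < 0.",
    prompt_type="python",
)
```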