ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 has been flagged as potentially problematic; consult the registry's advisory page for details.
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
- ag2-0.9.2.dist-info/RECORD +406 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4023 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1013 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +179 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +382 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +400 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1444 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +998 -0
- autogen/oai/gemini_types.py +155 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +48 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1316 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +69 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
|
@@ -0,0 +1,526 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
import copy
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
import time
|
|
11
|
+
from collections import defaultdict
|
|
12
|
+
from typing import Any, Optional, Union
|
|
13
|
+
|
|
14
|
+
from ... import OpenAIWrapper
|
|
15
|
+
from ...llm_config import LLMConfig
|
|
16
|
+
from ...oai.openai_utils import create_gpt_assistant, retrieve_assistants_by_name, update_gpt_assistant
|
|
17
|
+
from ...runtime_logging import log_new_agent, logging_enabled
|
|
18
|
+
from ..agent import Agent
|
|
19
|
+
from ..assistant_agent import AssistantAgent, ConversableAgent
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class GPTAssistantAgent(ConversableAgent):
|
|
25
|
+
"""An experimental AG2 agent class that leverages the OpenAI Assistant API for conversational capabilities.
|
|
26
|
+
This agent is unique in its reliance on the OpenAI Assistant for state management, differing from other agents like ConversableAgent.
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
DEFAULT_MODEL_NAME = "gpt-4-0125-preview"
|
|
30
|
+
|
|
31
|
+
def __init__(
    self,
    name="GPT Assistant",
    instructions: Optional[str] = None,
    llm_config: Optional[Union[LLMConfig, dict[str, Any], bool]] = None,
    assistant_config: Optional[dict[str, Any]] = None,
    overwrite_instructions: bool = False,
    overwrite_tools: bool = False,
    **kwargs: Any,
):
    """Create (or connect to) an OpenAI Assistant and wrap it as an AG2 agent.

    Resolution order: an explicit ``assistant_id`` in ``assistant_config`` is
    retrieved directly; otherwise an existing assistant matching ``name`` (and,
    if given, ``instructions``/tools) is reused; otherwise a new assistant is
    created.

    Args:
        name (str): name of the agent. It will be used to find the existing assistant by name.
            Please remember to delete an old assistant with the same name if you intend to
            create a new assistant with the same name.
        instructions (str): instructions for the OpenAI assistant configuration.
            When instructions is not None, the system message of the agent will be
            set to the provided instructions and used in the assistant run, irrespective
            of the overwrite_instructions flag. But when instructions is None,
            and the assistant does not exist, the system message will be set to
            AssistantAgent.DEFAULT_SYSTEM_MESSAGE. If the assistant exists, the
            system message will be set to the existing assistant instructions.
        llm_config (LLMConfig or dict or False): llm inference configuration.
            - model: Model to use for the assistant (gpt-4-1106-preview, gpt-3.5-turbo-1106).
        assistant_config (dict): OpenAI-assistant-specific configuration.
            - assistant_id: ID of the assistant to use. If None, a new assistant will be created.
            - check_every_ms: check thread run status interval.
            - tools: Give Assistants access to OpenAI-hosted tools like Code Interpreter and
              Knowledge Retrieval, or build your own tools using Function calling.
              ref https://platform.openai.com/docs/assistants/tools
            - file_ids: (Deprecated) files used by retrieval in run. Use tool_resources instead.
              https://platform.openai.com/docs/assistants/migration/what-has-changed
            - tool_resources: A set of resources that are used by the assistant's tools.
              The resources are specific to the type of tool.
        overwrite_instructions (bool): whether to overwrite the instructions of an existing
            assistant. Only in effect when assistant_id is specified.
        overwrite_tools (bool): whether to overwrite the tools of an existing assistant.
            Only in effect when assistant_id is specified.
        kwargs (dict): Additional configuration options for the agent.
            - verbose (bool): If set to True, enables more detailed output from the assistant thread.
            - Other kwargs: Except verbose, others are passed directly to ConversableAgent.
    """
    # "verbose" is consumed here; everything else flows through to ConversableAgent.
    self._verbose = kwargs.pop("verbose", False)
    openai_client_cfg, openai_assistant_cfg = self._process_assistant_config(llm_config, assistant_config)

    super().__init__(
        name=name, system_message=instructions, human_input_mode="NEVER", llm_config=openai_client_cfg, **kwargs
    )
    if logging_enabled():
        log_new_agent(self, locals())

    # Pop the model name OUT of the client config: GPTAssistantAgent's
    # azure_deployment param may cause NotFoundError (404) in
    # client.beta.assistants.list().
    # See: https://github.com/microsoft/autogen/pull/1721
    if openai_client_cfg.get("config_list"):
        model_name = openai_client_cfg["config_list"][0].pop("model", self.DEFAULT_MODEL_NAME)
    else:
        model_name = openai_client_cfg.pop("model", self.DEFAULT_MODEL_NAME)

    logger.warning("OpenAI client config of GPTAssistantAgent(%s) - model: %s", name, model_name)

    oai_wrapper = OpenAIWrapper(**openai_client_cfg)
    if len(oai_wrapper._clients) > 1:
        logger.warning("GPT Assistant only supports one OpenAI client. Using the first client in the list.")

    self._openai_client = oai_wrapper._clients[0]._oai_client
    openai_assistant_id = openai_assistant_cfg.get("assistant_id")
    if openai_assistant_id is None:
        # No explicit id: try to find an existing assistant by name first.
        candidate_assistants = retrieve_assistants_by_name(self._openai_client, name)
        if candidate_assistants:
            # Filter out candidates with the same name but different instructions,
            # file IDs, and function names.
            candidate_assistants = self.find_matching_assistant(
                candidate_assistants,
                instructions,
                openai_assistant_cfg.get("tools", []),
            )

        if not candidate_assistants:
            logger.warning("No matching assistant found, creating a new assistant")
            if instructions is None:
                logger.warning(
                    "No instructions were provided for new assistant. Using default instructions from AssistantAgent.DEFAULT_SYSTEM_MESSAGE."
                )
                instructions = AssistantAgent.DEFAULT_SYSTEM_MESSAGE
            self._openai_assistant = create_gpt_assistant(
                self._openai_client,
                name=name,
                instructions=instructions,
                model=model_name,
                assistant_config=openai_assistant_cfg,
            )
        else:
            logger.warning(
                "Matching assistant found, using the first matching assistant: %s",
                candidate_assistants[0].__dict__,
            )
            self._openai_assistant = candidate_assistants[0]
    else:
        # An explicit assistant_id was given: retrieve that assistant.
        self._openai_assistant = self._openai_client.beta.assistants.retrieve(openai_assistant_id)
        # If no instructions are provided, fall back to the assistant's existing instructions.
        if instructions is None:
            logger.warning(
                "No instructions were provided for given assistant. Using existing instructions from assistant API."
            )
            instructions = self.get_assistant_instructions()
        elif overwrite_instructions:
            logger.warning(
                "overwrite_instructions is True. Provided instructions will be used and will modify the assistant in the API"
            )
            self._openai_assistant = update_gpt_assistant(
                self._openai_client,
                assistant_id=openai_assistant_id,
                assistant_config={
                    "instructions": instructions,
                },
            )
        else:
            logger.warning(
                "overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
            )

        # Reconcile the tools on the retrieved assistant with assistant_config.
        specified_tools = openai_assistant_cfg.get("tools", None)

        if specified_tools is None:
            # No tools requested; report whether the assistant already has some.
            if self._openai_assistant.tools:
                logger.warning(
                    "No tools were provided for given assistant. Using existing tools from assistant API."
                )
            else:
                logger.info(
                    "No tools were provided for the assistant, and the assistant currently has no tools set."
                )
        elif overwrite_tools:
            # Tools are specified and overwrite_tools is True; update the assistant's tools.
            logger.warning(
                "overwrite_tools is True. Provided tools will be used and will modify the assistant in the API"
            )
            self._openai_assistant = update_gpt_assistant(
                self._openai_client,
                assistant_id=openai_assistant_id,
                assistant_config={
                    "tools": specified_tools,
                    "tool_resources": openai_assistant_cfg.get("tool_resources", None),
                },
            )
        else:
            # Tools are specified but overwrite_tools is False; do not update the assistant's tools.
            logger.warning("overwrite_tools is False. Using existing tools from assistant API.")

    self.update_system_message(self._openai_assistant.instructions)
    # Threads are created lazily, one per sender, in _invoke_assistant.
    self._openai_threads = {}
    self._unread_index = defaultdict(int)
    self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant, position=2)
|
|
182
|
+
|
|
183
|
+
def _invoke_assistant(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
    """Invokes the OpenAI assistant to generate a reply based on the given messages.

    Pushes all unread messages from the conversation with ``sender`` onto the
    (lazily created) OpenAI thread, starts a run with the current system message
    as instructions, and concatenates the run's response messages into one reply.

    Args:
        messages: A list of messages in the conversation history with the sender.
            If None, the stored ``_oai_messages`` history for ``sender`` is used.
        sender: The agent instance that sent the message.
        config: Optional configuration for message processing (unused here).

    Returns:
        A tuple containing a boolean indicating success and the assistant's reply.
    """
    if messages is None:
        messages = self._oai_messages[sender]
    # Only forward messages that have not already been pushed to the OpenAI thread.
    unread_index = self._unread_index[sender] or 0
    pending_messages = messages[unread_index:]

    # Check and initiate a new thread if necessary
    if self._openai_threads.get(sender, None) is None:
        self._openai_threads[sender] = self._openai_client.beta.threads.create(
            messages=[],
        )
    assistant_thread = self._openai_threads[sender]
    # Process each unread message
    for message in pending_messages:
        # The API rejects empty content, so blank messages are skipped.
        if message["content"].strip() == "":
            continue
        # Convert message roles to 'user' or 'assistant', by calling _map_role_for_api, to comply with OpenAI API spec
        api_role = self._map_role_for_api(message["role"])
        self._openai_client.beta.threads.messages.create(
            thread_id=assistant_thread.id,
            content=message["content"],
            role=api_role,
        )

    # Create a new run to get responses from the assistant
    run = self._openai_client.beta.threads.runs.create(
        thread_id=assistant_thread.id,
        assistant_id=self._openai_assistant.id,
        # pass the latest system message as instructions
        instructions=self.system_message,
    )

    run_response_messages = self._get_run_response(assistant_thread, run)
    assert len(run_response_messages) > 0, "No response from the assistant."

    # Join all messages from the run into a single reply, separated by blank lines.
    response = {
        "role": run_response_messages[-1]["role"],
        "content": "",
    }
    for message in run_response_messages:
        # just logging or do something with the intermediate messages?
        # if current response is not empty and there is more, append new lines
        if len(response["content"]) > 0:
            response["content"] += "\n\n"
        response["content"] += message["content"]

    # NOTE(review): the +1 appears to account for this reply being appended to
    # _oai_messages after we return, so it is not re-sent next time — confirm.
    self._unread_index[sender] = len(self._oai_messages[sender]) + 1
    return True, response
|
|
246
|
+
|
|
247
|
+
def _map_role_for_api(self, role: str) -> str:
|
|
248
|
+
"""Maps internal message roles to the roles expected by the OpenAI Assistant API.
|
|
249
|
+
|
|
250
|
+
Args:
|
|
251
|
+
role (str): The role from the internal message.
|
|
252
|
+
|
|
253
|
+
Returns:
|
|
254
|
+
str: The mapped role suitable for the API.
|
|
255
|
+
"""
|
|
256
|
+
if role in ["function", "tool"]:
|
|
257
|
+
return "assistant"
|
|
258
|
+
elif role == "system":
|
|
259
|
+
return "system"
|
|
260
|
+
elif role == "user":
|
|
261
|
+
return "user"
|
|
262
|
+
elif role == "assistant":
|
|
263
|
+
return "assistant"
|
|
264
|
+
else:
|
|
265
|
+
# Default to 'assistant' for any other roles not recognized by the API
|
|
266
|
+
return "assistant"
|
|
267
|
+
|
|
268
|
+
def _get_run_response(self, thread, run):
    """Waits for and processes the response of a run from the OpenAI assistant.

    Loops until the run reaches a terminal state, executing any required tool
    calls along the way and submitting their outputs back to the run.

    Args:
        thread: The thread object initiated with the OpenAI assistant.
        run: The run object initiated with the OpenAI assistant.

    Returns:
        list[dict]: The messages produced by this run, each a dict with "role"
        and "content" keys. Text content is formatted with citations; image
        content is replaced by a "Received file id=..." placeholder.

    Raises:
        ValueError: If the run ends in any status other than "completed" or
            "requires_action" (e.g. "failed", "cancelled", "expired").
    """
    while True:
        run = self._wait_for_run(run.id, thread.id)
        if run.status == "completed":
            response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc")

            new_messages = []
            for msg in response_messages:
                # Keep only messages generated by this run, not earlier history.
                if msg.run_id == run.id:
                    for content in msg.content:
                        if content.type == "text":
                            new_messages.append({
                                "role": msg.role,
                                "content": self._format_assistant_message(content.text),
                            })
                        elif content.type == "image_file":
                            new_messages.append({
                                "role": msg.role,
                                "content": f"Received file id={content.image_file.file_id}",
                            })
            return new_messages
        elif run.status == "requires_action":
            # The assistant requested tool calls: execute each one locally and
            # collect the outputs to submit back to the run.
            actions = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                function = tool_call.function
                tool_call_id = tool_call.id
                is_exec_success, tool_response = self.execute_function(
                    function.dict(), call_id=tool_call_id, verbose=self._verbose
                )
                tool_response["metadata"] = {
                    "tool_call_id": tool_call.id,
                    "run_id": run.id,
                    "thread_id": thread.id,
                }

                logger.info(
                    "Intermediate executing(%s, Success: %s) : %s",
                    tool_response["name"],
                    is_exec_success,
                    tool_response["content"],
                )
                actions.append(tool_response)

            submit_tool_outputs = {
                "tool_outputs": [
                    {"output": action["content"], "tool_call_id": action["metadata"]["tool_call_id"]}
                    for action in actions
                ],
                "run_id": run.id,
                "thread_id": thread.id,
            }

            # Resume the run with the tool results, then loop to wait again.
            run = self._openai_client.beta.threads.runs.submit_tool_outputs(**submit_tool_outputs)
        else:
            run_info = json.dumps(run.dict(), indent=2)
            raise ValueError(f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})")
|
|
333
|
+
|
|
334
|
+
def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
|
|
335
|
+
"""Waits for a run to complete or reach a final state.
|
|
336
|
+
|
|
337
|
+
Args:
|
|
338
|
+
run_id: The ID of the run.
|
|
339
|
+
thread_id: The ID of the thread associated with the run.
|
|
340
|
+
|
|
341
|
+
Returns:
|
|
342
|
+
The updated run object after completion or reaching a final state.
|
|
343
|
+
"""
|
|
344
|
+
in_progress = True
|
|
345
|
+
while in_progress:
|
|
346
|
+
run = self._openai_client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
|
|
347
|
+
in_progress = run.status in ("in_progress", "queued")
|
|
348
|
+
if in_progress:
|
|
349
|
+
time.sleep(self.llm_config.get("check_every_ms", 1000) / 1000)
|
|
350
|
+
return run
|
|
351
|
+
|
|
352
|
+
def _format_assistant_message(self, message_content):
|
|
353
|
+
"""Formats the assistant's message to include annotations and citations."""
|
|
354
|
+
annotations = message_content.annotations
|
|
355
|
+
citations = []
|
|
356
|
+
|
|
357
|
+
# Iterate over the annotations and add footnotes
|
|
358
|
+
for index, annotation in enumerate(annotations):
|
|
359
|
+
# Replace the text with a footnote
|
|
360
|
+
message_content.value = message_content.value.replace(annotation.text, f" [{index}]")
|
|
361
|
+
|
|
362
|
+
# Gather citations based on annotation attributes
|
|
363
|
+
if file_citation := getattr(annotation, "file_citation", None):
|
|
364
|
+
try:
|
|
365
|
+
cited_file = self._openai_client.files.retrieve(file_citation.file_id)
|
|
366
|
+
citations.append(f"[{index}] {cited_file.filename}: {file_citation.quote}")
|
|
367
|
+
except Exception as e:
|
|
368
|
+
logger.error(f"Error retrieving file citation: {e}")
|
|
369
|
+
elif file_path := getattr(annotation, "file_path", None):
|
|
370
|
+
try:
|
|
371
|
+
cited_file = self._openai_client.files.retrieve(file_path.file_id)
|
|
372
|
+
citations.append(f"[{index}] Click <here> to download {cited_file.filename}")
|
|
373
|
+
except Exception as e:
|
|
374
|
+
logger.error(f"Error retrieving file citation: {e}")
|
|
375
|
+
# Note: File download functionality not implemented above for brevity
|
|
376
|
+
|
|
377
|
+
# Add footnotes to the end of the message before displaying to user
|
|
378
|
+
message_content.value += "\n" + "\n".join(citations)
|
|
379
|
+
return message_content.value
|
|
380
|
+
|
|
381
|
+
def can_execute_function(self, name: str) -> bool:
    """Whether the agent can execute the function.

    Always False: tool calls are executed inside the Assistants API run loop
    (see _get_run_response), not through the standard reply path.
    """
    return False
|
|
384
|
+
|
|
385
|
+
def reset(self):
    """Resets the agent, clearing any existing conversation thread and unread message indices.

    Deletes every OpenAI thread this agent created (a remote, irreversible
    operation) in addition to the base-class reset.
    """
    super().reset()
    for thread in self._openai_threads.values():
        # Delete the existing thread to start fresh in the next conversation
        self._openai_client.beta.threads.delete(thread.id)
    self._openai_threads = {}
    # Clear the record of unread messages
    self._unread_index.clear()
|
|
394
|
+
|
|
395
|
+
def clear_history(self, agent: Optional[Agent] = None):
    """Clear the chat history of the agent.

    Args:
        agent: The agent whose chat history to clear. If None, clear the chat history with all agents.

    NOTE(review): when ``agent`` is None, only the base-class history is cleared;
    the per-agent OpenAI threads are left intact (``_openai_threads.get(None)``
    will not match them) — confirm this asymmetry with ``reset`` is intended.
    """
    super().clear_history(agent)
    if self._openai_threads.get(agent, None) is not None:
        # Delete the existing thread to start fresh in the next conversation
        thread = self._openai_threads[agent]
        logger.info("Clearing thread %s", thread.id)
        self._openai_client.beta.threads.delete(thread.id)
        self._openai_threads.pop(agent)
        self._unread_index[agent] = 0
|
|
409
|
+
|
|
410
|
+
def pretty_print_thread(self, thread):
    """Pretty print the thread.

    Prints every message in the thread in chronological order, showing the
    creation timestamp, role, content types, and each content item's payload.

    Args:
        thread: The OpenAI thread object to print. If None, prints a notice
            and returns without calling the API.
    """
    if thread is None:
        print("No thread to print")
        return
    # NOTE: that list may not be in order, sorting by created_at is important
    messages = self._openai_client.beta.threads.messages.list(
        thread_id=thread.id,
    )
    messages = sorted(messages.data, key=lambda x: x.created_at)
    print("~~~~~~~THREAD CONTENTS~~~~~~~")
    for message in messages:
        content_types = [content.type for content in message.content]
        print(f"[{message.created_at}]", message.role, ": [", ", ".join(content_types), "]")
        for content in message.content:
            content_type = content.type
            if content_type == "text":
                print(content.type, ": ", content.text.value)
            elif content_type == "image_file":
                print(content.type, ": ", content.image_file.file_id)
            else:
                # Unknown content types are printed raw for debugging.
                print(content.type, ": ", content)
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
|
|
433
|
+
|
|
434
|
+
@property
def oai_threads(self) -> dict[Agent, Any]:
    """Return the threads of the agent.

    Maps each peer agent to the OpenAI thread object created lazily for the
    conversation with it.
    """
    return self._openai_threads
|
|
438
|
+
|
|
439
|
+
@property
def assistant_id(self):
    """Return the OpenAI assistant id of this agent."""
    return self._openai_assistant.id
|
|
443
|
+
|
|
444
|
+
@property
def openai_client(self):
    """Return the underlying OpenAI client instance used for all API calls."""
    return self._openai_client
|
|
447
|
+
|
|
448
|
+
@property
def openai_assistant(self):
    """Return the underlying OpenAI assistant object this agent wraps."""
    return self._openai_assistant
|
|
451
|
+
|
|
452
|
+
def get_assistant_instructions(self):
    """Return the assistant instructions from OAI assistant API.

    Reflects the instructions stored on the cached assistant object, which may
    differ from ``system_message`` if the remote assistant was not overwritten.
    """
    return self._openai_assistant.instructions
|
|
455
|
+
|
|
456
|
+
def delete_assistant(self):
    """Delete the assistant from OAI assistant API.

    This is irreversible: the assistant is removed on the OpenAI side, not
    just locally.
    """
    logger.warning("Permanently deleting assistant...")
    self._openai_client.beta.assistants.delete(self.assistant_id)
|
|
460
|
+
|
|
461
|
+
def find_matching_assistant(self, candidate_assistants, instructions, tools):
    """Find the matching assistant from a list of candidate assistants.

    Filter out candidates with the same name but different instructions, and function names.
    """

    def _canonical(tool_type):
        # The legacy "retrieval" tool was renamed "file_search"; treat them as equal.
        return "file_search" if tool_type in ["retrieval", "file_search"] else tool_type

    # Precompute the required tool types and function names once for fast set comparison.
    required_tool_types = {_canonical(tool.get("type")) for tool in tools}
    required_function_names = {
        tool.get("function", {}).get("name")
        for tool in tools
        if tool.get("type") not in ["code_interpreter", "retrieval", "file_search"]
    }

    matching_assistants = []
    for assistant in candidate_assistants:
        # Check if instructions are similar
        if instructions and instructions != getattr(assistant, "instructions", None):
            logger.warning(
                "instructions not match, skip assistant(%s): %s",
                assistant.id,
                getattr(assistant, "instructions", None),
            )
            continue

        # Canonicalize the candidate's tools the same way for comparison.
        assistant_tool_types = {_canonical(tool.type) for tool in assistant.tools}
        assistant_function_names = {tool.function.name for tool in assistant.tools if hasattr(tool, "function")}

        # Check if the tool types, function names match
        if required_tool_types != assistant_tool_types or required_function_names != assistant_function_names:
            logger.warning(
                "tools not match, skip assistant(%s): tools %s, functions %s",
                assistant.id,
                assistant_tool_types,
                assistant_function_names,
            )
            continue

        # Append assistant to matching list if all conditions are met
        matching_assistants.append(assistant)

    return matching_assistants
|
|
508
|
+
|
|
509
|
+
def _process_assistant_config(self, llm_config, assistant_config):
|
|
510
|
+
"""Process the llm_config and assistant_config to extract the model name and assistant related configurations."""
|
|
511
|
+
if llm_config is False:
|
|
512
|
+
raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
|
|
513
|
+
|
|
514
|
+
openai_client_cfg = {} if llm_config is None else copy.deepcopy(llm_config)
|
|
515
|
+
|
|
516
|
+
openai_assistant_cfg = {} if assistant_config is None else copy.deepcopy(assistant_config)
|
|
517
|
+
|
|
518
|
+
# Move the assistant related configurations to assistant_config
|
|
519
|
+
# It's important to keep forward compatibility
|
|
520
|
+
assistant_config_items = ["assistant_id", "tools", "file_ids", "tool_resources", "check_every_ms"]
|
|
521
|
+
for item in assistant_config_items:
|
|
522
|
+
if openai_client_cfg.get(item) is not None and openai_assistant_cfg.get(item) is None:
|
|
523
|
+
openai_assistant_cfg[item] = openai_client_cfg[item]
|
|
524
|
+
openai_client_cfg.pop(item, None)
|
|
525
|
+
|
|
526
|
+
return openai_client_cfg, openai_assistant_cfg
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from .document import Document, DocumentType
|
|
6
|
+
from .graph_query_engine import GraphQueryEngine, GraphStoreQueryResult
|
|
7
|
+
from .graph_rag_capability import GraphRagCapability
|
|
8
|
+
|
|
9
|
+
__all__ = ["Document", "DocumentType", "GraphQueryEngine", "GraphRagCapability", "GraphStoreQueryResult"]
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from enum import Enum, auto
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
|
|
11
|
+
__all__ = ["Document", "DocumentType"]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DocumentType(Enum):
    """Enum of the document formats supported for ingestion."""

    TEXT = auto()
    HTML = auto()
    PDF = auto()
    JSON = auto()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class Document:
    """A wrapper for an input document: its format, optional raw content, and location.

    Attributes:
        doctype: The format of the document (see DocumentType).
        data: The raw document content, if already loaded; otherwise None.
        path_or_url: The local path or URL of the document; empty string when unknown.
    """

    doctype: DocumentType
    data: Optional[Any] = None
    # A plain string default suffices: str is immutable, so the original
    # field(default_factory=lambda: "") was needless indirection.
    path_or_url: Optional[str] = ""
|