ag2-0.10.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (423)
  1. ag2-0.10.2.dist-info/METADATA +819 -0
  2. ag2-0.10.2.dist-info/RECORD +423 -0
  3. ag2-0.10.2.dist-info/WHEEL +4 -0
  4. ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
  5. ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
  6. autogen/__init__.py +88 -0
  7. autogen/_website/__init__.py +3 -0
  8. autogen/_website/generate_api_references.py +426 -0
  9. autogen/_website/generate_mkdocs.py +1216 -0
  10. autogen/_website/notebook_processor.py +475 -0
  11. autogen/_website/process_notebooks.py +656 -0
  12. autogen/_website/utils.py +413 -0
  13. autogen/a2a/__init__.py +36 -0
  14. autogen/a2a/agent_executor.py +86 -0
  15. autogen/a2a/client.py +357 -0
  16. autogen/a2a/errors.py +18 -0
  17. autogen/a2a/httpx_client_factory.py +79 -0
  18. autogen/a2a/server.py +221 -0
  19. autogen/a2a/utils.py +207 -0
  20. autogen/agentchat/__init__.py +47 -0
  21. autogen/agentchat/agent.py +180 -0
  22. autogen/agentchat/assistant_agent.py +86 -0
  23. autogen/agentchat/chat.py +325 -0
  24. autogen/agentchat/contrib/__init__.py +5 -0
  25. autogen/agentchat/contrib/agent_eval/README.md +7 -0
  26. autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
  27. autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
  28. autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
  29. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
  30. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
  31. autogen/agentchat/contrib/agent_eval/task.py +42 -0
  32. autogen/agentchat/contrib/agent_optimizer.py +432 -0
  33. autogen/agentchat/contrib/capabilities/__init__.py +5 -0
  34. autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
  35. autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
  36. autogen/agentchat/contrib/capabilities/teachability.py +393 -0
  37. autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
  38. autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
  39. autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
  40. autogen/agentchat/contrib/capabilities/transforms.py +578 -0
  41. autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
  42. autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
  43. autogen/agentchat/contrib/captainagent/__init__.py +9 -0
  44. autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
  45. autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
  46. autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
  47. autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
  48. autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
  49. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
  50. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
  51. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
  52. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
  53. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
  54. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
  55. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
  56. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
  57. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
  58. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
  59. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
  60. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
  61. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
  62. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
  63. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
  64. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
  65. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
  66. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
  67. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
  68. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
  69. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
  70. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
  71. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
  72. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
  73. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
  74. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
  75. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
  76. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
  77. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
  78. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
  79. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
  80. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
  81. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
  82. autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
  83. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
  84. autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
  85. autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
  86. autogen/agentchat/contrib/graph_rag/document.py +29 -0
  87. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
  88. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
  89. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
  90. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
  91. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
  92. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
  93. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
  94. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
  95. autogen/agentchat/contrib/img_utils.py +397 -0
  96. autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
  97. autogen/agentchat/contrib/llava_agent.py +189 -0
  98. autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
  99. autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
  100. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
  101. autogen/agentchat/contrib/rag/__init__.py +10 -0
  102. autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
  103. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
  104. autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
  105. autogen/agentchat/contrib/rag/query_engine.py +76 -0
  106. autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
  107. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
  108. autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
  109. autogen/agentchat/contrib/swarm_agent.py +1404 -0
  110. autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
  111. autogen/agentchat/contrib/vectordb/__init__.py +5 -0
  112. autogen/agentchat/contrib/vectordb/base.py +224 -0
  113. autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
  114. autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
  115. autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
  116. autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
  117. autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
  118. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  119. autogen/agentchat/contrib/web_surfer.py +304 -0
  120. autogen/agentchat/conversable_agent.py +4307 -0
  121. autogen/agentchat/group/__init__.py +67 -0
  122. autogen/agentchat/group/available_condition.py +91 -0
  123. autogen/agentchat/group/context_condition.py +77 -0
  124. autogen/agentchat/group/context_expression.py +238 -0
  125. autogen/agentchat/group/context_str.py +39 -0
  126. autogen/agentchat/group/context_variables.py +182 -0
  127. autogen/agentchat/group/events/transition_events.py +111 -0
  128. autogen/agentchat/group/group_tool_executor.py +324 -0
  129. autogen/agentchat/group/group_utils.py +659 -0
  130. autogen/agentchat/group/guardrails.py +179 -0
  131. autogen/agentchat/group/handoffs.py +303 -0
  132. autogen/agentchat/group/llm_condition.py +93 -0
  133. autogen/agentchat/group/multi_agent_chat.py +291 -0
  134. autogen/agentchat/group/on_condition.py +55 -0
  135. autogen/agentchat/group/on_context_condition.py +51 -0
  136. autogen/agentchat/group/patterns/__init__.py +18 -0
  137. autogen/agentchat/group/patterns/auto.py +160 -0
  138. autogen/agentchat/group/patterns/manual.py +177 -0
  139. autogen/agentchat/group/patterns/pattern.py +295 -0
  140. autogen/agentchat/group/patterns/random.py +106 -0
  141. autogen/agentchat/group/patterns/round_robin.py +117 -0
  142. autogen/agentchat/group/reply_result.py +24 -0
  143. autogen/agentchat/group/safeguards/__init__.py +21 -0
  144. autogen/agentchat/group/safeguards/api.py +241 -0
  145. autogen/agentchat/group/safeguards/enforcer.py +1158 -0
  146. autogen/agentchat/group/safeguards/events.py +140 -0
  147. autogen/agentchat/group/safeguards/validator.py +435 -0
  148. autogen/agentchat/group/speaker_selection_result.py +41 -0
  149. autogen/agentchat/group/targets/__init__.py +4 -0
  150. autogen/agentchat/group/targets/function_target.py +245 -0
  151. autogen/agentchat/group/targets/group_chat_target.py +133 -0
  152. autogen/agentchat/group/targets/group_manager_target.py +151 -0
  153. autogen/agentchat/group/targets/transition_target.py +424 -0
  154. autogen/agentchat/group/targets/transition_utils.py +6 -0
  155. autogen/agentchat/groupchat.py +1832 -0
  156. autogen/agentchat/realtime/__init__.py +3 -0
  157. autogen/agentchat/realtime/experimental/__init__.py +20 -0
  158. autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
  159. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
  160. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
  161. autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
  162. autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
  163. autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
  164. autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
  165. autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
  166. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
  167. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
  168. autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
  169. autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
  170. autogen/agentchat/realtime/experimental/function_observer.py +84 -0
  171. autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
  172. autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
  173. autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
  174. autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
  175. autogen/agentchat/realtime/experimental/websockets.py +21 -0
  176. autogen/agentchat/realtime_agent/__init__.py +21 -0
  177. autogen/agentchat/user_proxy_agent.py +114 -0
  178. autogen/agentchat/utils.py +206 -0
  179. autogen/agents/__init__.py +3 -0
  180. autogen/agents/contrib/__init__.py +10 -0
  181. autogen/agents/contrib/time/__init__.py +8 -0
  182. autogen/agents/contrib/time/time_reply_agent.py +74 -0
  183. autogen/agents/contrib/time/time_tool_agent.py +52 -0
  184. autogen/agents/experimental/__init__.py +27 -0
  185. autogen/agents/experimental/deep_research/__init__.py +7 -0
  186. autogen/agents/experimental/deep_research/deep_research.py +52 -0
  187. autogen/agents/experimental/discord/__init__.py +7 -0
  188. autogen/agents/experimental/discord/discord.py +66 -0
  189. autogen/agents/experimental/document_agent/__init__.py +19 -0
  190. autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
  191. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
  192. autogen/agents/experimental/document_agent/document_agent.py +643 -0
  193. autogen/agents/experimental/document_agent/document_conditions.py +50 -0
  194. autogen/agents/experimental/document_agent/document_utils.py +376 -0
  195. autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
  196. autogen/agents/experimental/document_agent/parser_utils.py +134 -0
  197. autogen/agents/experimental/document_agent/url_utils.py +417 -0
  198. autogen/agents/experimental/reasoning/__init__.py +7 -0
  199. autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
  200. autogen/agents/experimental/slack/__init__.py +7 -0
  201. autogen/agents/experimental/slack/slack.py +73 -0
  202. autogen/agents/experimental/telegram/__init__.py +7 -0
  203. autogen/agents/experimental/telegram/telegram.py +76 -0
  204. autogen/agents/experimental/websurfer/__init__.py +7 -0
  205. autogen/agents/experimental/websurfer/websurfer.py +70 -0
  206. autogen/agents/experimental/wikipedia/__init__.py +7 -0
  207. autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
  208. autogen/browser_utils.py +309 -0
  209. autogen/cache/__init__.py +10 -0
  210. autogen/cache/abstract_cache_base.py +71 -0
  211. autogen/cache/cache.py +203 -0
  212. autogen/cache/cache_factory.py +88 -0
  213. autogen/cache/cosmos_db_cache.py +144 -0
  214. autogen/cache/disk_cache.py +97 -0
  215. autogen/cache/in_memory_cache.py +54 -0
  216. autogen/cache/redis_cache.py +119 -0
  217. autogen/code_utils.py +598 -0
  218. autogen/coding/__init__.py +30 -0
  219. autogen/coding/base.py +120 -0
  220. autogen/coding/docker_commandline_code_executor.py +283 -0
  221. autogen/coding/factory.py +56 -0
  222. autogen/coding/func_with_reqs.py +203 -0
  223. autogen/coding/jupyter/__init__.py +23 -0
  224. autogen/coding/jupyter/base.py +36 -0
  225. autogen/coding/jupyter/docker_jupyter_server.py +160 -0
  226. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  227. autogen/coding/jupyter/import_utils.py +82 -0
  228. autogen/coding/jupyter/jupyter_client.py +224 -0
  229. autogen/coding/jupyter/jupyter_code_executor.py +154 -0
  230. autogen/coding/jupyter/local_jupyter_server.py +164 -0
  231. autogen/coding/local_commandline_code_executor.py +341 -0
  232. autogen/coding/markdown_code_extractor.py +44 -0
  233. autogen/coding/utils.py +55 -0
  234. autogen/coding/yepcode_code_executor.py +197 -0
  235. autogen/doc_utils.py +35 -0
  236. autogen/environments/__init__.py +10 -0
  237. autogen/environments/docker_python_environment.py +365 -0
  238. autogen/environments/python_environment.py +125 -0
  239. autogen/environments/system_python_environment.py +85 -0
  240. autogen/environments/venv_python_environment.py +220 -0
  241. autogen/environments/working_directory.py +74 -0
  242. autogen/events/__init__.py +7 -0
  243. autogen/events/agent_events.py +1016 -0
  244. autogen/events/base_event.py +100 -0
  245. autogen/events/client_events.py +168 -0
  246. autogen/events/helpers.py +44 -0
  247. autogen/events/print_event.py +45 -0
  248. autogen/exception_utils.py +73 -0
  249. autogen/extensions/__init__.py +5 -0
  250. autogen/fast_depends/__init__.py +16 -0
  251. autogen/fast_depends/_compat.py +75 -0
  252. autogen/fast_depends/core/__init__.py +14 -0
  253. autogen/fast_depends/core/build.py +206 -0
  254. autogen/fast_depends/core/model.py +527 -0
  255. autogen/fast_depends/dependencies/__init__.py +15 -0
  256. autogen/fast_depends/dependencies/model.py +30 -0
  257. autogen/fast_depends/dependencies/provider.py +40 -0
  258. autogen/fast_depends/library/__init__.py +10 -0
  259. autogen/fast_depends/library/model.py +46 -0
  260. autogen/fast_depends/py.typed +6 -0
  261. autogen/fast_depends/schema.py +66 -0
  262. autogen/fast_depends/use.py +272 -0
  263. autogen/fast_depends/utils.py +177 -0
  264. autogen/formatting_utils.py +83 -0
  265. autogen/function_utils.py +13 -0
  266. autogen/graph_utils.py +173 -0
  267. autogen/import_utils.py +539 -0
  268. autogen/interop/__init__.py +22 -0
  269. autogen/interop/crewai/__init__.py +7 -0
  270. autogen/interop/crewai/crewai.py +88 -0
  271. autogen/interop/interoperability.py +71 -0
  272. autogen/interop/interoperable.py +46 -0
  273. autogen/interop/langchain/__init__.py +8 -0
  274. autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
  275. autogen/interop/langchain/langchain_tool.py +78 -0
  276. autogen/interop/litellm/__init__.py +7 -0
  277. autogen/interop/litellm/litellm_config_factory.py +178 -0
  278. autogen/interop/pydantic_ai/__init__.py +7 -0
  279. autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
  280. autogen/interop/registry.py +70 -0
  281. autogen/io/__init__.py +15 -0
  282. autogen/io/base.py +151 -0
  283. autogen/io/console.py +56 -0
  284. autogen/io/processors/__init__.py +12 -0
  285. autogen/io/processors/base.py +21 -0
  286. autogen/io/processors/console_event_processor.py +61 -0
  287. autogen/io/run_response.py +294 -0
  288. autogen/io/thread_io_stream.py +63 -0
  289. autogen/io/websockets.py +214 -0
  290. autogen/json_utils.py +42 -0
  291. autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
  292. autogen/llm_clients/__init__.py +77 -0
  293. autogen/llm_clients/client_v2.py +122 -0
  294. autogen/llm_clients/models/__init__.py +55 -0
  295. autogen/llm_clients/models/content_blocks.py +389 -0
  296. autogen/llm_clients/models/unified_message.py +145 -0
  297. autogen/llm_clients/models/unified_response.py +83 -0
  298. autogen/llm_clients/openai_completions_client.py +444 -0
  299. autogen/llm_config/__init__.py +11 -0
  300. autogen/llm_config/client.py +59 -0
  301. autogen/llm_config/config.py +461 -0
  302. autogen/llm_config/entry.py +169 -0
  303. autogen/llm_config/types.py +37 -0
  304. autogen/llm_config/utils.py +223 -0
  305. autogen/logger/__init__.py +11 -0
  306. autogen/logger/base_logger.py +129 -0
  307. autogen/logger/file_logger.py +262 -0
  308. autogen/logger/logger_factory.py +42 -0
  309. autogen/logger/logger_utils.py +57 -0
  310. autogen/logger/sqlite_logger.py +524 -0
  311. autogen/math_utils.py +338 -0
  312. autogen/mcp/__init__.py +7 -0
  313. autogen/mcp/__main__.py +78 -0
  314. autogen/mcp/helpers.py +45 -0
  315. autogen/mcp/mcp_client.py +349 -0
  316. autogen/mcp/mcp_proxy/__init__.py +19 -0
  317. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
  318. autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
  319. autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
  320. autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
  321. autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
  322. autogen/mcp/mcp_proxy/security.py +399 -0
  323. autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
  324. autogen/messages/__init__.py +7 -0
  325. autogen/messages/agent_messages.py +946 -0
  326. autogen/messages/base_message.py +108 -0
  327. autogen/messages/client_messages.py +172 -0
  328. autogen/messages/print_message.py +48 -0
  329. autogen/oai/__init__.py +61 -0
  330. autogen/oai/anthropic.py +1516 -0
  331. autogen/oai/bedrock.py +800 -0
  332. autogen/oai/cerebras.py +302 -0
  333. autogen/oai/client.py +1658 -0
  334. autogen/oai/client_utils.py +196 -0
  335. autogen/oai/cohere.py +494 -0
  336. autogen/oai/gemini.py +1045 -0
  337. autogen/oai/gemini_types.py +156 -0
  338. autogen/oai/groq.py +319 -0
  339. autogen/oai/mistral.py +311 -0
  340. autogen/oai/oai_models/__init__.py +23 -0
  341. autogen/oai/oai_models/_models.py +16 -0
  342. autogen/oai/oai_models/chat_completion.py +86 -0
  343. autogen/oai/oai_models/chat_completion_audio.py +32 -0
  344. autogen/oai/oai_models/chat_completion_message.py +97 -0
  345. autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
  346. autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
  347. autogen/oai/oai_models/completion_usage.py +59 -0
  348. autogen/oai/ollama.py +657 -0
  349. autogen/oai/openai_responses.py +451 -0
  350. autogen/oai/openai_utils.py +897 -0
  351. autogen/oai/together.py +387 -0
  352. autogen/remote/__init__.py +18 -0
  353. autogen/remote/agent.py +199 -0
  354. autogen/remote/agent_service.py +197 -0
  355. autogen/remote/errors.py +17 -0
  356. autogen/remote/httpx_client_factory.py +131 -0
  357. autogen/remote/protocol.py +37 -0
  358. autogen/remote/retry.py +102 -0
  359. autogen/remote/runtime.py +96 -0
  360. autogen/retrieve_utils.py +490 -0
  361. autogen/runtime_logging.py +161 -0
  362. autogen/testing/__init__.py +12 -0
  363. autogen/testing/messages.py +45 -0
  364. autogen/testing/test_agent.py +111 -0
  365. autogen/token_count_utils.py +280 -0
  366. autogen/tools/__init__.py +20 -0
  367. autogen/tools/contrib/__init__.py +9 -0
  368. autogen/tools/contrib/time/__init__.py +7 -0
  369. autogen/tools/contrib/time/time.py +40 -0
  370. autogen/tools/dependency_injection.py +249 -0
  371. autogen/tools/experimental/__init__.py +54 -0
  372. autogen/tools/experimental/browser_use/__init__.py +7 -0
  373. autogen/tools/experimental/browser_use/browser_use.py +154 -0
  374. autogen/tools/experimental/code_execution/__init__.py +7 -0
  375. autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
  376. autogen/tools/experimental/crawl4ai/__init__.py +7 -0
  377. autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
  378. autogen/tools/experimental/deep_research/__init__.py +7 -0
  379. autogen/tools/experimental/deep_research/deep_research.py +329 -0
  380. autogen/tools/experimental/duckduckgo/__init__.py +7 -0
  381. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
  382. autogen/tools/experimental/firecrawl/__init__.py +7 -0
  383. autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
  384. autogen/tools/experimental/google/__init__.py +14 -0
  385. autogen/tools/experimental/google/authentication/__init__.py +11 -0
  386. autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
  387. autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
  388. autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
  389. autogen/tools/experimental/google/drive/__init__.py +9 -0
  390. autogen/tools/experimental/google/drive/drive_functions.py +124 -0
  391. autogen/tools/experimental/google/drive/toolkit.py +88 -0
  392. autogen/tools/experimental/google/model.py +17 -0
  393. autogen/tools/experimental/google/toolkit_protocol.py +19 -0
  394. autogen/tools/experimental/google_search/__init__.py +8 -0
  395. autogen/tools/experimental/google_search/google_search.py +93 -0
  396. autogen/tools/experimental/google_search/youtube_search.py +181 -0
  397. autogen/tools/experimental/messageplatform/__init__.py +17 -0
  398. autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
  399. autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
  400. autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
  401. autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
  402. autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
  403. autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
  404. autogen/tools/experimental/perplexity/__init__.py +7 -0
  405. autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
  406. autogen/tools/experimental/reliable/__init__.py +10 -0
  407. autogen/tools/experimental/reliable/reliable.py +1311 -0
  408. autogen/tools/experimental/searxng/__init__.py +7 -0
  409. autogen/tools/experimental/searxng/searxng_search.py +142 -0
  410. autogen/tools/experimental/tavily/__init__.py +7 -0
  411. autogen/tools/experimental/tavily/tavily_search.py +176 -0
  412. autogen/tools/experimental/web_search_preview/__init__.py +7 -0
  413. autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
  414. autogen/tools/experimental/wikipedia/__init__.py +7 -0
  415. autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
  416. autogen/tools/function_utils.py +412 -0
  417. autogen/tools/tool.py +188 -0
  418. autogen/tools/toolkit.py +86 -0
  419. autogen/types.py +29 -0
  420. autogen/version.py +7 -0
  421. templates/client_template/main.jinja2 +72 -0
  422. templates/config_template/config.jinja2 +7 -0
  423. templates/main.jinja2 +61 -0
autogen/agentchat/conversable_agent.py
@@ -0,0 +1,4307 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
import asyncio
import copy
import functools
import inspect
import json
import logging
import re
import threading
import warnings
from collections import defaultdict
from collections.abc import Callable, Container, Generator, Iterable
from contextlib import contextmanager
from dataclasses import dataclass
from inspect import signature
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    Optional,
    TypeVar,
    Union,
)

from ..cache.cache import AbstractCache, Cache
from ..code_utils import (
    PYTHON_VARIANTS,
    UNKNOWN,
    check_can_use_docker_or_throw,
    content_str,
    decide_use_docker,
    execute_code,
    extract_code,
    infer_lang,
)
from ..coding.base import CodeExecutor
from ..coding.factory import CodeExecutorFactory
from ..doc_utils import export_module
from ..events.agent_events import (
    ClearConversableAgentHistoryEvent,
    ClearConversableAgentHistoryWarningEvent,
    ConversableAgentUsageSummaryEvent,
    ConversableAgentUsageSummaryNoCostIncurredEvent,
    ErrorEvent,
    ExecuteCodeBlockEvent,
    ExecuteFunctionEvent,
    ExecutedFunctionEvent,
    GenerateCodeExecutionReplyEvent,
    PostCarryoverProcessingEvent,
    RunCompletionEvent,
    TerminationAndHumanReplyNoInputEvent,
    TerminationEvent,
    UsingAutoReplyEvent,
    create_received_event_model,
)
from ..exception_utils import InvalidCarryOverTypeError, SenderRequiredError
from ..fast_depends.utils import is_coroutine_callable
from ..io.base import AsyncIOStreamProtocol, AsyncInputStream, IOStream, IOStreamProtocol, InputStream
from ..io.run_response import AsyncRunResponse, AsyncRunResponseProtocol, RunResponse, RunResponseProtocol
from ..io.thread_io_stream import AsyncThreadIOStream, ThreadIOStream
from ..llm_config import LLMConfig
from ..llm_config.client import ModelClient
from ..oai.client import OpenAIWrapper
from ..runtime_logging import log_event, log_function_use, log_new_agent, logging_enabled
from ..tools import ChatContext, Tool, load_basemodels_if_needed, serialize_to_str
from .agent import Agent, LLMAgent
from .chat import (
    ChatResult,
    _post_process_carryover_item,
    _validate_recipients,
    a_initiate_chats,
    initiate_chats,
)
from .group.context_variables import ContextVariables
from .group.guardrails import Guardrail, GuardrailResult
from .group.handoffs import Handoffs
from .utils import consolidate_chat_info, gather_usage_summary

if TYPE_CHECKING:
    from .group.on_condition import OnCondition
    from .group.on_context_condition import OnContextCondition

__all__ = ("ConversableAgent",)

logger = logging.getLogger(__name__)

F = TypeVar("F", bound=Callable[..., Any])


@dataclass
@export_module("autogen")
class UpdateSystemMessage:
    """Update the agent's system message before they reply.

    Args:
        content_updater: The format string or function to update the agent's system message. Can be a format string or a Callable.
            If a string, it will be used as a template and substitute the context variables.
            If a Callable, it should have the signature:
                def my_content_updater(agent: ConversableAgent, messages: List[Dict[str, Any]]) -> str
    """

    content_updater: Callable | str

    def __post_init__(self):
        if isinstance(self.content_updater, str):
            # find all {var} in the string
            vars = re.findall(r"\{(\w+)\}", self.content_updater)
            if len(vars) == 0:
                warnings.warn("Update function string contains no variables. This is probably unintended.")

        elif isinstance(self.content_updater, Callable):
            sig = signature(self.content_updater)
            if len(sig.parameters) != 2:
                raise ValueError(
                    "The update function must accept two parameters of type ConversableAgent and List[Dict[str, Any]], respectively"
                )
            if sig.return_annotation != str:
                raise ValueError("The update function must return a string")
        else:
            raise ValueError("The update function must be either a string or a callable")

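# Example (editor's illustrative sketch, not part of the packaged source): the two
# supported forms of `content_updater`. The context variable `user_name` and the
# helper `summarize_progress` are assumed names for illustration only:
#
#     UpdateSystemMessage("You are a helpful assistant. The user's name is {user_name}.")
#
#     def summarize_progress(agent: "ConversableAgent", messages: list[dict[str, Any]]) -> str:
#         return f"You are a helpful assistant. {len(messages)} messages exchanged so far."
#
#     UpdateSystemMessage(summarize_progress)
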
@export_module("autogen")
class ConversableAgent(LLMAgent):
    """(In preview) A class for generic conversable agents which can be configured as assistant or user proxy.

    After receiving each message, the agent will send a reply to the sender unless the msg is a termination msg.
    For example, AssistantAgent and UserProxyAgent are subclasses of this class,
    configured with different default settings.

    To modify auto reply, override the `generate_reply` method.
    To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS".
    To modify the way to get human input, override the `get_human_input` method.
    To modify the way to execute code blocks, a single code block, or a function call, override the
    `execute_code_blocks`, `run_code`, and `execute_function` methods respectively.
    """

    DEFAULT_CONFIG = False  # False or dict, the default config for llm inference
    MAX_CONSECUTIVE_AUTO_REPLY = 100  # maximum number of consecutive auto replies (subject to future change)

    DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
    DEFAULT_SUMMARY_METHOD = "last_msg"
    llm_config: LLMConfig | Literal[False]

    def __init__(
        self,
        name: str,
        system_message: str | list | None = "You are a helpful AI Assistant.",
        is_termination_msg: Callable[[dict[str, Any]], bool] | None = None,
        max_consecutive_auto_reply: int | None = None,
        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
        function_map: dict[str, Callable[..., Any]] | None = None,
        code_execution_config: dict[str, Any] | Literal[False] = False,
        llm_config: LLMConfig | dict[str, Any] | Literal[False] | None = None,
        default_auto_reply: str | dict[str, Any] = "",
        description: str | None = None,
        chat_messages: dict[Agent, list[dict[str, Any]]] | None = None,
        silent: bool | None = None,
        context_variables: Optional["ContextVariables"] = None,
        functions: list[Callable[..., Any]] | Callable[..., Any] | None = None,
        update_agent_state_before_reply: list[Callable | UpdateSystemMessage]
        | Callable
        | UpdateSystemMessage
        | None = None,
        handoffs: Handoffs | None = None,
    ):
        """Args:
            name (str): name of the agent.
            system_message (str or list): system message for the ChatCompletion inference.
            is_termination_msg (function): a function that takes a message in the form of a dictionary
                and returns a boolean value indicating if this received message is a termination message.
                The dict can contain the following keys: "content", "role", "name", "function_call".
            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
                Defaults to None (no limit provided; the class attribute MAX_CONSECUTIVE_AUTO_REPLY will be
                used as the limit in this case). When set to 0, no auto reply will be generated.
            human_input_mode (str): whether to ask for human inputs every time a message is received.
                Possible values are "ALWAYS", "TERMINATE", "NEVER".
                (1) When "ALWAYS", the agent prompts for human input every time a message is received.
                    Under this mode, the conversation stops when the human input is "exit",
                    or when is_termination_msg is True and there is no human input.
                (2) When "TERMINATE", the agent only prompts for human input when a termination message is
                    received or the number of auto replies reaches max_consecutive_auto_reply.
                (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation
                    stops when the number of auto replies reaches max_consecutive_auto_reply or when
                    is_termination_msg is True.
            function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls.
            code_execution_config (dict or False): config for the code execution.
                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:
                - work_dir (Optional, str): The working directory for the code execution.
                    If None, a default working directory will be used.
                    The default working directory is the "extensions" directory under "path_to_autogen".
                - use_docker (Optional, list, str or bool): The docker image to use for code execution.
                    Default is True, which means the code will be executed in a docker container. A default list of images will be used.
                    If a list or a str of image name(s) is provided, the code will be executed in a docker container
                    with the first image successfully pulled.
                    If False, the code will be executed in the current environment.
                    We strongly recommend using docker for code execution.
                - timeout (Optional, int): The maximum execution time in seconds.
                - last_n_messages (Experimental, int or str): The number of messages to look back for code execution.
                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke,
                    which is typically the last time execution was attempted. (Default: auto)
            llm_config (LLMConfig or dict or False or None): llm inference configuration.
                Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)
                for available options.
                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
                To disable llm-based auto reply, set to False.
                When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
            default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
            description (str): a short description of the agent. This description is used by other agents
                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
            chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
                Can be used to give the agent a memory by providing the chat history. This will allow the agent to
                resume previously held conversations. Defaults to an empty chat history.
            silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
                silent in each function.
            context_variables (ContextVariables or None): Context variables that provide a persistent context for the agent.
                Note: this will be a reference to a shared context for multi-agent chats.
                Behaves like a dictionary with keys and values (akin to dict[str, Any]).
            functions (list[Callable[..., Any]] or Callable[..., Any]): A list of functions to register with the
                agent; these will be wrapped up as tools and registered for the LLM (not for execution).
            update_agent_state_before_reply (list[Callable or UpdateSystemMessage]): A list of functions, including
                UpdateSystemMessage instances, called to update the agent's state before it replies.
            handoffs (Handoffs): Handoffs object containing all handoff transition conditions.
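
        Example:
            A minimal construction sketch (editor's illustration, not from the package;
            the model name and the `get_weather` tool are assumptions):

            ```python
            def get_weather(city: str) -> str:
                # Stub tool; a real implementation might call a weather API.
                return f"It is sunny in {city}."

            agent = ConversableAgent(
                name="assistant",
                system_message="You are a helpful AI Assistant.",
                human_input_mode="NEVER",  # reply automatically, never prompt a human
                llm_config={"api_type": "openai", "model": "gpt-4o-mini"},
                functions=[get_weather],  # wrapped as a tool and registered for the LLM
                code_execution_config=False,  # disable code execution
            )
            ```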
        """
        self.handoffs = handoffs if handoffs is not None else Handoffs()
        self.input_guardrails: list[Guardrail] = []
        self.output_guardrails: list[Guardrail] = []

        # We change code_execution_config below and have to make sure we don't change the input;
        # in the case of UserProxyAgent, without this we could even change the default value {}
        code_execution_config = (
            code_execution_config.copy() if hasattr(code_execution_config, "copy") else code_execution_config
        )

        # a dictionary of conversations, default value is list
        if chat_messages is None:
            self._oai_messages = defaultdict(list)
        else:
            self._oai_messages = chat_messages

        self._oai_system_message = [{"content": system_message, "role": "system"}]
        self._description = description if description is not None else system_message
        self._is_termination_msg = (
            is_termination_msg
            if is_termination_msg is not None
            else (lambda x: content_str(x.get("content")) == "TERMINATE")
        )
        self.silent = silent
        self.run_executor: ConversableAgent | None = None

        # Take a copy to avoid modifying the given dict
        if isinstance(llm_config, dict):
            try:
                llm_config = copy.deepcopy(llm_config)
            except TypeError as e:
                raise TypeError(
                    "Please implement __deepcopy__ method for each value class in llm_config to support deepcopy."
                    " Refer to the docs for more details: https://docs.ag2.ai/docs/user-guide/advanced-concepts/llm-configuration-deep-dive/#adding-http-client-in-llm_config-for-proxy"
                ) from e

        self.llm_config = self._validate_llm_config(llm_config)
        self.client = self._create_client(self.llm_config)
        self._validate_name(name)
        self._name = name

        if logging_enabled():
            log_new_agent(self, locals())

        # Initialize standalone client cache object.
        self.client_cache = None

        # To track UI tools
        self._ui_tools: list[Tool] = []

        self.human_input_mode = human_input_mode
        self._max_consecutive_auto_reply = (
            max_consecutive_auto_reply if max_consecutive_auto_reply is not None else self.MAX_CONSECUTIVE_AUTO_REPLY
        )
        self._consecutive_auto_reply_counter = defaultdict(int)
        self._max_consecutive_auto_reply_dict = defaultdict(self.max_consecutive_auto_reply)
        self._function_map = (
            {}
            if function_map is None
            else {name: callable for name, callable in function_map.items() if self._assert_valid_name(name)}
        )
        self._default_auto_reply = default_auto_reply
        self._reply_func_list = []
        self._human_input = []
        self.reply_at_receive = defaultdict(bool)
        self.register_reply([Agent, None], ConversableAgent.generate_oai_reply)
        self.register_reply([Agent, None], ConversableAgent.a_generate_oai_reply, ignore_async_in_sync_chat=True)

        self.context_variables = context_variables if context_variables is not None else ContextVariables()

        self._tools: list[Tool] = []

        # Register functions to the agent
        if isinstance(functions, list):
            if not all(isinstance(func, Callable) for func in functions):
                raise TypeError("All elements in the functions list must be callable")
            self._add_functions(functions)
        elif isinstance(functions, Callable):
            self._add_single_function(functions)
        elif functions is not None:
            raise TypeError("Functions must be a callable or a list of callables")

        # Setting up code execution.
        # Do not register code execution reply if code execution is disabled.
        if code_execution_config is not False:
            # If code_execution_config is None, set it to an empty dict.
            if code_execution_config is None:
                warnings.warn(
                    "Using None to signal a default code_execution_config is deprecated. "
                    "Use {} to use default or False to disable code execution.",
                    stacklevel=2,
                )
                code_execution_config = {}
            if not isinstance(code_execution_config, dict):
                raise ValueError("code_execution_config must be a dict or False.")

            # We have got a valid code_execution_config.
            self._code_execution_config: dict[str, Any] | Literal[False] = code_execution_config

            if self._code_execution_config.get("executor") is not None:
                if "use_docker" in self._code_execution_config:
                    raise ValueError(
                        "'use_docker' in code_execution_config is not valid when 'executor' is set. Use the appropriate arg in the chosen executor instead."
                    )

                if "work_dir" in self._code_execution_config:
                    raise ValueError(
                        "'work_dir' in code_execution_config is not valid when 'executor' is set. Use the appropriate arg in the chosen executor instead."
                    )

                if "timeout" in self._code_execution_config:
                    raise ValueError(
                        "'timeout' in code_execution_config is not valid when 'executor' is set. Use the appropriate arg in the chosen executor instead."
                    )

                # Use the new code executor.
                self._code_executor = CodeExecutorFactory.create(self._code_execution_config)
                self.register_reply([Agent, None], ConversableAgent._generate_code_execution_reply_using_executor)
            else:
                # Legacy code execution using code_utils.
                use_docker = self._code_execution_config.get("use_docker", None)
                use_docker = decide_use_docker(use_docker)
                check_can_use_docker_or_throw(use_docker)
                self._code_execution_config["use_docker"] = use_docker
                self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
        else:
            # Code execution is disabled.
            self._code_execution_config = False

        self.register_reply([Agent, None], ConversableAgent.generate_tool_calls_reply)
        self.register_reply([Agent, None], ConversableAgent.a_generate_tool_calls_reply, ignore_async_in_sync_chat=True)
        self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
        self.register_reply(
            [Agent, None], ConversableAgent.a_generate_function_call_reply, ignore_async_in_sync_chat=True
        )
        self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
        self.register_reply(
            [Agent, None], ConversableAgent.a_check_termination_and_human_reply, ignore_async_in_sync_chat=True
        )

        # Registered hooks are kept in lists, indexed by hookable method, to be called in their order of registration.
        # New hookable methods should be added to this list as required to support new agent capabilities.
        self.hook_lists: dict[str, list[Callable[..., Any]]] = {
            "process_last_received_message": [],
            "process_all_messages_before_reply": [],
            "process_message_before_send": [],
            "update_agent_state": [],
            # Safeguard hooks for monitoring agent interactions
            "safeguard_tool_inputs": [],  # Hook for processing tool inputs before execution
            "safeguard_tool_outputs": [],  # Hook for processing tool outputs after execution
            "safeguard_llm_inputs": [],  # Hook for processing LLM inputs before sending
            "safeguard_llm_outputs": [],  # Hook for processing LLM outputs after receiving
            "safeguard_human_inputs": [],  # Hook for processing human inputs
        }

        # Associate agent update state hooks
        self._register_update_agent_state_before_reply(update_agent_state_before_reply)

    def _validate_name(self, name: str) -> None:
        if not self.llm_config:
            return

        if any(entry for entry in self.llm_config.config_list if entry.api_type == "openai" and re.search(r"\s", name)):
            raise ValueError(f"The name of the agent cannot contain any whitespace. The name provided is: '{name}'")

    def _get_display_name(self):
        """Get the string representation of the agent.

        If you would like to change the standard string representation for an
        instance of ConversableAgent, you can point it to another function.
        In this example, a function called _group_agent_str returns a string:
        agent._get_display_name = MethodType(_group_agent_str, agent)
        """
        return self.name

    def __str__(self):
        return self._get_display_name()

    def _add_functions(self, func_list: list[Callable[..., Any]]):
        """Add (register) a list of functions to the agent.

        Args:
            func_list (list[Callable[..., Any]]): A list of functions to register with the agent.
        """
        for func in func_list:
            self._add_single_function(func)

    def _add_single_function(self, func: Callable, name: str | None = None, description: str | None = ""):
        """Add a single function to the agent.

        Args:
            func (Callable): The function to register.
            name (str): The name of the function. If not provided, the function's name will be used.
            description (str): The description of the function, used by the LLM. If not provided, the function's docstring will be used.
        """
        if name:
            func._name = name
        elif not hasattr(func, "_name"):
            func._name = func.__name__

        if hasattr(func, "_description") and func._description and not description:
            # If the function already has a description, use it
            description = func._description
        else:
            if description:
                func._description = description
            else:
                # Use the function's docstring, stripped of whitespace, falling back to an empty string
                description = (func.__doc__ or "").strip()
                func._description = description

        # Register the function
        self.register_for_llm(name=name, description=description, silent_override=True)(func)

    def _register_update_agent_state_before_reply(
        self, functions: list[Callable[..., Any]] | Callable[..., Any] | None
    ):
        """Register functions that will be called when the agent is selected and before it speaks.
        You can add your own validation or precondition functions here.

        Args:
            functions (list[Callable[[], None]]): A list of functions to be registered. Each function
                is called when the agent is selected and before it speaks.
        """
        if functions is None:
            return
        if not isinstance(functions, list) and not isinstance(functions, (UpdateSystemMessage, Callable)):
            raise ValueError("functions must be a list of callables")

        if not isinstance(functions, list):
            functions = [functions]

        for func in functions:
            if isinstance(func, UpdateSystemMessage):
                # Wrapper function that allows this to be used in the update_agent_state hook.
                # Its primary purpose, however, is just to update the agent's system message.
                # Outer function to create a closure with the update function.
                def create_wrapper(update_func: UpdateSystemMessage):
                    def update_system_message_wrapper(
                        agent: ConversableAgent, messages: list[dict[str, Any]]
                    ) -> list[dict[str, Any]]:
                        if isinstance(update_func.content_updater, str):
                            # Templates like "My context variable passport is {passport}" will
                            # use the context_variables for substitution
                            sys_message = OpenAIWrapper.instantiate(
                                template=update_func.content_updater,
                                context=agent.context_variables.to_dict(),
                                allow_format_str_template=True,
                            )
                        else:
                            sys_message = update_func.content_updater(agent, messages)

                        agent.update_system_message(sys_message)
                        return messages

                    return update_system_message_wrapper

                self.register_hook(hookable_method="update_agent_state", hook=create_wrapper(func))

            else:
                self.register_hook(hookable_method="update_agent_state", hook=func)

    @classmethod
    def _validate_llm_config(
        cls, llm_config: LLMConfig | dict[str, Any] | Literal[False] | None
    ) -> LLMConfig | Literal[False]:
        if llm_config is None:
            llm_config = LLMConfig.get_current_llm_config()
            if llm_config is None:
                return cls.DEFAULT_CONFIG

        elif llm_config is False:
            return False

        return LLMConfig.ensure_config(llm_config)

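    # Editor's note (illustrative sketch, not part of the packaged source): because
    # `_validate_llm_config` falls back to `LLMConfig.get_current_llm_config()` when
    # `llm_config` is None, an agent constructed inside an active LLMConfig context
    # inherits that config. The model name below is an assumption:
    #
    #     with LLMConfig(model="gpt-4o-mini", api_type="openai"):
    #         agent = ConversableAgent(name="helper")  # picks up the surrounding config
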
    @classmethod
    def _create_client(cls, llm_config: LLMConfig | Literal[False]) -> OpenAIWrapper | None:
        return None if llm_config is False else OpenAIWrapper(**llm_config)

    @staticmethod
    def _is_silent(agent: Agent, silent: bool | None = False) -> bool:
        return agent.silent if agent.silent is not None else silent

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def description(self) -> str:
        """Get the description of the agent."""
        return self._description

    @description.setter
    def description(self, description: str):
        """Set the description of the agent."""
        self._description = description

    @property
    def code_executor(self) -> CodeExecutor | None:
        """The code executor used by this agent. Returns None if code execution is disabled."""
        if not hasattr(self, "_code_executor"):
            return None
        return self._code_executor

    def register_reply(
        self,
        trigger: type[Agent] | str | Agent | Callable[[Agent], bool] | list,
        reply_func: Callable,
        position: int = 0,
        config: Any | None = None,
        reset_config: Callable[..., Any] | None = None,
        *,
        ignore_async_in_sync_chat: bool = False,
        remove_other_reply_funcs: bool = False,
    ):
        """Register a reply function.

        The reply function will be called when the trigger matches the sender.
        The function registered later will be checked earlier by default.
        To change the order, set the position to a positive integer.

        Both sync and async reply functions can be registered. A sync reply function will be triggered
        from both sync and async chats. However, an async reply function will only be triggered from async
        chats (initiated with `ConversableAgent.a_initiate_chat`). If an `async` reply function is registered
        and a chat is initialized with a sync function, `ignore_async_in_sync_chat` determines the behaviour as follows:
        if `ignore_async_in_sync_chat` is set to `False` (default value), an exception will be raised, and
        if `ignore_async_in_sync_chat` is set to `True`, the reply function will be ignored.

        Args:
            trigger (Agent class, str, Agent instance, callable, or list): the trigger.
                If a class is provided, the reply function will be called when the sender is an instance of the class.
                If a string is provided, the reply function will be called when the sender's name matches the string.
                If an agent instance is provided, the reply function will be called when the sender is the agent instance.
                If a callable is provided, the reply function will be called when the callable returns True.
                If a list is provided, the reply function will be called when any of the triggers in the list is activated.
                If None is provided, the reply function will be called only when the sender is None.
                Note: Be sure to register `None` as a trigger if you would like to trigger an auto-reply function with non-empty messages and `sender=None`.
            reply_func (Callable): the reply function.
                The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.

                ```python
                def reply_func(
                    recipient: ConversableAgent,
                    messages: Optional[List[Dict]] = None,
                    sender: Optional[Agent] = None,
                    config: Optional[Any] = None,
                ) -> Tuple[bool, Union[str, Dict, None]]:
                ```
            position (int): the position of the reply function in the reply function list.
                The function registered later will be checked earlier by default.
                To change the order, set the position to a positive integer.
            config (Any): the config to be passed to the reply function.
                When an agent is reset, the config will be reset to the original value.
            reset_config (Callable): the function to reset the config.
                The function returns None. Signature: ```def reset_config(config: Any)```
            ignore_async_in_sync_chat (bool): whether to ignore the async reply function in sync chats. If `False`, an exception
                will be raised if an async reply function is registered and a chat is initialized with a sync
                function.
            remove_other_reply_funcs (bool): whether to remove other reply functions when registering this reply function.
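
        Example:
            A minimal sketch (editor's illustration; `echo_reply` is an assumed name)
            registering a custom reply function triggered for any agent sender or `None`:

            ```python
            def echo_reply(recipient, messages=None, sender=None, config=None):
                last = messages[-1].get("content", "") if messages else ""
                # True marks the reply as final, so later reply functions are skipped.
                return True, f"You said: {last}"

            agent.register_reply([Agent, None], echo_reply, position=1)
            ```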
587
+ """
588
+ if not isinstance(trigger, (type, str, Agent, Callable, list)):
589
+ raise ValueError("trigger must be a class, a string, an agent, a callable or a list.")
590
+ if remove_other_reply_funcs:
591
+ self._reply_func_list.clear()
592
+ self._reply_func_list.insert(
593
+ position,
594
+ {
595
+ "trigger": trigger,
596
+ "reply_func": reply_func,
597
+ "config": copy.copy(config),
598
+ "init_config": config,
599
+ "reset_config": reset_config,
600
+ "ignore_async_in_sync_chat": ignore_async_in_sync_chat and is_coroutine_callable(reply_func),
601
+ },
602
+ )
603
+
604
+ def replace_reply_func(self, old_reply_func: Callable, new_reply_func: Callable):
605
+ """Replace a registered reply function with a new one.
606
+
607
+ Args:
608
+ old_reply_func (Callable): the old reply function to be replaced.
609
+ new_reply_func (Callable): the new reply function to replace the old one.
610
+ """
611
+ for f in self._reply_func_list:
612
+ if f["reply_func"] == old_reply_func:
613
+ f["reply_func"] = new_reply_func
614
+
615
+ @staticmethod
616
+ def _get_chats_to_run(
617
+ chat_queue: list[dict[str, Any]],
618
+ recipient: Agent,
619
+ messages: list[dict[str, Any]] | None,
620
+ sender: Agent,
621
+ config: Any,
622
+ ) -> list[dict[str, Any]]:
623
+ """A simple chat reply function.
624
+ This function initiate one or a sequence of chats between the "recipient" and the agents in the
625
+ chat_queue.
626
+
627
+ It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.
628
+
629
+ Returns:
630
+ Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
631
+ """
632
+ last_msg = messages[-1].get("content")
633
+ chat_to_run = []
634
+ for i, c in enumerate(chat_queue):
635
+ current_c = c.copy()
636
+ if current_c.get("sender") is None:
637
+ current_c["sender"] = recipient
638
+ message = current_c.get("message")
639
+ # If message is not provided in chat_queue, we by default use the last message from the original chat history as the first message in this nested chat (for the first chat in the chat queue).
640
+ # NOTE: This setting is prone to change.
641
+ if message is None and i == 0:
642
+ message = last_msg
643
+ if callable(message):
644
+ message = message(recipient, messages, sender, config)
645
+ # We only run chat that has a valid message. NOTE: This is prone to change depending on applications.
646
+ if message:
647
+ current_c["message"] = message
648
+ chat_to_run.append(current_c)
649
+ return chat_to_run
650
+
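To make the message-resolution rules above concrete, here is a hypothetical chat_queue; the agents are assumed to exist and the names are illustrative:

```python
chat_queue = [
    {
        # No "message": the first chat falls back to the last message of
        # the parent chat history.
        "recipient": reviewer_agent,
        "summary_method": "last_msg",
        "max_turns": 1,
    },
    {
        # A callable message is invoked as message(recipient, messages, sender, config).
        "recipient": writer_agent,
        "message": lambda recipient, messages, sender, config: "Revise the draft accordingly.",
        "summary_method": "reflection_with_llm",
    },
]
```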
651
+ @staticmethod
652
+ def _process_nested_chat_carryover(
653
+ chat: dict[str, Any],
654
+ recipient: Agent,
655
+ messages: list[dict[str, Any]],
656
+ sender: Agent,
657
+ config: Any,
658
+ trim_n_messages: int = 0,
659
+ ) -> None:
660
+ """Process carryover messages for a nested chat (typically for the first chat of a group chat)
661
+
662
+ The carryover_config key is a dictionary containing:
663
+ "summary_method": The method to use to summarise the messages, can be "all", "last_msg", "reflection_with_llm" or a Callable
664
+ "summary_args": Optional arguments for the summary method
665
+
666
+ Supported carryover 'summary_methods' are:
667
+ "all" - all messages will be incorporated
668
+ "last_msg" - the last message will be incorporated
669
+ "reflection_with_llm" - an llm will summarise all the messages and the summary will be incorporated as a single message
670
+ Callable - a callable with the signature: my_method(agent: ConversableAgent, messages: List[Dict[str, Any]], summary_args: Dict[str, Any]) -> str
671
+
672
+ Args:
673
+ chat: The chat dictionary containing the carryover configuration
674
+ recipient: The recipient agent
675
+ messages: The messages from the parent chat
676
+ sender: The sender agent
677
+ config: The LLM configuration
678
+ trim_n_messages: The number of latest messages to trim from the messages list
679
+ """
680
+
681
+ def concat_carryover(chat_message: str, carryover_message: str | list[dict[str, Any]]) -> str:
682
+ """Concatenate the carryover message to the chat message."""
683
+ prefix = f"{chat_message}\n" if chat_message else ""
684
+
685
+ if isinstance(carryover_message, str):
686
+ content = carryover_message
687
+ elif isinstance(carryover_message, list):
688
+ content = "\n".join(
689
+ msg["content"] for msg in carryover_message if "content" in msg and msg["content"] is not None
690
+ )
691
+ else:
692
+ raise ValueError("Carryover message must be a string or a list of dictionaries")
693
+
694
+ return f"{prefix}Context:\n{content}"
695
+
696
+ carryover_config = chat["carryover_config"]
697
+
698
+ if "summary_method" not in carryover_config:
699
+ raise ValueError("Carryover configuration must contain a 'summary_method' key")
700
+
701
+ carryover_summary_method = carryover_config["summary_method"]
702
+ carryover_summary_args = carryover_config.get("summary_args") or {}
703
+
704
+ chat_message = ""
705
+ message = chat.get("message")
706
+
707
+ # If the message is a callable, run it and get the result
708
+ if message:
709
+ chat_message = message(recipient, messages, sender, config) if callable(message) else message
710
+
711
+ # deep copy and trim the latest messages
712
+ content_messages = copy.deepcopy(messages)
713
+ content_messages = content_messages[:-trim_n_messages] if trim_n_messages > 0 else content_messages  # note: [:-0] would drop all messages
714
+
715
+ if carryover_summary_method == "all":
716
+ # Put a string concatenated value of all parent messages into the first message
717
+ # (e.g. message = <first nested chat message>\nContext: \n<chat message 1>\n<chat message 2>\n...)
718
+ carry_over_message = concat_carryover(chat_message, content_messages)
719
+
720
+ elif carryover_summary_method == "last_msg":
721
+ # (e.g. message = <first nested chat message>\nContext: \n<last chat message>)
722
+ carry_over_message = concat_carryover(chat_message, content_messages[-1]["content"])
723
+
724
+ elif carryover_summary_method == "reflection_with_llm":
725
+ # (e.g. message = <first nested chat message>\nContext: \n<llm summary>)
726
+
727
+ # Add the messages to the nested chat agent for reflection (we'll clear after reflection)
728
+ chat["recipient"]._oai_messages[sender] = content_messages
729
+
730
+ carry_over_message_llm = ConversableAgent._reflection_with_llm_as_summary(
731
+ sender=sender,
732
+ recipient=chat["recipient"], # Chat recipient LLM config will be used for the reflection
733
+ summary_args=carryover_summary_args,
734
+ )
735
+
736
+ chat["recipient"]._oai_messages[sender] = []  # Clear the temporary reflection messages added to the nested chat agent above
737
+
738
+ carry_over_message = concat_carryover(chat_message, carry_over_message_llm)
739
+
740
+ elif isinstance(carryover_summary_method, Callable):
741
+ # (e.g. message = <first nested chat message>\nContext: \n<function's return string>)
742
+ carry_over_message_result = carryover_summary_method(recipient, content_messages, carryover_summary_args)
743
+
744
+ carry_over_message = concat_carryover(chat_message, carry_over_message_result)
745
+
746
+ else:
+ # Guard against carry_over_message being unbound below
+ raise ValueError("Carryover summary_method must be 'all', 'last_msg', 'reflection_with_llm', or a callable.")
+
+ chat["message"] = carry_over_message
747
+
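A sketch of the two carryover styles handled above, under the assumption that `summarizer_agent` exists; the callable form receives the agent, the trimmed parent messages, and the summary args, and must return a string:

```python
# Built-in carryover: concatenate all (trimmed) parent messages under a
# "Context:" block appended to the chat's own message.
first_chat = {
    "recipient": summarizer_agent,
    "message": "Summarize the discussion so far.",
    "carryover_config": {"summary_method": "all"},
    "max_turns": 1,
}

# Callable carryover: signature my_method(agent, messages, summary_args) -> str.
def headline_carryover(agent, messages, summary_args):
    return messages[-1]["content"][:100] if messages else ""

first_chat["carryover_config"] = {"summary_method": headline_carryover, "summary_args": {}}
```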
748
+ @staticmethod
749
+ def _process_chat_queue_carryover(
750
+ chat_queue: list[dict[str, Any]],
751
+ recipient: Agent,
752
+ messages: list[dict[str, Any]] | None,
753
+ sender: Agent,
754
+ config: Any,
755
+ trim_messages: int = 2,
756
+ ) -> tuple[bool, str | None]:
757
+ """Process carryover configuration for the first chat in the queue.
758
+
759
+ Args:
760
+ chat_queue: List of chat configurations
761
+ recipient: Receiving agent
762
+ messages: Chat messages
763
+ sender: Sending agent
764
+ config: LLM configuration
765
+ trim_messages: Number of messages to trim for nested chat carryover (default 2 for nested chat in group chats)
766
+
767
+ Returns:
768
+ Tuple containing:
769
+ - restore_flag: Whether the original message needs to be restored
770
+ - original_message: The original message to restore (if any)
771
+ """
772
+ restore_chat_queue_message = False
773
+ original_chat_queue_message = None
774
+
775
+ # Carryover configuration is allowed on the first chat in the queue only. Trim the last two messages specifically for group chat nested chat carryover, as these are the transition messages to the nested chat agent.
776
+ if len(chat_queue) > 0 and "carryover_config" in chat_queue[0]:
777
+ if "message" in chat_queue[0]:
778
+ # As we're updating the message in the nested chat queue, we need to restore it after finishing this nested chat.
779
+ restore_chat_queue_message = True
780
+ original_chat_queue_message = chat_queue[0]["message"]
781
+
782
+ # TODO Check the trimming required if not a group chat, it may not be 2 because other chats don't have the group transition messages. We may need to add as a carryover_config parameter.
783
+ ConversableAgent._process_nested_chat_carryover(
784
+ chat=chat_queue[0],
785
+ recipient=recipient,
786
+ messages=messages,
787
+ sender=sender,
788
+ config=config,
789
+ trim_n_messages=trim_messages,
790
+ )
791
+
792
+ return restore_chat_queue_message, original_chat_queue_message
793
+
794
+ @staticmethod
795
+ def _summary_from_nested_chats(
796
+ chat_queue: list[dict[str, Any]],
797
+ recipient: Agent,
798
+ messages: list[dict[str, Any]] | None,
799
+ sender: Agent,
800
+ config: Any,
801
+ ) -> tuple[bool, str | None]:
802
+ """A simple chat reply function.
803
+ This function initiates one or a sequence of chats between the "recipient" and the agents in the
804
+ chat_queue.
805
+
806
+ It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.
807
+
808
+ The first chat in the queue can contain a 'carryover_config', a dictionary that denotes how to carry over messages from the parent chat into the first chat of the nested chats. It only applies to the first chat.
809
+ e.g.: carryover_summarize_chat_config = {"summary_method": "reflection_with_llm", "summary_args": None}
810
+ summary_method can be "last_msg", "all", "reflection_with_llm", Callable
811
+ The Callable signature: my_method(agent: ConversableAgent, messages: List[Dict[str, Any]], summary_args: Dict[str, Any]) -> str
812
+ The summary will be concatenated to the message of the first chat in the queue.
813
+
814
+ Returns:
815
+ Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
816
+ """
817
+ # Process carryover configuration
818
+ restore_chat_queue_message, original_chat_queue_message = ConversableAgent._process_chat_queue_carryover(
819
+ chat_queue, recipient, messages, sender, config
820
+ )
821
+
822
+ chat_to_run = ConversableAgent._get_chats_to_run(chat_queue, recipient, messages, sender, config)
823
+ if not chat_to_run:
824
+ return True, None
825
+ res = initiate_chats(chat_to_run)
826
+
827
+ # We need to restore the chat queue message if it has been modified so that it will be the original message for subsequent uses
828
+ if restore_chat_queue_message:
829
+ chat_queue[0]["message"] = original_chat_queue_message
830
+
831
+ return True, res[-1].summary
832
+
833
+ @staticmethod
834
+ async def _a_summary_from_nested_chats(
835
+ chat_queue: list[dict[str, Any]],
836
+ recipient: Agent,
837
+ messages: list[dict[str, Any]] | None,
838
+ sender: Agent,
839
+ config: Any,
840
+ ) -> tuple[bool, str | None]:
841
+ """A simple chat reply function.
842
+ This function initiates one or a sequence of chats between the "recipient" and the agents in the
843
+ chat_queue.
844
+
845
+ It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.
846
+
847
+ The first chat in the queue can contain a 'carryover_config', a dictionary that denotes how to carry over messages from the parent chat into the first chat of the nested chats. It only applies to the first chat.
848
+ e.g.: carryover_summarize_chat_config = {"summary_method": "reflection_with_llm", "summary_args": None}
849
+ summary_method can be "last_msg", "all", "reflection_with_llm", Callable
850
+ The Callable signature: my_method(agent: ConversableAgent, messages: List[Dict[str, Any]], summary_args: Dict[str, Any]) -> str
851
+ The summary will be concatenated to the message of the first chat in the queue.
852
+
853
+ Returns:
854
+ Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
855
+ """
856
+ # Process carryover configuration
857
+ restore_chat_queue_message, original_chat_queue_message = ConversableAgent._process_chat_queue_carryover(
858
+ chat_queue, recipient, messages, sender, config
859
+ )
860
+
861
+ chat_to_run = ConversableAgent._get_chats_to_run(chat_queue, recipient, messages, sender, config)
862
+ if not chat_to_run:
863
+ return True, None
864
+ res = await a_initiate_chats(chat_to_run)
865
+ index_of_last_chat = chat_to_run[-1]["chat_id"]
866
+
867
+ # We need to restore the chat queue message if it has been modified so that it will be the original message for subsequent uses
868
+ if restore_chat_queue_message:
869
+ chat_queue[0]["message"] = original_chat_queue_message
870
+
871
+ return True, res[index_of_last_chat].summary
872
+
873
+ def register_nested_chats(
874
+ self,
875
+ chat_queue: list[dict[str, Any]],
876
+ trigger: type[Agent] | str | Agent | Callable[[Agent], bool] | list,
877
+ reply_func_from_nested_chats: str | Callable[..., Any] = "summary_from_nested_chats",
878
+ position: int = 2,
879
+ use_async: bool | None = None,
880
+ **kwargs: Any,
881
+ ) -> None:
882
+ """Register a nested chat reply function.
883
+
884
+ Args:
885
+ chat_queue (list): a list of chat objects to be initiated. If use_async is used, then every chat in chat_queue must have a chat_id associated with it.
886
+ trigger (Agent class, str, Agent instance, callable, or list): refer to `register_reply` for details.
887
+ reply_func_from_nested_chats (Callable, str): the reply function for the nested chat.
888
+ The function takes a chat_queue for the nested chat, a recipient agent, a list of messages, a sender agent, and a config as input and returns a reply message.
888
+ Defaults to "summary_from_nested_chats", which corresponds to a built-in reply function that gets a summary from the nested chat_queue.
890
+ ```python
891
+ def reply_func_from_nested_chats(
892
+ chat_queue: List[Dict],
893
+ recipient: ConversableAgent,
894
+ messages: Optional[List[Dict]] = None,
895
+ sender: Optional[Agent] = None,
896
+ config: Optional[Any] = None,
897
+ ) -> Tuple[bool, Union[str, Dict, None]]:
898
+ ```
899
+ position (int): refer to `register_reply` for details. Defaults to 2, meaning we first check the termination and human reply, then the registered nested chat reply.
900
+ use_async: uses a_initiate_chats internally to start nested chats. If the original chat is initiated with a_initiate_chats, you may set this to True so the nested chats do not run synchronously.
901
+ kwargs: refer to `register_reply` for details.
902
+ """
903
+ if use_async:
904
+ for chat in chat_queue:
905
+ if chat.get("chat_id") is None:
906
+ raise ValueError("chat_id is required for async nested chats")
907
+
908
+ if use_async:
909
+ if reply_func_from_nested_chats == "summary_from_nested_chats":
910
+ reply_func_from_nested_chats = self._a_summary_from_nested_chats
911
+ if not callable(reply_func_from_nested_chats) or not is_coroutine_callable(reply_func_from_nested_chats):
912
+ raise ValueError("reply_func_from_nested_chats must be a callable and a coroutine")
913
+
914
+ async def wrapped_reply_func(recipient, messages=None, sender=None, config=None):
915
+ return await reply_func_from_nested_chats(chat_queue, recipient, messages, sender, config)
916
+
917
+ else:
918
+ if reply_func_from_nested_chats == "summary_from_nested_chats":
919
+ reply_func_from_nested_chats = self._summary_from_nested_chats
920
+ if not callable(reply_func_from_nested_chats):
921
+ raise ValueError("reply_func_from_nested_chats must be a callable")
922
+
923
+ def wrapped_reply_func(recipient, messages=None, sender=None, config=None):
924
+ return reply_func_from_nested_chats(chat_queue, recipient, messages, sender, config)
925
+
926
+ functools.update_wrapper(wrapped_reply_func, reply_func_from_nested_chats)
927
+
928
+ self.register_reply(
929
+ trigger,
930
+ wrapped_reply_func,
931
+ position,
932
+ kwargs.get("config"),
933
+ kwargs.get("reset_config"),
934
+ ignore_async_in_sync_chat=(
935
+ not use_async if use_async is not None else kwargs.get("ignore_async_in_sync_chat")
936
+ ),
937
+ )
938
+
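A minimal sketch of wiring the pieces above together; `coordinator` and `researcher` are assumed to be existing ConversableAgent instances:

```python
coordinator.register_nested_chats(
    chat_queue=[
        {
            "recipient": researcher,
            "message": "Investigate the request in the message above.",
            "summary_method": "reflection_with_llm",
            "max_turns": 2,
        }
    ],
    # Trigger on any sender other than the nested-chat participant itself,
    # to avoid the nested chat re-triggering on its own replies.
    trigger=lambda sender: sender is not researcher,
)
```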
939
+ @property
940
+ def system_message(self) -> str:
941
+ """Return the system message."""
942
+ return self._oai_system_message[0]["content"]
943
+
944
+ def update_system_message(self, system_message: str) -> None:
945
+ """Update the system message.
946
+
947
+ Args:
948
+ system_message (str): system message for the ChatCompletion inference.
949
+ """
950
+ self._oai_system_message[0]["content"] = system_message
951
+
952
+ def update_max_consecutive_auto_reply(self, value: int, sender: Agent | None = None):
953
+ """Update the maximum number of consecutive auto replies.
954
+
955
+ Args:
956
+ value (int): the maximum number of consecutive auto replies.
957
+ sender (Agent): when the sender is provided, only update the max_consecutive_auto_reply for that sender.
958
+ """
959
+ if sender is None:
960
+ self._max_consecutive_auto_reply = value
961
+ for k in self._max_consecutive_auto_reply_dict:
962
+ self._max_consecutive_auto_reply_dict[k] = value
963
+ else:
964
+ self._max_consecutive_auto_reply_dict[sender] = value
965
+
966
+ def max_consecutive_auto_reply(self, sender: Agent | None = None) -> int:
967
+ """The maximum number of consecutive auto replies."""
968
+ return self._max_consecutive_auto_reply if sender is None else self._max_consecutive_auto_reply_dict[sender]
969
+
970
+ @property
971
+ def chat_messages(self) -> dict[Agent, list[dict[str, Any]]]:
972
+ """A dictionary of conversations from agent to list of messages."""
973
+ return self._oai_messages
974
+
975
+ def chat_messages_for_summary(self, agent: Agent) -> list[dict[str, Any]]:
976
+ """A list of messages as a conversation to summarize."""
977
+ return self._oai_messages[agent]
978
+
979
+ def last_message(self, agent: Agent | None = None) -> dict[str, Any] | None:
980
+ """The last message exchanged with the agent.
981
+
982
+ Args:
983
+ agent (Agent): The agent in the conversation.
984
+ If None and more than one agent's conversations are found, an error will be raised.
985
+ If None and only one conversation is found, the last message of the only conversation will be returned.
986
+
987
+ Returns:
988
+ The last message exchanged with the agent.
989
+ """
990
+ if agent is None:
991
+ n_conversations = len(self._oai_messages)
992
+ if n_conversations == 0:
993
+ return None
994
+ if n_conversations == 1:
995
+ for conversation in self._oai_messages.values():
996
+ return conversation[-1]
997
+ raise ValueError("More than one conversation is found. Please specify the sender to get the last message.")
998
+ if agent not in self._oai_messages:
999
+ raise KeyError(
1000
+ f"The agent '{agent.name}' is not present in any conversation. No history available for this agent."
1001
+ )
1002
+ return self._oai_messages[agent][-1]
1003
+
1004
+ @property
1005
+ def use_docker(self) -> bool | str | None:
1006
+ """Bool value of whether to use docker to execute the code,
1007
+ or str value of the docker image name to use, or None when code execution is disabled.
1008
+ """
1009
+ return None if self._code_execution_config is False else self._code_execution_config.get("use_docker")
1010
+
1011
+ @staticmethod
1012
+ def _message_to_dict(message: dict[str, Any] | str) -> dict:
1013
+ """Convert a message to a dictionary.
1014
+
1015
+ The message can be a string or a dictionary. The string will be put in the "content" field of the new dictionary.
1016
+ """
1017
+ if isinstance(message, str):
1018
+ return {"content": message}
1019
+ elif isinstance(message, dict):
1020
+ return message
1021
+ else:
1022
+ return dict(message)
1023
+
1024
+ @staticmethod
1025
+ def _normalize_name(name):
1026
+ """LLMs sometimes ask functions while ignoring their own format requirements, this function should be used to replace invalid characters with "_".
1027
+
1028
+ Prefer _assert_valid_name for validating user configuration or input
1029
+ """
1030
+ return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:64]
1031
+
1032
+ @staticmethod
1033
+ def _assert_valid_name(name):
1034
+ """Ensure that configured names are valid, raises ValueError if not.
1035
+
1036
+ For munging LLM responses use _normalize_name to ensure LLM specified names don't break the API.
1037
+ """
1038
+ if not re.match(r"^[a-zA-Z0-9_-]+$", name):
1039
+ raise ValueError(f"Invalid name: {name}. Only letters, numbers, '_' and '-' are allowed.")
1040
+ if len(name) > 64:
1041
+ raise ValueError(f"Invalid name: {name}. Name must be less than 64 characters.")
1042
+ return name
1043
+
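The difference between the two helpers, illustrated; the expected results follow from the regexes above:

```python
ConversableAgent._normalize_name("get weather!")    # -> "get_weather_" (invalid chars munged)
ConversableAgent._assert_valid_name("get_weather")  # -> "get_weather" (passes through)
ConversableAgent._assert_valid_name("get weather")  # raises ValueError (space not allowed)
```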
1044
+ def _append_oai_message(
1045
+ self,
1046
+ message: dict[str, Any] | str,
1047
+ conversation_id: Agent,
1048
+ role: str = "assistant",
1049
+ name: str | None = None,
1050
+ ) -> bool:
1051
+ """Append a message to the ChatCompletion conversation.
1052
+
1053
+ If the message received is a string, it will be put in the "content" field of the new dictionary.
1054
+ If the message received is a dictionary but does not have any of the three fields "content", "function_call", or "tool_calls",
1055
+ this message is not a valid ChatCompletion message.
1056
+ If only "function_call" or "tool_calls" is provided, "content" will be set to None if not provided, and the role of the message will be forced "assistant".
1057
+
1058
+ Args:
1059
+ message (dict or str): message to be appended to the ChatCompletion conversation.
1060
+ conversation_id (Agent): id of the conversation, should be the recipient or sender.
1061
+ role (str): role of the message, can be "assistant" or "function".
1062
+ name (str | None): name of the message author, can be the name of the agent. If not provided, the name of the current agent will be used.
1063
+
1064
+ Returns:
1065
+ bool: whether the message is appended to the ChatCompletion conversation.
1066
+ """
1067
+ valid, oai_message = normilize_message_to_oai(message, role=role, name=name or self.name)
1068
+ if not valid:
1069
+ return False
1070
+ self._oai_messages[conversation_id].append(oai_message)
1071
+ return True
1072
+
1073
+ def _process_message_before_send(
1074
+ self, message: dict[str, Any] | str, recipient: Agent, silent: bool
1075
+ ) -> dict[str, Any] | str:
1076
+ """Process the message before sending it to the recipient."""
1077
+ hook_list = self.hook_lists["process_message_before_send"]
1078
+ for hook in hook_list:
1079
+ message = hook(
1080
+ sender=self, message=message, recipient=recipient, silent=ConversableAgent._is_silent(self, silent)
1081
+ )
1082
+ return message
1083
+
1084
+ def send(
1085
+ self,
1086
+ message: dict[str, Any] | str,
1087
+ recipient: Agent,
1088
+ request_reply: bool | None = None,
1089
+ silent: bool | None = False,
1090
+ ):
1091
+ """Send a message to another agent.
1092
+
1093
+ Args:
1094
+ message (dict or str): message to be sent.
1095
+ The message could contain the following fields:
1096
+ - content (str or List): Required, the content of the message. (Can be None)
1097
+ - function_call (str): the name of the function to be called.
1098
+ - name (str): the name of the function to be called.
1099
+ - role (str): the role of the message, any role that is not "function"
1100
+ will be modified to "assistant".
1101
+ - context (dict): the context of the message, which will be passed to
1102
+ [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create).
1103
+ For example, one agent can send a message A as:
1104
+ ```python
1105
+ {
1106
+ "content": lambda context: context["use_tool_msg"],
1107
+ "context": {"use_tool_msg": "Use tool X if they are relevant."},
1108
+ }
1109
+ ```
1110
+ Next time, one agent can send a message B with a different "use_tool_msg".
1111
+ Then the content of message A will be refreshed to the new "use_tool_msg".
1112
+ So effectively, this provides a way for an agent to send a "link" and modify
1113
+ the content of the "link" later.
1114
+ recipient (Agent): the recipient of the message.
1115
+ request_reply (bool or None): whether to request a reply from the recipient.
1116
+ silent (bool or None): (Experimental) whether to print the message sent.
1117
+
1118
+ Raises:
1119
+ ValueError: if the message can't be converted into a valid ChatCompletion message.
1120
+ """
1121
+ message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
1122
+ # When the agent composes and sends the message, the role of the message is "assistant"
1123
+ # unless it's "function".
1124
+ valid = self._append_oai_message(message, recipient, role="assistant", name=self.name)
1125
+ if valid:
1126
+ recipient.receive(message, self, request_reply, silent)
1127
+ else:
1128
+ raise ValueError(
1129
+ "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
1130
+ )
1131
+
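A sketch of the context-templated "link" message described in the docstring above; `agent_a` and `agent_b` are assumed to exist:

```python
agent_a.send(
    {
        # Resolved against the "context" dict at completion time, so sending
        # a later message with a new "use_tool_msg" refreshes this content.
        "content": lambda context: context["use_tool_msg"],
        "context": {"use_tool_msg": "Use tool X if it is relevant."},
    },
    agent_b,
    request_reply=False,
)
```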
1132
+ async def a_send(
1133
+ self,
1134
+ message: dict[str, Any] | str,
1135
+ recipient: Agent,
1136
+ request_reply: bool | None = None,
1137
+ silent: bool | None = False,
1138
+ ):
1139
+ """(async) Send a message to another agent.
1140
+
1141
+ Args:
1142
+ message (dict or str): message to be sent.
1143
+ The message could contain the following fields:
1144
+ - content (str or List): Required, the content of the message. (Can be None)
1145
+ - function_call (str): the name of the function to be called.
1146
+ - name (str): the name of the function to be called.
1147
+ - role (str): the role of the message, any role that is not "function"
1148
+ will be modified to "assistant".
1149
+ - context (dict): the context of the message, which will be passed to
1150
+ [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create).
1151
+ For example, one agent can send a message A as:
1152
+ ```python
1153
+ {
1154
+ "content": lambda context: context["use_tool_msg"],
1155
+ "context": {"use_tool_msg": "Use tool X if they are relevant."},
1156
+ }
1157
+ ```
1158
+ Next time, one agent can send a message B with a different "use_tool_msg".
1159
+ Then the content of message A will be refreshed to the new "use_tool_msg".
1160
+ So effectively, this provides a way for an agent to send a "link" and modify
1161
+ the content of the "link" later.
1162
+ recipient (Agent): the recipient of the message.
1163
+ request_reply (bool or None): whether to request a reply from the recipient.
1164
+ silent (bool or None): (Experimental) whether to print the message sent.
1165
+
1166
+ Raises:
1167
+ ValueError: if the message can't be converted into a valid ChatCompletion message.
1168
+ """
1169
+ message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
1170
+ # When the agent composes and sends the message, the role of the message is "assistant"
1171
+ # unless it's "function".
1172
+ valid = self._append_oai_message(message, recipient, role="assistant", name=self.name)
1173
+ if valid:
1174
+ await recipient.a_receive(message, self, request_reply, silent)
1175
+ else:
1176
+ raise ValueError(
1177
+ "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
1178
+ )
1179
+
1180
+ def _print_received_message(self, message: dict[str, Any] | str, sender: Agent, skip_head: bool = False):
1181
+ message = message_to_dict(message)
1182
+ message_model = create_received_event_model(event=message, sender=sender, recipient=self)
1183
+ iostream = IOStream.get_default()
1184
+ # message_model.print(iostream.print)
1185
+ iostream.send(message_model)
1186
+
1187
+ def _process_received_message(self, message: dict[str, Any] | str, sender: Agent, silent: bool):
1188
+ # When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
1189
+ valid = self._append_oai_message(message, sender, role="user", name=sender.name)
1190
+ if logging_enabled():
1191
+ log_event(self, "received_message", message=message, sender=sender.name, valid=valid)
1192
+
1193
+ if not valid:
1194
+ raise ValueError(
1195
+ "Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
1196
+ )
1197
+
1198
+ if not ConversableAgent._is_silent(sender, silent):
1199
+ self._print_received_message(message, sender)
1200
+
1201
+ def receive(
1202
+ self,
1203
+ message: dict[str, Any] | str,
1204
+ sender: Agent,
1205
+ request_reply: bool | None = None,
1206
+ silent: bool | None = False,
1207
+ ):
1208
+ """Receive a message from another agent.
1209
+
1210
+ Once a message is received, this function sends a reply to the sender or stops.
1211
+ The reply can be generated automatically or entered manually by a human.
1212
+
1213
+ Args:
1214
+ message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).
1215
+ 1. "content": content of the message, can be None.
1216
+ 2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
1217
+ 3. "tool_calls": a list of dictionaries containing the function name and arguments.
1218
+ 4. "role": role of the message, can be "assistant", "user", "function", "tool".
1219
+ This field is only needed to distinguish between "function" or "assistant"/"user".
1220
+ 5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
1221
+ 6. "context" (dict): the context of the message, which will be passed to
1222
+ [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create).
1223
+ sender: sender of an Agent instance.
1224
+ request_reply (bool or None): whether a reply is requested from the sender.
1225
+ If None, the value is determined by `self.reply_at_receive[sender]`.
1226
+ silent (bool or None): (Experimental) whether to print the message received.
1227
+
1228
+ Raises:
1229
+ ValueError: if the message can't be converted into a valid ChatCompletion message.
1230
+ """
1231
+ self._process_received_message(message, sender, silent)
1232
+ if request_reply is False or (request_reply is None and self.reply_at_receive[sender] is False):
1233
+ return
1234
+ reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
1235
+ if reply is not None:
1236
+ self.send(reply, sender, silent=silent)
1237
+
1238
+ async def a_receive(
1239
+ self,
1240
+ message: dict[str, Any] | str,
1241
+ sender: Agent,
1242
+ request_reply: bool | None = None,
1243
+ silent: bool | None = False,
1244
+ ):
1245
+ """(async) Receive a message from another agent.
1246
+
1247
+ Once a message is received, this function sends a reply to the sender or stops.
1248
+ The reply can be generated automatically or entered manually by a human.
1249
+
1250
+ Args:
1251
+ message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).
1252
+ 1. "content": content of the message, can be None.
1253
+ 2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
1254
+ 3. "tool_calls": a list of dictionaries containing the function name and arguments.
1255
+ 4. "role": role of the message, can be "assistant", "user", "function".
1256
+ This field is only needed to distinguish between "function" or "assistant"/"user".
1257
+ 5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
1258
+ 6. "context" (dict): the context of the message, which will be passed to
1259
+ [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create).
1260
+ sender: sender of an Agent instance.
1261
+ request_reply (bool or None): whether a reply is requested from the sender.
1262
+ If None, the value is determined by `self.reply_at_receive[sender]`.
1263
+ silent (bool or None): (Experimental) whether to print the message received.
1264
+
1265
+ Raises:
1266
+ ValueError: if the message can't be converted into a valid ChatCompletion message.
1267
+ """
1268
+ self._process_received_message(message, sender, silent)
1269
+ if request_reply is False or (request_reply is None and self.reply_at_receive[sender] is False):
1270
+ return
1271
+ reply = await self.a_generate_reply(messages=self.chat_messages[sender], sender=sender)
1272
+ if reply is not None:
1273
+ await self.a_send(reply, sender, silent=silent)
1274
+
1275
+ def _prepare_chat(
1276
+ self,
1277
+ recipient: "ConversableAgent",
1278
+ clear_history: bool,
1279
+ prepare_recipient: bool = True,
1280
+ reply_at_receive: bool = True,
1281
+ ) -> None:
1282
+ self.reset_consecutive_auto_reply_counter(recipient)
1283
+ self.reply_at_receive[recipient] = reply_at_receive
1284
+ if clear_history:
1285
+ self.clear_history(recipient)
1286
+ self._human_input = []
1287
+ if prepare_recipient:
1288
+ recipient._prepare_chat(self, clear_history, False, reply_at_receive)
1289
+
1290
+ def _raise_exception_on_async_reply_functions(self) -> None:
1291
+ """Raise an exception if any async reply functions are registered.
1292
+
1293
+ Raises:
1294
+ RuntimeError: if any async reply functions are registered.
1295
+ """
1296
+ reply_functions = {
1297
+ f["reply_func"] for f in self._reply_func_list if not f.get("ignore_async_in_sync_chat", False)
1298
+ }
1299
+
1300
+ async_reply_functions = [f for f in reply_functions if is_coroutine_callable(f)]
1301
+ if async_reply_functions:
1302
+ msg = (
1303
+ "Async reply functions can only be used with ConversableAgent.a_initiate_chat(). The following async reply functions are found: "
1304
+ + ", ".join([f.__name__ for f in async_reply_functions])
1305
+ )
1306
+
1307
+ raise RuntimeError(msg)
1308
+
1309
+ def _should_terminate_chat(self, recipient: "ConversableAgent", message: dict[str, Any]) -> bool:
1310
+ """
1311
+ Determines whether the chat should be terminated based on the message content
1312
+ and the recipient's termination condition.
1313
+
1314
+ Args:
1315
+ recipient (ConversableAgent): The agent to check for termination condition.
1316
+ message (dict[str, Any]): The message dictionary to evaluate for termination.
1317
+
1318
+ Returns:
1319
+ bool: True if the chat should be terminated, False otherwise.
1320
+ """
1321
+ content = message.get("content")
1322
+ return (
1323
+ isinstance(recipient, ConversableAgent)
1324
+ and content is not None
1325
+ and hasattr(recipient, "_is_termination_msg")
1326
+ and recipient._is_termination_msg(message)
1327
+ )
1328
+
1329
+ def initiate_chat(
1330
+ self,
1331
+ recipient: "ConversableAgent",
1332
+ clear_history: bool = True,
1333
+ silent: bool | None = False,
1334
+ cache: AbstractCache | None = None,
1335
+ max_turns: int | None = None,
1336
+ summary_method: str | Callable[..., Any] | None = DEFAULT_SUMMARY_METHOD,
1337
+ summary_args: dict[str, Any] | None = {},
1338
+ message: dict[str, Any] | str | Callable[..., Any] | None = None,
1339
+ **kwargs: Any,
1340
+ ) -> ChatResult:
1341
+ """Initiate a chat with the recipient agent.
1342
+
1343
+ Reset the consecutive auto reply counter.
1344
+ If `clear_history` is True, the chat history with the recipient agent will be cleared.
1345
+
1346
+
1347
+ Args:
1348
+ recipient: the recipient agent.
1349
+ clear_history (bool): whether to clear the chat history with the agent. Default is True.
1350
+ silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
1351
+ cache (AbstractCache or None): the cache client to be used for this conversation. Default is None.
1352
+ max_turns (int or None): the maximum number of turns for the chat between the two agents. One turn means one conversation round trip. Note that this is different from
1353
+ `max_consecutive_auto_reply` which is the maximum number of consecutive auto replies; and it is also different from `max_rounds` in GroupChat which is the maximum number of rounds in a group chat session.
1354
+ If max_turns is set to None, the chat will continue until a termination condition is met. Default is None.
1355
+ summary_method (str or callable): a method to get a summary from the chat. Default is DEFAULT_SUMMARY_METHOD, i.e., "last_msg".
1356
+ Supported strings are "last_msg" and "reflection_with_llm":
1357
+ - when set to "last_msg", it returns the last message of the dialog as the summary.
1358
+ - when set to "reflection_with_llm", it returns a summary extracted using an llm client.
1359
+ `llm_config` must be set in either the recipient or sender.
1360
+
1361
+ A callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,
1362
+
1363
+ ```python
1364
+ def my_summary_method(
1365
+ sender: ConversableAgent,
1366
+ recipient: ConversableAgent,
1367
+ summary_args: dict,
1368
+ ):
1369
+ return recipient.last_message(sender)["content"]
1370
+ ```
1371
+ summary_args (dict): a dictionary of arguments to be passed to the summary_method.
1372
+ One example key is "summary_prompt", and value is a string of text used to prompt a LLM-based agent (the sender or recipient agent) to reflect
1373
+ on the conversation and extract a summary when summary_method is "reflection_with_llm".
1374
+ The default summary_prompt is DEFAULT_SUMMARY_PROMPT, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out."
1375
+ Another available key is "summary_role", which is the role of the message sent to the agent in charge of summarizing. Default is "system".
1376
+ message (str, dict or Callable): the initial message to be sent to the recipient. If not provided, input() will be called to get the initial message.
1377
+ - If a string or a dict is provided, it will be used as the initial message. `generate_init_message` is called to generate the initial message for the agent based on this string and the context.
1378
+ If dict, it may contain the following reserved fields (either content or tool_calls need to be provided).
1379
+
1380
+ 1. "content": content of the message, can be None.
1381
+ 2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
1382
+ 3. "tool_calls": a list of dictionaries containing the function name and arguments.
1383
+ 4. "role": role of the message, can be "assistant", "user", "function".
1384
+ This field is only needed to distinguish between "function" or "assistant"/"user".
1385
+ 5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
1386
+ 6. "context" (dict): the context of the message, which will be passed to
1387
+ `OpenAIWrapper.create`.
1388
+
1389
+ - If a callable is provided, it will be called to get the initial message in the form of a string or a dict.
1390
+ If the returned type is dict, it may contain the reserved fields mentioned above.
1391
+
1392
+ Example of a callable message (returning a string):
1393
+
1394
+ ```python
1395
+ def my_message(
1396
+ sender: ConversableAgent, recipient: ConversableAgent, context: dict
1397
+ ) -> Union[str, Dict]:
1398
+ carryover = context.get("carryover", "")
1399
+ if isinstance(carryover, list):
1400
+ carryover = carryover[-1]
1401
+ final_msg = "Write a blogpost." + "\\nContext: \\n" + carryover
1402
+ return final_msg
1403
+ ```
1404
+
1405
+ Example of a callable message (returning a dict):
1406
+
1407
+ ```python
1408
+ def my_message(
1409
+ sender: ConversableAgent, recipient: ConversableAgent, context: dict
1410
+ ) -> Union[str, Dict]:
1411
+ final_msg = {}
1412
+ carryover = context.get("carryover", "")
1413
+ if isinstance(carryover, list):
1414
+ carryover = carryover[-1]
1415
+ final_msg["content"] = "Write a blogpost." + "\\nContext: \\n" + carryover
1416
+ final_msg["context"] = {"prefix": "Today I feel"}
1417
+ return final_msg
1418
+ ```
1419
+ **kwargs: any additional information. It has the following reserved fields:
1420
+ - "carryover": a string or a list of string to specify the carryover information to be passed to this chat.
1421
+ If provided, we will combine this carryover (by attaching a "context: " string and the carryover content after the message content) with the "message" content when generating the initial chat
1422
+ message in `generate_init_message`.
1423
+ - "verbose": a boolean to specify whether to print the message and carryover in a chat. Default is False.
1424
+
1425
+ Raises:
1426
+ RuntimeError: if any async reply functions are registered and not ignored in sync chat.
1427
+
1428
+ Returns:
1429
+ ChatResult: a ChatResult object.
1430
+ """
1431
+ iostream = IOStream.get_default()
1432
+
1433
+ cache = Cache.get_current_cache(cache)
1434
+ _chat_info = locals().copy()
1435
+ _chat_info["sender"] = self
1436
+ consolidate_chat_info(_chat_info, uniform_sender=self)
1437
+ for agent in [self, recipient]:
1438
+ agent._raise_exception_on_async_reply_functions()
1439
+ agent.previous_cache = agent.client_cache
1440
+ agent.client_cache = cache
1441
+ if isinstance(max_turns, int):
1442
+ self._prepare_chat(recipient, clear_history, reply_at_receive=False)
1443
+ for i in range(max_turns):
1445
+ # check recipient max consecutive auto reply limit
1446
+ if self._consecutive_auto_reply_counter[recipient] >= recipient._max_consecutive_auto_reply:
1447
+ break
1448
+ if i == 0:
1449
+ if isinstance(message, Callable):
1450
+ msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
1451
+ else:
1452
+ msg2send = self.generate_init_message(message, **kwargs)
1453
+ else:
1454
+ last_message = self.chat_messages[recipient][-1]
1455
+ if self._should_terminate_chat(recipient, last_message):
1456
+ break
1457
+ msg2send = self.generate_reply(messages=self.chat_messages[recipient], sender=recipient)
1458
+ if msg2send is None:
1459
+ break
1460
+ self.send(msg2send, recipient, request_reply=True, silent=silent)
1461
+ else: # No breaks in the for loop, so we have reached max turns
1462
+ iostream.send(
1463
+ TerminationEvent(
1464
+ termination_reason=f"Maximum turns ({max_turns}) reached", sender=self, recipient=recipient
1465
+ )
1466
+ )
1467
+ else:
1468
+ self._prepare_chat(recipient, clear_history)
1469
+ if isinstance(message, Callable):
1470
+ msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
1471
+ else:
1472
+ msg2send = self.generate_init_message(message, **kwargs)
1473
+ self.send(msg2send, recipient, silent=silent)
1474
+ summary = self._summarize_chat(
1475
+ summary_method,
1476
+ summary_args,
1477
+ recipient,
1478
+ cache=cache,
1479
+ )
1480
+ for agent in [self, recipient]:
1481
+ agent.client_cache = agent.previous_cache
1482
+ agent.previous_cache = None
1483
+ chat_result = ChatResult(
1484
+ chat_history=self.chat_messages[recipient],
1485
+ summary=summary,
1486
+ cost=gather_usage_summary([self, recipient]),
1487
+ human_input=self._human_input,
1488
+ )
1489
+ return chat_result
1490
+
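A minimal end-to-end sketch, assuming `llm_config` is configured elsewhere:

```python
assistant = ConversableAgent("assistant", llm_config=llm_config)
user = ConversableAgent("user", llm_config=False, human_input_mode="NEVER")

result = user.initiate_chat(
    assistant,
    message="State Newton's three laws of motion.",
    max_turns=2,
    summary_method="last_msg",
)
print(result.summary)  # summary of the chat
print(result.cost)     # gathered usage summary for both agents
```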
1491
+ def run(
1492
+ self,
1493
+ recipient: Optional["ConversableAgent"] = None,
1494
+ clear_history: bool = True,
1495
+ silent: bool | None = False,
1496
+ cache: AbstractCache | None = None,
1497
+ max_turns: int | None = None,
1498
+ summary_method: str | Callable[..., Any] | None = DEFAULT_SUMMARY_METHOD,
1499
+ summary_args: dict[str, Any] | None = {},
1500
+ message: dict[str, Any] | str | Callable[..., Any] | None = None,
1501
+ executor_kwargs: dict[str, Any] | None = None,
1502
+ tools: Tool | Iterable[Tool] | None = None,
1503
+ user_input: bool | None = False,
1504
+ msg_to: str | None = "agent",
1505
+ **kwargs: Any,
1506
+ ) -> RunResponseProtocol:
1507
+ iostream = ThreadIOStream()
1508
+ agents = [self, recipient] if recipient else [self]
1509
+ response = RunResponse(iostream, agents=agents)
1510
+
1511
+ if recipient is None:
1512
+
1513
+ def initiate_chat(
1514
+ self=self,
1515
+ iostream: ThreadIOStream = iostream,
1516
+ response: RunResponse = response,
1517
+ ) -> None:
1518
+ with (
1519
+ IOStream.set_default(iostream),
1520
+ self._create_or_get_executor(
1521
+ executor_kwargs=executor_kwargs,
1522
+ tools=tools,
1523
+ agent_name="user",
1524
+ agent_human_input_mode="ALWAYS" if user_input else "NEVER",
1525
+ ) as executor,
1526
+ ):
1527
+ try:
1528
+ if msg_to == "agent":
1529
+ chat_result = executor.initiate_chat(
1530
+ self,
1531
+ message=message,
1532
+ clear_history=clear_history,
1533
+ max_turns=max_turns,
1534
+ summary_method=summary_method,
1535
+ )
1536
+ else:
1537
+ chat_result = self.initiate_chat(
1538
+ executor,
1539
+ message=message,
1540
+ clear_history=clear_history,
1541
+ max_turns=max_turns,
1542
+ summary_method=summary_method,
1543
+ )
1544
+
1545
+ IOStream.get_default().send(
1546
+ RunCompletionEvent(
1547
+ history=chat_result.chat_history,
1548
+ summary=chat_result.summary,
1549
+ cost=chat_result.cost,
1550
+ last_speaker=self.name,
1551
+ )
1552
+ )
1553
+ except Exception as e:
1554
+ response.iostream.send(ErrorEvent(error=e))
1555
+
1556
+ else:
1557
+
1558
+ def initiate_chat(
1559
+ self=self,
1560
+ iostream: ThreadIOStream = iostream,
1561
+ response: RunResponse = response,
1562
+ ) -> None:
1563
+ with IOStream.set_default(iostream): # type: ignore[arg-type]
1564
+ try:
1565
+ chat_result = self.initiate_chat(
1566
+ recipient,
1567
+ clear_history=clear_history,
1568
+ silent=silent,
1569
+ cache=cache,
1570
+ max_turns=max_turns,
1571
+ summary_method=summary_method,
1572
+ summary_args=summary_args,
1573
+ message=message,
1574
+ **kwargs,
1575
+ )
1576
+
1577
+ response._summary = chat_result.summary
1578
+ response._messages = chat_result.chat_history
1579
+
1580
+ _last_speaker = recipient if chat_result.chat_history[-1]["name"] == recipient.name else self
1581
+ if hasattr(recipient, "last_speaker"):
1582
+ _last_speaker = recipient.last_speaker
1583
+
1584
+ IOStream.get_default().send(
1585
+ RunCompletionEvent(
1586
+ history=chat_result.chat_history,
1587
+ summary=chat_result.summary,
1588
+ cost=chat_result.cost,
1589
+ last_speaker=_last_speaker.name,
1590
+ )
1591
+ )
1592
+ except Exception as e:
1593
+ response.iostream.send(ErrorEvent(error=e))
1594
+
1595
+ threading.Thread(
1596
+ target=initiate_chat,
1597
+ ).start()
1598
+
1599
+ return response
1600
+
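A sketch of the run() flow above: the chat executes on a background thread and streams events through the returned response. The `process()` call, which consumes and prints the event stream, follows the documented AG2 usage pattern and is assumed available on the response object:

```python
response = assistant.run(
    message="Plan a weekend trip to the mountains.",
    max_turns=3,
    user_input=False,  # executor runs with human_input_mode="NEVER"
)
response.process()       # drain and print the event stream
print(response.summary)  # populated once the background chat completes
```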
1601
+ async def a_initiate_chat(
1602
+ self,
1603
+ recipient: "ConversableAgent",
1604
+ clear_history: bool = True,
1605
+ silent: bool | None = False,
1606
+ cache: AbstractCache | None = None,
1607
+ max_turns: int | None = None,
1608
+ summary_method: str | Callable[..., Any] | None = DEFAULT_SUMMARY_METHOD,
1609
+ summary_args: dict[str, Any] | None = {},
1610
+ message: str | Callable[..., Any] | None = None,
1611
+ **kwargs: Any,
1612
+ ) -> ChatResult:
1613
+ """(async) Initiate a chat with the recipient agent.
1614
+
1615
+ Reset the consecutive auto reply counter.
1616
+ If `clear_history` is True, the chat history with the recipient agent will be cleared.
1617
+ `a_generate_init_message` is called to generate the initial message for the agent.
1618
+
1619
+ Args: Please refer to `initiate_chat`.
1620
+
1621
+ Returns:
1622
+ ChatResult: a ChatResult object.
1623
+ """
1624
+ iostream = IOStream.get_default()
1625
+
1626
+ _chat_info = locals().copy()
1627
+ _chat_info["sender"] = self
1628
+ consolidate_chat_info(_chat_info, uniform_sender=self)
1629
+ for agent in [self, recipient]:
1630
+ agent.previous_cache = agent.client_cache
1631
+ agent.client_cache = cache
1632
+ if isinstance(max_turns, int):
1633
+ self._prepare_chat(recipient, clear_history, reply_at_receive=False)
1634
+ for _ in range(max_turns):
1636
+ if _ == 0:
1637
+ if isinstance(message, Callable):
1638
+ msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
1639
+ else:
1640
+ msg2send = await self.a_generate_init_message(message, **kwargs)
1641
+ else:
1642
+ last_message = self.chat_messages[recipient][-1]
1643
+ if self._should_terminate_chat(recipient, last_message):
1644
+ break
1645
+ msg2send = await self.a_generate_reply(messages=self.chat_messages[recipient], sender=recipient)
1646
+ if msg2send is None:
1647
+ break
1648
+ await self.a_send(msg2send, recipient, request_reply=True, silent=silent)
1649
+ else: # No breaks in the for loop, so we have reached max turns
1650
+ iostream.send(
1651
+ TerminationEvent(
1652
+ termination_reason=f"Maximum turns ({max_turns}) reached", sender=self, recipient=recipient
1653
+ )
1654
+ )
1655
+ else:
1656
+ self._prepare_chat(recipient, clear_history)
1657
+ if isinstance(message, Callable):
1658
+ msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
1659
+ else:
1660
+ msg2send = await self.a_generate_init_message(message, **kwargs)
1661
+ await self.a_send(msg2send, recipient, silent=silent)
1662
+ summary = self._summarize_chat(
1663
+ summary_method,
1664
+ summary_args,
1665
+ recipient,
1666
+ cache=cache,
1667
+ )
1668
+ for agent in [self, recipient]:
1669
+ agent.client_cache = agent.previous_cache
1670
+ agent.previous_cache = None
1671
+ chat_result = ChatResult(
1672
+ chat_history=self.chat_messages[recipient],
1673
+ summary=summary,
1674
+ cost=gather_usage_summary([self, recipient]),
1675
+ human_input=self._human_input,
1676
+ )
1677
+ return chat_result
1678
+
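The async counterpart mirrors initiate_chat; a sketch assuming the same agents as in the earlier example:

```python
import asyncio

async def main():
    result = await user.a_initiate_chat(
        assistant,
        message="Summarize quantum entanglement in one sentence.",
        max_turns=1,
    )
    print(result.summary)

asyncio.run(main())
```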
1679
+ async def a_run(
1680
+ self,
1681
+ recipient: Optional["ConversableAgent"] = None,
1682
+ clear_history: bool = True,
1683
+ silent: bool | None = False,
1684
+ cache: AbstractCache | None = None,
1685
+ max_turns: int | None = None,
1686
+ summary_method: str | Callable[..., Any] | None = DEFAULT_SUMMARY_METHOD,
1687
+ summary_args: dict[str, Any] | None = {},
1688
+ message: dict[str, Any] | str | Callable[..., Any] | None = None,
1689
+ executor_kwargs: dict[str, Any] | None = None,
1690
+ tools: Tool | Iterable[Tool] | None = None,
1691
+ user_input: bool | None = False,
1692
+ msg_to: str | None = "agent",
1693
+ **kwargs: Any,
1694
+ ) -> AsyncRunResponseProtocol:
1695
+ iostream = AsyncThreadIOStream()
1696
+ agents = [self, recipient] if recipient else [self]
1697
+ response = AsyncRunResponse(iostream, agents=agents)
1698
+
1699
+ if recipient is None:
1700
+
1701
+ async def initiate_chat(
1702
+ self=self,
1703
+ iostream: AsyncThreadIOStream = iostream,
1704
+ response: AsyncRunResponse = response,
1705
+ ) -> None:
1706
+ with (
1707
+ IOStream.set_default(iostream),
1708
+ self._create_or_get_executor(
1709
+ executor_kwargs=executor_kwargs,
1710
+ tools=tools,
1711
+ agent_name="user",
1712
+ agent_human_input_mode="ALWAYS" if user_input else "NEVER",
1713
+ ) as executor,
1714
+ ):
1715
+ try:
1716
+ if msg_to == "agent":
1717
+ chat_result = await executor.a_initiate_chat(
1718
+ self,
1719
+ message=message,
1720
+ clear_history=clear_history,
1721
+ max_turns=max_turns,
1722
+ summary_method=summary_method,
1723
+ )
1724
+ else:
1725
+ chat_result = await self.a_initiate_chat(
1726
+ executor,
1727
+ message=message,
1728
+ clear_history=clear_history,
1729
+ max_turns=max_turns,
1730
+ summary_method=summary_method,
1731
+ )
1732
+
1733
+ IOStream.get_default().send(
1734
+ RunCompletionEvent(
1735
+ history=chat_result.chat_history,
1736
+ summary=chat_result.summary,
1737
+ cost=chat_result.cost,
1738
+ last_speaker=self.name,
1739
+ )
1740
+ )
1741
+ except Exception as e:
1742
+ response.iostream.send(ErrorEvent(error=e))
1743
+
1744
+ else:
1745
+
1746
+ async def initiate_chat(
1747
+ self=self,
1748
+ iostream: AsyncThreadIOStream = iostream,
1749
+ response: AsyncRunResponse = response,
1750
+ ) -> None:
1751
+ with IOStream.set_default(iostream): # type: ignore[arg-type]
1752
+ try:
1753
+ chat_result = await self.a_initiate_chat(
1754
+ recipient,
1755
+ clear_history=clear_history,
1756
+ silent=silent,
1757
+ cache=cache,
1758
+ max_turns=max_turns,
1759
+ summary_method=summary_method,
1760
+ summary_args=summary_args,
1761
+ message=message,
1762
+ **kwargs,
1763
+ )
1764
+
1765
+ last_speaker = recipient if chat_result.chat_history[-1]["name"] == recipient.name else self
1766
+ if hasattr(recipient, "last_speaker"):
1767
+ last_speaker = recipient.last_speaker
1768
+
1769
+ IOStream.get_default().send(
1770
+ RunCompletionEvent(
1771
+ history=chat_result.chat_history,
1772
+ summary=chat_result.summary,
1773
+ cost=chat_result.cost,
1774
+ last_speaker=last_speaker.name,
1775
+ )
1776
+ )
1777
+
1778
+ except Exception as e:
1779
+ response.iostream.send(ErrorEvent(error=e))
1780
+
1781
+ asyncio.create_task(initiate_chat())
1782
+
1783
+ return response
1784
+
1785
+ def _summarize_chat(
1786
+ self,
1787
+ summary_method,
1788
+ summary_args,
1789
+ recipient: Agent | None = None,
1790
+ cache: AbstractCache | None = None,
1791
+ ) -> str:
1792
+ """Get a chat summary from an agent participating in a chat.
1793
+
1794
+ Args:
1795
+ summary_method (str or callable): the summary_method to get the summary.
1796
+ The callable summary_method should take the recipient and sender agent in a chat as input and return a summary string. E.g.,
1797
+ ```python
1798
+ def my_summary_method(
1799
+ sender: ConversableAgent,
1800
+ recipient: ConversableAgent,
1801
+ summary_args: dict,
1802
+ ):
1803
+ return recipient.last_message(sender)["content"]
1804
+ ```
1805
+ summary_args (dict): a dictionary of arguments to be passed to the summary_method.
1806
+ recipient: the recipient agent in a chat.
1807
+ cache: the cache client to be used for this conversation. When provided,
1808
+ the cache will be used to store and retrieve LLM responses when generating
1809
+ summaries, which can improve performance and reduce API costs for
1810
+ repetitive summary requests. The cache is passed to the summary_method
1811
+ via summary_args['cache'].
1812
+
1813
+ Returns:
1814
+ str: a chat summary from the agent.
1815
+ """
1816
+ summary = ""
1817
+ if summary_method is None:
1818
+ return summary
1819
+ if "cache" not in summary_args:
1820
+ summary_args["cache"] = cache
1821
+ if summary_method == "reflection_with_llm":
1822
+ summary_method = self._reflection_with_llm_as_summary
1823
+ elif summary_method == "last_msg":
1824
+ summary_method = self._last_msg_as_summary
1825
+
1826
+ if isinstance(summary_method, Callable):
1827
+ summary = summary_method(self, recipient, summary_args)
1828
+ else:
1829
+ raise ValueError(
1830
+ "If not None, the summary_method must be a string from [`reflection_with_llm`, `last_msg`] or a callable."
1831
+ )
1832
+ if isinstance(summary, dict):
1833
+ summary = str(summary.get("content", ""))
1834
+ return summary
1835
+
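A custom summary_method matching the callable signature shown above; note that the cache is delivered via summary_args["cache"]:

```python
def shout_summary(sender, recipient, summary_args):
    # summary_args also carries the cache under summary_args["cache"].
    last = recipient.last_message(sender) or {}
    return str(last.get("content", "")).upper()

result = user.initiate_chat(
    assistant, message="Hello!", max_turns=1, summary_method=shout_summary
)
```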
1836
+ @staticmethod
1837
+ def _last_msg_as_summary(sender, recipient, summary_args) -> str:
1838
+ """Get a chat summary from the last message of the recipient."""
1839
+ summary = ""
1840
+ try:
1841
+ content = recipient.last_message(sender)["content"]
1842
+ if isinstance(content, str):
1843
+ summary = content.replace("TERMINATE", "")
1844
+ elif isinstance(content, list):
1845
+ # Remove the `TERMINATE` word in the content list.
1846
+ summary = "\n".join(
1847
+ x["text"].replace("TERMINATE", "") for x in content if isinstance(x, dict) and "text" in x
1848
+ )
1849
+ except (IndexError, AttributeError) as e:
1850
+ warnings.warn(f"Cannot extract summary using last_msg: {e}. Using an empty str as summary.", UserWarning)
1851
+ return summary
1852
+
1853
+ @staticmethod
1854
+ def _reflection_with_llm_as_summary(sender, recipient, summary_args):
1855
+ prompt = summary_args.get("summary_prompt")
1856
+ prompt = ConversableAgent.DEFAULT_SUMMARY_PROMPT if prompt is None else prompt
1857
+ if not isinstance(prompt, str):
1858
+ raise ValueError("The summary_prompt must be a string.")
1859
+ msg_list = recipient.chat_messages_for_summary(sender)
1860
+ agent = sender if recipient is None else recipient
1861
+ role = summary_args.get("summary_role", None)
1862
+ if role and not isinstance(role, str):
1863
+ raise ValueError("The summary_role in summary_arg must be a string.")
1864
+ try:
1865
+ summary = sender._reflection_with_llm(
1866
+ prompt, msg_list, llm_agent=agent, cache=summary_args.get("cache"), role=role
1867
+ )
1868
+ except Exception as e:
1869
+ warnings.warn(
1870
+ f"Cannot extract summary using reflection_with_llm: {e}. Using an empty str as summary.", UserWarning
1871
+ )
1872
+ summary = ""
1873
+ return summary
1874
+
1875
+ def _reflection_with_llm(
1876
+ self,
1877
+ prompt,
1878
+ messages,
1879
+ llm_agent: Agent | None = None,
1880
+ cache: AbstractCache | None = None,
1881
+ role: str | None = None,
1882
+ ) -> str:
1883
+ """Get a chat summary using reflection with an llm client based on the conversation history.
1884
+
1885
+ Args:
1886
+ prompt (str): The prompt (in this method it is used as system prompt) used to get the summary.
1887
+ messages (list): The messages generated as part of a chat conversation.
1888
+ llm_agent: the agent with an llm client.
1889
+ cache (AbstractCache or None): the cache client to be used for this conversation.
1890
+ role (str): the role of the message, usually "system" or "user". Default is "system".
1891
+ """
1892
+ if not role:
1893
+ role = "system"
1894
+
1895
+ system_msg = [
1896
+ {
1897
+ "role": role,
1898
+ "content": prompt,
1899
+ }
1900
+ ]
1901
+
1902
+ messages = messages + system_msg
1903
+ if llm_agent and llm_agent.client is not None:
1904
+ llm_client = llm_agent.client
1905
+ elif self.client is not None:
1906
+ llm_client = self.client
1907
+ else:
1908
+ raise ValueError("No OpenAIWrapper client is found.")
1909
+ response = self._generate_oai_reply_from_client(llm_client=llm_client, messages=messages, cache=cache)
1910
+ return response
1911
+
1912
+ def _check_chat_queue_for_sender(self, chat_queue: list[dict[str, Any]]) -> list[dict[str, Any]]:
1913
+ """Check the chat queue and add the "sender" key if it's missing.
1914
+
1915
+ Args:
1916
+ chat_queue (List[Dict[str, Any]]): A list of dictionaries containing chat information.
1917
+
1918
+ Returns:
1919
+ List[Dict[str, Any]]: A new list of dictionaries with the "sender" key added if it was missing.
1920
+ """
1921
+ chat_queue_with_sender = []
1922
+ for chat_info in chat_queue:
1923
+ if chat_info.get("sender") is None:
1924
+ chat_info["sender"] = self
1925
+ chat_queue_with_sender.append(chat_info)
1926
+ return chat_queue_with_sender
1927
+
1928
+ def initiate_chats(self, chat_queue: list[dict[str, Any]]) -> list[ChatResult]:
1929
+ """(Experimental) Initiate chats with multiple agents.
1930
+
1931
+ Args:
1932
+ chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
1933
+ Each dictionary should contain the input arguments for [`initiate_chat`](#initiate-chat)
1934
+
1935
+ Returns: a list of ChatResult objects corresponding to the finished chats in the chat_queue.
1936
+ """
1937
+ _chat_queue = self._check_chat_queue_for_sender(chat_queue)
1938
+ self._finished_chats = initiate_chats(_chat_queue)
1939
+
1940
+ return self._finished_chats
1941
+
1942
+ def sequential_run(
1943
+ self,
1944
+ chat_queue: list[dict[str, Any]],
1945
+ ) -> list[RunResponseProtocol]:
1946
+ """(Experimental) Initiate chats with multiple agents sequentially.
1947
+
1948
+ Args:
1949
+ chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
1950
+ Each dictionary should contain the input arguments for [`initiate_chat`](#initiate-chat)
1951
+
1952
+ Returns: a list of ChatResult objects corresponding to the finished chats in the chat_queue.
1953
+ """
1954
+ iostreams = [ThreadIOStream() for _ in range(len(chat_queue))]
1955
+ # todo: add agents
1956
+ responses = [RunResponse(iostream, agents=[]) for iostream in iostreams]
1957
+
1958
+ def _initiate_chats(
1959
+ iostreams: list[ThreadIOStream] = iostreams,
1960
+ responses: list[RunResponseProtocol] = responses,
1961
+ ) -> None:
1962
+ response = responses[0]
1963
+ try:
1964
+ _chat_queue = self._check_chat_queue_for_sender(chat_queue)
1965
+
1966
+ consolidate_chat_info(_chat_queue)
1967
+ _validate_recipients(_chat_queue)
1968
+ finished_chats = []
1969
+ for chat_info, response, iostream in zip(_chat_queue, responses, iostreams):
1970
+ with IOStream.set_default(iostream):
1971
+ _chat_carryover = chat_info.get("carryover", [])
1972
+ finished_chat_indexes_to_exclude_from_carryover = chat_info.get(
1973
+ "finished_chat_indexes_to_exclude_from_carryover", []
1974
+ )
1975
+
1976
+ if isinstance(_chat_carryover, str):
1977
+ _chat_carryover = [_chat_carryover]
1978
+ chat_info["carryover"] = _chat_carryover + [
1979
+ r.summary
1980
+ for i, r in enumerate(finished_chats)
1981
+ if i not in finished_chat_indexes_to_exclude_from_carryover
1982
+ ]
1983
+
1984
+ if not chat_info.get("silent", False):
1985
+ IOStream.get_default().send(PostCarryoverProcessingEvent(chat_info=chat_info))
1986
+
1987
+ sender = chat_info["sender"]
1988
+ chat_res = sender.initiate_chat(**chat_info)
1989
+
1990
+ IOStream.get_default().send(
1991
+ RunCompletionEvent(
1992
+ history=chat_res.chat_history,
1993
+ summary=chat_res.summary,
1994
+ cost=chat_res.cost,
1995
+ last_speaker=(self if chat_res.chat_history[-1]["name"] == self.name else sender).name,
1996
+ )
1997
+ )
1998
+
1999
+ finished_chats.append(chat_res)
2000
+ except Exception as e:
2001
+ response.iostream.send(ErrorEvent(error=e))
2002
+
2003
+ threading.Thread(target=_initiate_chats).start()
2004
+
2005
+ return responses
2006
+
2007
+ async def a_initiate_chats(self, chat_queue: list[dict[str, Any]]) -> dict[int, ChatResult]:
2008
+ _chat_queue = self._check_chat_queue_for_sender(chat_queue)
2009
+ self._finished_chats = await a_initiate_chats(_chat_queue)
2010
+ return self._finished_chats
2011
+
2012
+ async def a_sequential_run(
2013
+ self,
2014
+ chat_queue: list[dict[str, Any]],
2015
+ ) -> list[AsyncRunResponseProtocol]:
2016
+ """(Experimental) Initiate chats with multiple agents sequentially.
2017
+
2018
+ Args:
2019
+ chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
2020
+ Each dictionary should contain the input arguments for [`initiate_chat`](#initiate-chat)
2021
+
2022
+ Returns: a list of ChatResult objects corresponding to the finished chats in the chat_queue.
2023
+ """
2024
+ iostreams = [AsyncThreadIOStream() for _ in range(len(chat_queue))]
2025
+ # todo: add agents
2026
+ responses = [AsyncRunResponse(iostream, agents=[]) for iostream in iostreams]
2027
+
2028
+ async def _a_initiate_chats(
2029
+ iostreams: list[AsyncThreadIOStream] = iostreams,
2030
+ responses: list[AsyncRunResponseProtocol] = responses,
2031
+ ) -> None:
2032
+ response = responses[0]
2033
+ try:
2034
+ _chat_queue = self._check_chat_queue_for_sender(chat_queue)
2035
+
2036
+ consolidate_chat_info(_chat_queue)
2037
+ _validate_recipients(_chat_queue)
2038
+ finished_chats = []
2039
+ for chat_info, response, iostream in zip(_chat_queue, responses, iostreams):
2040
+ with IOStream.set_default(iostream):
2041
+ _chat_carryover = chat_info.get("carryover", [])
2042
+ finished_chat_indexes_to_exclude_from_carryover = chat_info.get(
2043
+ "finished_chat_indexes_to_exclude_from_carryover", []
2044
+ )
2045
+
2046
+ if isinstance(_chat_carryover, str):
2047
+ _chat_carryover = [_chat_carryover]
2048
+ chat_info["carryover"] = _chat_carryover + [
2049
+ r.summary
2050
+ for i, r in enumerate(finished_chats)
2051
+ if i not in finished_chat_indexes_to_exclude_from_carryover
2052
+ ]
2053
+
2054
+ if not chat_info.get("silent", False):
2055
+ IOStream.get_default().send(PostCarryoverProcessingEvent(chat_info=chat_info))
2056
+
2057
+ sender = chat_info["sender"]
2058
+ chat_res = await sender.a_initiate_chat(**chat_info)
2059
+
2060
+ IOStream.get_default().send(
2061
+ RunCompletionEvent(
2062
+ history=chat_res.chat_history,
2063
+ summary=chat_res.summary,
2064
+ cost=chat_res.cost,
2065
+ last_speaker=(self if chat_res.chat_history[-1]["name"] == self.name else sender).name,
2066
+ )
2067
+ )
2068
+
2069
+ finished_chats.append(chat_res)
2070
+
2071
+ except Exception as e:
2072
+ response.iostream.send(ErrorEvent(error=e))
2073
+
2074
+ asyncio.create_task(_a_initiate_chats())
2075
+
2076
+ return responses
2077
+
2078
+ def get_chat_results(self, chat_index: int | None = None) -> list[ChatResult] | ChatResult:
2079
+ """A summary from the finished chats of particular agents."""
2080
+ if chat_index is not None:
2081
+ return self._finished_chats[chat_index]
2082
+ else:
2083
+ return self._finished_chats
2084
+
2085
+ def reset(self) -> None:
2086
+ """Reset the agent."""
2087
+ self.clear_history()
2088
+ self.reset_consecutive_auto_reply_counter()
2089
+ self.stop_reply_at_receive()
2090
+ if self.client is not None:
2091
+ self.client.clear_usage_summary()
2092
+ for reply_func_tuple in self._reply_func_list:
2093
+ if reply_func_tuple["reset_config"] is not None:
2094
+ reply_func_tuple["reset_config"](reply_func_tuple["config"])
2095
+ else:
2096
+ reply_func_tuple["config"] = copy.copy(reply_func_tuple["init_config"])
2097
+
2098
+ def stop_reply_at_receive(self, sender: Agent | None = None):
2099
+ """Reset the reply_at_receive of the sender."""
2100
+ if sender is None:
2101
+ self.reply_at_receive.clear()
2102
+ else:
2103
+ self.reply_at_receive[sender] = False
2104
+
2105
+ def reset_consecutive_auto_reply_counter(self, sender: Agent | None = None):
2106
+ """Reset the consecutive_auto_reply_counter of the sender."""
2107
+ if sender is None:
2108
+ self._consecutive_auto_reply_counter.clear()
2109
+ else:
2110
+ self._consecutive_auto_reply_counter[sender] = 0
2111
+
2112
+ def clear_history(self, recipient: Agent | None = None, nr_messages_to_preserve: int | None = None):
2113
+ """Clear the chat history of the agent.
2114
+
2115
+ Args:
2116
+ recipient: the agent with whom the chat history to clear. If None, clear the chat history with all agents.
2117
+ nr_messages_to_preserve: the number of newest messages to preserve in the chat history.
2118
+ """
2119
+ iostream = IOStream.get_default()
2120
+ if recipient is None:
2121
+ no_messages_preserved = 0
2122
+ if nr_messages_to_preserve:
2123
+ for key in self._oai_messages:
2124
+ nr_messages_to_preserve_internal = nr_messages_to_preserve
2125
+ # if breaking history between function call and function response, save function call message
2126
+ # additionally, otherwise openai will return error
2127
+ first_msg_to_save = self._oai_messages[key][-nr_messages_to_preserve_internal]
2128
+ if "tool_responses" in first_msg_to_save:
2129
+ nr_messages_to_preserve_internal += 1
2130
+ # clear_conversable_agent_history.print_preserving_message(iostream.print)
2131
+ no_messages_preserved += 1
2132
+ # Remove messages from history except last `nr_messages_to_preserve` messages.
2133
+ self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:]
2134
+ iostream.send(ClearConversableAgentHistoryEvent(agent=self, no_events_preserved=no_messages_preserved))
2135
+ else:
2136
+ self._oai_messages.clear()
2137
+ else:
2138
+ self._oai_messages[recipient].clear()
2139
+ # clear_conversable_agent_history.print_warning(iostream.print)
2140
+ if nr_messages_to_preserve:
2141
+ iostream.send(ClearConversableAgentHistoryWarningEvent(recipient=self))
2142
+
2143
+ def generate_oai_reply(
2144
+ self,
2145
+ messages: list[dict[str, Any]] | None = None,
2146
+ sender: Agent | None = None,
2147
+ config: OpenAIWrapper | None = None,
2148
+ **kwargs: Any,
2149
+ ) -> tuple[bool, str | dict[str, Any] | None]:
2150
+ """Generate a reply using autogen.oai."""
2151
+ client = self.client if config is None else config
2152
+ if client is None:
2153
+ return False, None
2154
+ if messages is None:
2155
+ messages = self._oai_messages[sender]
2156
+
2157
+ # Process messages before sending to LLM, hook point for llm input monitoring
2158
+ processed_messages = self._process_llm_input(self._oai_system_message + messages)
2159
+ if processed_messages is None:
2160
+ return True, {"content": "LLM call blocked by safeguard", "role": "assistant"}
2161
+
2162
+ extracted_response = self._generate_oai_reply_from_client(
2163
+ client,
2164
+ self._oai_system_message + messages,
2165
+ self.client_cache,
2166
+ **kwargs,
2167
+ )
2168
+
2169
+ # Process LLM response
2170
+ if extracted_response is not None:
2171
+ processed_extracted_response = self._process_llm_output(extracted_response)
2172
+ if processed_extracted_response is None:
2173
+ raise ValueError("safeguard_llm_outputs hook returned None")
2174
+
2175
+ return (False, None) if extracted_response is None else (True, extracted_response)
2176
+
2177
+ def _generate_oai_reply_from_client(
2178
+ self,
2179
+ llm_client,
2180
+ messages,
2181
+ cache,
2182
+ **kwargs: Any,
2183
+ ) -> str | dict[str, Any] | None:
2184
+ # unroll tool_responses
2185
+ all_messages = []
2186
+ for message in messages:
2187
+ tool_responses = message.get("tool_responses", [])
2188
+ if tool_responses:
2189
+ all_messages += tool_responses
2190
+ # tool role on the parent message means the content is just concatenation of all of the tool_responses
2191
+ if message.get("role") != "tool":
2192
+ all_messages.append({key: message[key] for key in message if key != "tool_responses"})
2193
+ else:
2194
+ all_messages.append(message)
2195
+
2196
+ # TODO: #1143 handle token limit exceeded error
2197
+ response = llm_client.create(
2198
+ context=messages[-1].pop("context", None),
2199
+ messages=all_messages,
2200
+ cache=cache,
2201
+ agent=self,
2202
+ **kwargs,
2203
+ )
2204
+ extracted_response = llm_client.extract_text_or_completion_object(response)[0]
2205
+
2206
+ if extracted_response is None:
2207
+ warnings.warn(f"Extracted_response from {response} is None.", UserWarning)
2208
+ return None
2209
+ # ensure function and tool calls will be accepted when sent back to the LLM
2210
+ if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
2211
+ extracted_response = extracted_response.model_dump()
2212
+ if isinstance(extracted_response, dict):
2213
+ if extracted_response.get("function_call"):
2214
+ extracted_response["function_call"]["name"] = self._normalize_name(
2215
+ extracted_response["function_call"]["name"]
2216
+ )
2217
+ for tool_call in extracted_response.get("tool_calls") or []:
2218
+ tool_call["function"]["name"] = self._normalize_name(tool_call["function"]["name"])
2219
+ # Remove id and type if they are not present.
2220
+ # This is to make the tool call object compatible with Mistral API.
2221
+ if tool_call.get("id") is None:
2222
+ tool_call.pop("id")
2223
+ if tool_call.get("type") is None:
2224
+ tool_call.pop("type")
2225
+ return extracted_response
2226
+
2227
+ async def a_generate_oai_reply(
2228
+ self,
2229
+ messages: list[dict[str, Any]] | None = None,
2230
+ sender: Agent | None = None,
2231
+ config: Any | None = None,
2232
+ **kwargs: Any,
2233
+ ) -> tuple[bool, str | dict[str, Any] | None]:
2234
+ """Generate a reply using autogen.oai asynchronously."""
2235
+ iostream = IOStream.get_default()
2236
+
2237
+ def _generate_oai_reply(
2238
+ self, iostream: IOStream, *args: Any, **kw: Any
2239
+ ) -> tuple[bool, str | dict[str, Any] | None]:
2240
+ with IOStream.set_default(iostream):
2241
+ return self.generate_oai_reply(*args, **kw)
2242
+
2243
+ return await asyncio.get_event_loop().run_in_executor(
2244
+ None,
2245
+ functools.partial(
2246
+ _generate_oai_reply,
2247
+ self=self,
2248
+ iostream=iostream,
2249
+ messages=messages,
2250
+ sender=sender,
2251
+ config=config,
2252
+ **kwargs,
2253
+ ),
2254
+ )
2255
+
2256
+ def _generate_code_execution_reply_using_executor(
2257
+ self,
2258
+ messages: list[dict[str, Any]] | None = None,
2259
+ sender: Agent | None = None,
2260
+ config: dict[str, Any] | Literal[False] | None = None,
2261
+ ):
2262
+ """Generate a reply using code executor."""
2263
+ iostream = IOStream.get_default()
2264
+
2265
+ if config is not None:
2266
+ raise ValueError("config is not supported for _generate_code_execution_reply_using_executor.")
2267
+ if self._code_execution_config is False:
2268
+ return False, None
2269
+ if messages is None:
2270
+ messages = self._oai_messages[sender]
2271
+ last_n_messages = self._code_execution_config.get("last_n_messages", "auto")
2272
+
2273
+ if not (isinstance(last_n_messages, (int, float)) and last_n_messages >= 0) and last_n_messages != "auto":
2274
+ raise ValueError("last_n_messages must be either a non-negative integer, or the string 'auto'.")
2275
+
2276
+ num_messages_to_scan = last_n_messages
2277
+ if last_n_messages == "auto":
2278
+ # Find when the agent last spoke
2279
+ num_messages_to_scan = 0
2280
+ for message in reversed(messages):
2281
+ if "role" not in message or message["role"] != "user":
2282
+ break
2283
+ else:
2284
+ num_messages_to_scan += 1
2285
+ num_messages_to_scan = min(len(messages), num_messages_to_scan)
2286
+ messages_to_scan = messages[-num_messages_to_scan:]
2287
+
2288
+ # iterate through the last n messages in reverse
2289
+ # if code blocks are found, execute the code blocks and return the output
2290
+ # if no code blocks are found, continue
2291
+ for message in reversed(messages_to_scan):
2292
+ if not message["content"]:
2293
+ continue
2294
+ code_blocks = self._code_executor.code_extractor.extract_code_blocks(message["content"])
2295
+ if len(code_blocks) == 0:
2296
+ continue
2297
+
2298
+ iostream.send(GenerateCodeExecutionReplyEvent(code_blocks=code_blocks, sender=sender, recipient=self))
2299
+
2300
+ # found code blocks, execute code.
2301
+ code_result = self._code_executor.execute_code_blocks(code_blocks)
2302
+ exitcode2str = "execution succeeded" if code_result.exit_code == 0 else "execution failed"
2303
+ return True, f"exitcode: {code_result.exit_code} ({exitcode2str})\nCode output: {code_result.output}"
2304
+
2305
+ return False, None
2306
+
2307
+ def generate_code_execution_reply(
2308
+ self,
2309
+ messages: list[dict[str, Any]] | None = None,
2310
+ sender: Agent | None = None,
2311
+ config: dict[str, Any] | Literal[False] | None = None,
2312
+ ):
2313
+ """Generate a reply using code execution."""
2314
+ code_execution_config = config if config is not None else self._code_execution_config
2315
+ if code_execution_config is False:
2316
+ return False, None
2317
+ if messages is None:
2318
+ messages = self._oai_messages[sender]
2319
+ last_n_messages = code_execution_config.pop("last_n_messages", "auto")
2320
+
2321
+ if not (isinstance(last_n_messages, (int, float)) and last_n_messages >= 0) and last_n_messages != "auto":
2322
+ raise ValueError("last_n_messages must be either a non-negative integer, or the string 'auto'.")
2323
+
2324
+ messages_to_scan = last_n_messages
2325
+ if last_n_messages == "auto":
2326
+ # Find when the agent last spoke
2327
+ messages_to_scan = 0
2328
+ for i in range(len(messages)):
2329
+ message = messages[-(i + 1)]
2330
+ if "role" not in message or message["role"] != "user":
2331
+ break
2332
+ else:
2333
+ messages_to_scan += 1
2334
+
2335
+ # iterate through the last n messages in reverse
2336
+ # if code blocks are found, execute the code blocks and return the output
2337
+ # if no code blocks are found, continue
2338
+ for i in range(min(len(messages), messages_to_scan)):
2339
+ message = messages[-(i + 1)]
2340
+ if not message["content"]:
2341
+ continue
2342
+ code_blocks = extract_code(message["content"])
2343
+ if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
2344
+ continue
2345
+
2346
+ # found code blocks, execute code and push "last_n_messages" back
2347
+ exitcode, logs = self.execute_code_blocks(code_blocks)
2348
+ code_execution_config["last_n_messages"] = last_n_messages
2349
+ exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
2350
+ return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"
2351
+
2352
+ # no code blocks are found, push last_n_messages back and return.
2353
+ code_execution_config["last_n_messages"] = last_n_messages
2354
+
2355
+ return False, None
2356
+
2357
+ def _run_async_in_thread(self, coro):
2358
+ """Run an async coroutine in a separate thread with its own event loop."""
2359
+ result = {}
2360
+
2361
+ def runner():
2362
+ loop = asyncio.new_event_loop()
2363
+ asyncio.set_event_loop(loop)
2364
+ result["value"] = loop.run_until_complete(coro)
2365
+ loop.close()
2366
+
2367
+ t = threading.Thread(target=runner)
2368
+ t.start()
2369
+ t.join()
2370
+ return result["value"]
2371
+
2372
+ def generate_function_call_reply(
2373
+ self,
2374
+ messages: list[dict[str, Any]] | None = None,
2375
+ sender: Agent | None = None,
2376
+ config: Any | None = None,
2377
+ ) -> tuple[bool, dict[str, Any] | None]:
2378
+ """Generate a reply using function call.
2379
+
2380
+ "function_call" replaced by "tool_calls" as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
2381
+ See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions
2382
+ """
2383
+ if config is None:
2384
+ config = self
2385
+ if messages is None:
2386
+ messages = self._oai_messages[sender]
2387
+ message = messages[-1]
2388
+ if message.get("function_call"):
2389
+ call_id = message.get("id", None)
2390
+ func_call = message["function_call"]
2391
+ func = self._function_map.get(func_call.get("name", None), None)
2392
+ if is_coroutine_callable(func):
2393
+ coro = self.a_execute_function(func_call, call_id=call_id)
2394
+ _, func_return = self._run_async_in_thread(coro)
2395
+ else:
2396
+ _, func_return = self.execute_function(message["function_call"], call_id=call_id)
2397
+ return True, func_return
2398
+ return False, None
2399
+
2400
+ async def a_generate_function_call_reply(
2401
+ self,
2402
+ messages: list[dict[str, Any]] | None = None,
2403
+ sender: Agent | None = None,
2404
+ config: Any | None = None,
2405
+ ) -> tuple[bool, dict[str, Any] | None]:
2406
+ """Generate a reply using async function call.
2407
+
2408
+ "function_call" replaced by "tool_calls" as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
2409
+ See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions
2410
+ """
2411
+ if config is None:
2412
+ config = self
2413
+ if messages is None:
2414
+ messages = self._oai_messages[sender]
2415
+ message = messages[-1]
2416
+ if message.get("function_call"):
2417
+ call_id = message.get("id", None)
2418
+ func_call = message["function_call"]
2419
+ func_name = func_call.get("name", "")
2420
+ func = self._function_map.get(func_name, None)
2421
+ if func and is_coroutine_callable(func):
2422
+ _, func_return = await self.a_execute_function(func_call, call_id=call_id)
2423
+ else:
2424
+ _, func_return = self.execute_function(func_call, call_id=call_id)
2425
+ return True, func_return
2426
+
2427
+ return False, None
2428
+
2429
+ def _str_for_tool_response(self, tool_response):
2430
+ return str(tool_response.get("content", ""))
2431
+
2432
+ def generate_tool_calls_reply(
2433
+ self,
2434
+ messages: list[dict[str, Any]] | None = None,
2435
+ sender: Agent | None = None,
2436
+ config: Any | None = None,
2437
+ ) -> tuple[bool, dict[str, Any] | None]:
2438
+ """Generate a reply using tool call."""
2439
+ if config is None:
2440
+ config = self
2441
+ if messages is None:
2442
+ messages = self._oai_messages[sender]
2443
+ message = messages[-1]
2444
+ tool_returns = []
2445
+ for tool_call in message.get("tool_calls", []):
2446
+ function_call = tool_call.get("function", {})
2447
+
2448
+ # Hook: Process tool input before execution
2449
+ processed_call = self._process_tool_input(function_call)
2450
+ if processed_call is None:
2451
+ raise ValueError("safeguard_tool_inputs hook returned None")
2452
+
2453
+ tool_call_id = tool_call.get("id", None)
2454
+ func = self._function_map.get(processed_call.get("name", None), None)
2455
+ if is_coroutine_callable(func):
2456
+ coro = self.a_execute_function(processed_call, call_id=tool_call_id)
2457
+ _, func_return = self._run_async_in_thread(coro)
2458
+ else:
2459
+ _, func_return = self.execute_function(processed_call, call_id=tool_call_id)
2460
+
2461
+ # Hook: Process tool output before returning
2462
+ processed_return = self._process_tool_output(func_return)
2463
+ if processed_return is None:
2464
+ raise ValueError("safeguard_tool_outputs hook returned None")
2465
+
2466
+ content = processed_return.get("content", "")
2467
+ if content is None:
2468
+ content = ""
2469
+
2470
+ if tool_call_id is not None:
2471
+ tool_call_response = {
2472
+ "tool_call_id": tool_call_id,
2473
+ "role": "tool",
2474
+ "content": content,
2475
+ }
2476
+ else:
2477
+ # Do not include tool_call_id if it is not present.
2478
+ # This is to make the tool call object compatible with Mistral API.
2479
+ tool_call_response = {
2480
+ "role": "tool",
2481
+ "content": content,
2482
+ }
2483
+ tool_returns.append(tool_call_response)
2484
+ if tool_returns:
2485
+ return True, {
2486
+ "role": "tool",
2487
+ "tool_responses": tool_returns,
2488
+ "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
2489
+ }
2490
+ return False, None
2491
+
2492
+ async def _a_execute_tool_call(self, tool_call):
2493
+ tool_call_id = tool_call["id"]
2494
+ function_call = tool_call.get("function", {})
2495
+ _, func_return = await self.a_execute_function(function_call, call_id=tool_call_id)
2496
+ return {
2497
+ "tool_call_id": tool_call_id,
2498
+ "role": "tool",
2499
+ "content": func_return.get("content", ""),
2500
+ }
2501
+
2502
+ async def a_generate_tool_calls_reply(
2503
+ self,
2504
+ messages: list[dict[str, Any]] | None = None,
2505
+ sender: Agent | None = None,
2506
+ config: Any | None = None,
2507
+ ) -> tuple[bool, dict[str, Any] | None]:
2508
+ """Generate a reply using async function call."""
2509
+ if config is None:
2510
+ config = self
2511
+ if messages is None:
2512
+ messages = self._oai_messages[sender]
2513
+ message = messages[-1]
2514
+ async_tool_calls = []
2515
+ for tool_call in message.get("tool_calls", []):
2516
+ async_tool_calls.append(self._a_execute_tool_call(tool_call))
2517
+ if async_tool_calls:
2518
+ tool_returns = await asyncio.gather(*async_tool_calls)
2519
+ return True, {
2520
+ "role": "tool",
2521
+ "tool_responses": tool_returns,
2522
+ "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
2523
+ }
2524
+
2525
+ return False, None
2526
+
2527
+ def check_termination_and_human_reply(
2528
+ self,
2529
+ messages: list[dict[str, Any]] | None = None,
2530
+ sender: Agent | None = None,
2531
+ config: Any | None = None,
2532
+ iostream: IOStreamProtocol | None = None,
2533
+ ) -> tuple[bool, str | None]:
2534
+ """Check if the conversation should be terminated, and if human reply is provided.
2535
+
2536
+ This method checks for conditions that require the conversation to be terminated, such as reaching
2537
+ a maximum number of consecutive auto-replies or encountering a termination message. Additionally,
2538
+ it prompts for and processes human input based on the configured human input mode, which can be
2539
+ 'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter
2540
+ for the conversation and prints relevant messages based on the human input received.
2541
+
2542
+ Args:
2543
+ messages (Optional[List[Dict]]): A list of message dictionaries, representing the conversation history.
2544
+ sender (Optional[Agent]): The agent object representing the sender of the message.
2545
+ config (Optional[Any]): Configuration object, defaults to the current instance if not provided.
2546
+ iostream (Optional[IOStreamProtocol]): The IOStream object to use for sending messages.
2547
+
2548
+ Returns:
2549
+ A tuple containing a boolean indicating if the conversation
2550
+ should be terminated, and a human reply which can be a string, a dictionary, or None.
2551
+ """
2552
+ iostream = iostream or IOStream.get_default()
2553
+
2554
+ if config is None:
2555
+ config = self
2556
+ if messages is None:
2557
+ messages = self._oai_messages[sender] if sender else []
2558
+
2559
+ termination_reason = None
2560
+
2561
+ # if there are no messages, continue the conversation
2562
+ if not messages:
2563
+ return False, None
2564
+ message = messages[-1]
2565
+
2566
+ reply = ""
2567
+ no_human_input_msg = ""
2568
+ sender_name = "the sender" if sender is None else sender.name
2569
+ if self.human_input_mode == "ALWAYS":
2570
+ reply = self.get_human_input(
2571
+ f"Replying as {self.name}. Provide feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: ",
2572
+ iostream=iostream,
2573
+ )
2574
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
2575
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
2576
+ if not reply and self._is_termination_msg(message):
2577
+ termination_reason = f"Termination message condition on agent '{self.name}' met"
2578
+ elif reply == "exit":
2579
+ termination_reason = "User requested to end the conversation"
2580
+
2581
+ reply = reply if reply or not self._is_termination_msg(message) else "exit"
2582
+ else:
2583
+ if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
2584
+ if self.human_input_mode == "NEVER":
2585
+ termination_reason = "Maximum number of consecutive auto-replies reached"
2586
+ reply = "exit"
2587
+ else:
2588
+ # self.human_input_mode == "TERMINATE":
2589
+ terminate = self._is_termination_msg(message)
2590
+ reply = self.get_human_input(
2591
+ f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
2592
+ if terminate
2593
+ else f"Please give feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: ",
2594
+ iostream=iostream,
2595
+ )
2596
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
2597
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
2598
+ if reply != "exit" and terminate:
2599
+ termination_reason = (
2600
+ f"Termination message condition on agent '{self.name}' met and no human input provided"
2601
+ )
2602
+ elif reply == "exit":
2603
+ termination_reason = "User requested to end the conversation"
2604
+
2605
+ reply = reply if reply or not terminate else "exit"
2606
+ elif self._is_termination_msg(message):
2607
+ if self.human_input_mode == "NEVER":
2608
+ termination_reason = f"Termination message condition on agent '{self.name}' met"
2609
+ reply = "exit"
2610
+ else:
2611
+ # self.human_input_mode == "TERMINATE":
2612
+ reply = self.get_human_input(
2613
+ f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: ",
2614
+ iostream=iostream,
2615
+ )
2616
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
2617
+
2618
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
2619
+ if not reply or reply == "exit":
2620
+ termination_reason = (
2621
+ f"Termination message condition on agent '{self.name}' met and no human input provided"
2622
+ )
2623
+
2624
+ reply = reply or "exit"
2625
+
2626
+ # print the no_human_input_msg
2627
+ if no_human_input_msg:
2628
+ iostream.send(
2629
+ TerminationAndHumanReplyNoInputEvent(
2630
+ no_human_input_msg=no_human_input_msg, sender=sender, recipient=self
2631
+ )
2632
+ )
2633
+
2634
+ # stop the conversation
2635
+ if reply == "exit":
2636
+ # reset the consecutive_auto_reply_counter
2637
+ self._consecutive_auto_reply_counter[sender] = 0
2638
+
2639
+ if termination_reason:
2640
+ iostream.send(TerminationEvent(termination_reason=termination_reason, sender=self, recipient=sender))
2641
+
2642
+ return True, None
2643
+
2644
+ # send the human reply
2645
+ if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
2646
+ # reset the consecutive_auto_reply_counter
2647
+ self._consecutive_auto_reply_counter[sender] = 0
2648
+ # User provided a custom response, return function and tool failures indicating user interruption
2649
+ tool_returns = []
2650
+ if message.get("function_call", False):
2651
+ tool_returns.append({
2652
+ "role": "function",
2653
+ "name": message["function_call"].get("name", ""),
2654
+ "content": "USER INTERRUPTED",
2655
+ })
2656
+
2657
+ if message.get("tool_calls", False):
2658
+ tool_returns.extend([
2659
+ {"role": "tool", "tool_call_id": tool_call.get("id", ""), "content": "USER INTERRUPTED"}
2660
+ for tool_call in message["tool_calls"]
2661
+ ])
2662
+
2663
+ response = {"role": "user", "content": reply}
2664
+ if tool_returns:
2665
+ response["tool_responses"] = tool_returns
2666
+
2667
+ return True, response
2668
+
2669
+ # increment the consecutive_auto_reply_counter
2670
+ self._consecutive_auto_reply_counter[sender] += 1
2671
+ if self.human_input_mode != "NEVER":
2672
+ iostream.send(UsingAutoReplyEvent(human_input_mode=self.human_input_mode, sender=sender, recipient=self))
2673
+
2674
+ return False, None
2675
+
2676
+ async def a_check_termination_and_human_reply(
2677
+ self,
2678
+ messages: list[dict[str, Any]] | None = None,
2679
+ sender: Agent | None = None,
2680
+ config: Any | None = None,
2681
+ iostream: AsyncIOStreamProtocol | None = None,
2682
+ ) -> tuple[bool, str | None]:
2683
+ """(async) Check if the conversation should be terminated, and if human reply is provided.
2684
+
2685
+ This method checks for conditions that require the conversation to be terminated, such as reaching
2686
+ a maximum number of consecutive auto-replies or encountering a termination message. Additionally,
2687
+ it prompts for and processes human input based on the configured human input mode, which can be
2688
+ 'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter
2689
+ for the conversation and prints relevant messages based on the human input received.
2690
+
2691
+ Args:
2692
+ messages (Optional[List[Dict]]): A list of message dictionaries, representing the conversation history.
2693
+ sender (Optional[Agent]): The agent object representing the sender of the message.
2694
+ config (Optional[Any]): Configuration object, defaults to the current instance if not provided.
2695
+ iostream (Optional[AsyncIOStreamProtocol]): The AsyncIOStreamProtocol object to use for sending messages.
2696
+ Returns:
2697
+ Tuple[bool, Union[str, Dict, None]]: A tuple containing a boolean indicating if the conversation
2698
+ should be terminated, and a human reply which can be a string, a dictionary, or None.
2699
+ """
2700
+ iostream = iostream or IOStream.get_default()
2701
+
2702
+ if config is None:
2703
+ config = self
2704
+ if messages is None:
2705
+ messages = self._oai_messages[sender] if sender else []
2706
+
2707
+ termination_reason = None
2708
+
2709
+ message = messages[-1] if messages else {}
2710
+ reply = ""
2711
+ no_human_input_msg = ""
2712
+ sender_name = "the sender" if sender is None else sender.name
2713
+ if self.human_input_mode == "ALWAYS":
2714
+ reply = await self.a_get_human_input(
2715
+ f"Replying as {self.name}. Provide feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: ",
2716
+ iostream=iostream,
2717
+ )
2718
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
2719
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
2720
+ if not reply and self._is_termination_msg(message):
2721
+ termination_reason = f"Termination message condition on agent '{self.name}' met"
2722
+ elif reply == "exit":
2723
+ termination_reason = "User requested to end the conversation"
2724
+
2725
+ reply = reply if reply or not self._is_termination_msg(message) else "exit"
2726
+ else:
2727
+ if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
2728
+ if self.human_input_mode == "NEVER":
2729
+ termination_reason = "Maximum number of consecutive auto-replies reached"
2730
+ reply = "exit"
2731
+ else:
2732
+ # self.human_input_mode == "TERMINATE":
2733
+ terminate = self._is_termination_msg(message)
2734
+ reply = await self.a_get_human_input(
2735
+ f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
2736
+ if terminate
2737
+ else f"Please give feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: ",
2738
+ iostream=iostream,
2739
+ )
2740
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
2741
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
2742
+ if reply != "exit" and terminate:
2743
+ termination_reason = (
2744
+ f"Termination message condition on agent '{self.name}' met and no human input provided"
2745
+ )
2746
+ elif reply == "exit":
2747
+ termination_reason = "User requested to end the conversation"
2748
+
2749
+ reply = reply if reply or not terminate else "exit"
2750
+ elif self._is_termination_msg(message):
2751
+ if self.human_input_mode == "NEVER":
2752
+ termination_reason = f"Termination message condition on agent '{self.name}' met"
2753
+ reply = "exit"
2754
+ else:
2755
+ # self.human_input_mode == "TERMINATE":
2756
+ reply = await self.a_get_human_input(
2757
+ f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: ",
2758
+ iostream=iostream,
2759
+ )
2760
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
2761
+
2762
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
2763
+ if not reply or reply == "exit":
2764
+ termination_reason = (
2765
+ f"Termination message condition on agent '{self.name}' met and no human input provided"
2766
+ )
2767
+
2768
+ reply = reply or "exit"
2769
+
2770
+ # print the no_human_input_msg
2771
+ if no_human_input_msg:
2772
+ iostream.send(
2773
+ TerminationAndHumanReplyNoInputEvent(
2774
+ no_human_input_msg=no_human_input_msg, sender=sender, recipient=self
2775
+ )
2776
+ )
2777
+
2778
+ # stop the conversation
2779
+ if reply == "exit":
2780
+ # reset the consecutive_auto_reply_counter
2781
+ self._consecutive_auto_reply_counter[sender] = 0
2782
+
2783
+ if termination_reason:
2784
+ iostream.send(TerminationEvent(termination_reason=termination_reason, sender=self, recipient=sender))
2785
+
2786
+ return True, None
2787
+
2788
+ # send the human reply
2789
+ if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
2790
+ # User provided a custom response, return function and tool results indicating user interruption
2791
+ # reset the consecutive_auto_reply_counter
2792
+ self._consecutive_auto_reply_counter[sender] = 0
2793
+ tool_returns = []
2794
+ if message.get("function_call", False):
2795
+ tool_returns.append({
2796
+ "role": "function",
2797
+ "name": message["function_call"].get("name", ""),
2798
+ "content": "USER INTERRUPTED",
2799
+ })
2800
+
2801
+ if message.get("tool_calls", False):
2802
+ tool_returns.extend([
2803
+ {"role": "tool", "tool_call_id": tool_call.get("id", ""), "content": "USER INTERRUPTED"}
2804
+ for tool_call in message["tool_calls"]
2805
+ ])
2806
+
2807
+ response = {"role": "user", "content": reply}
2808
+ if tool_returns:
2809
+ response["tool_responses"] = tool_returns
2810
+
2811
+ return True, response
2812
+
2813
+ # increment the consecutive_auto_reply_counter
2814
+ self._consecutive_auto_reply_counter[sender] += 1
2815
+ if self.human_input_mode != "NEVER":
2816
+ iostream.send(UsingAutoReplyEvent(human_input_mode=self.human_input_mode, sender=sender, recipient=self))
2817
+
2818
+ return False, None
2819
+
2820
+ def generate_reply(
2821
+ self,
2822
+ messages: list[dict[str, Any]] | None = None,
2823
+ sender: Optional["Agent"] = None,
2824
+ exclude: Container[Any] = (),
2825
+ ) -> str | dict[str, Any] | None:
2826
+ """Reply based on the conversation history and the sender.
2827
+
2828
+ Either messages or sender must be provided.
2829
+ Register a reply_func with `None` as one trigger for it to be activated when `messages` is non-empty and `sender` is `None`.
2830
+ Use registered auto reply functions to generate replies.
2831
+ By default, the following functions are checked in order:
2832
+ 1. check_termination_and_human_reply
2833
+ 2. generate_function_call_reply (deprecated in favor of tool_calls)
2834
+ 3. generate_tool_calls_reply
2835
+ 4. generate_code_execution_reply
2836
+ 5. generate_oai_reply
2837
+ Every function returns a tuple (final, reply).
2838
+ When a function returns final=False, the next function will be checked.
2839
+ So by default, termination and human reply will be checked first.
2840
+ If not terminating and human reply is skipped, execute function or code and return the result.
2841
+ AI replies are generated only when no code execution is performed.
2842
+
2843
+ Args:
2844
+ messages: a list of messages in the conversation history.
2845
+ sender: sender of an Agent instance.
2846
+ exclude: A list of reply functions to exclude from
2847
+ the reply generation process. Functions in this list will be skipped even if
2848
+ they would normally be triggered.
2849
+
2850
+ Returns:
2851
+ str or dict or None: reply. None if no reply is generated.
2852
+ """
2853
+ if all((messages is None, sender is None)):
2854
+ error_msg = f"Either {messages=} or {sender=} must be provided."
2855
+ logger.error(error_msg)
2856
+ raise AssertionError(error_msg)
2857
+
2858
+ if messages is None:
2859
+ messages = self._oai_messages[sender]
2860
+
2861
+ # Call the hookable method that gives registered hooks a chance to update agent state, used for their context variables.
2862
+ self.update_agent_state_before_reply(messages)
2863
+
2864
+ # Call the hookable method that gives registered hooks a chance to process the last message.
2865
+ # Message modifications do not affect the incoming messages or self._oai_messages.
2866
+ messages = self.process_last_received_message(messages)
2867
+
2868
+ # Call the hookable method that gives registered hooks a chance to process all messages.
2869
+ # Message modifications do not affect the incoming messages or self._oai_messages.
2870
+ messages = self.process_all_messages_before_reply(messages)
2871
+
2872
+ for reply_func_tuple in self._reply_func_list:
2873
+ reply_func = reply_func_tuple["reply_func"]
2874
+ if reply_func in exclude:
2875
+ continue
2876
+ if is_coroutine_callable(reply_func):
2877
+ continue
2878
+ if self._match_trigger(reply_func_tuple["trigger"], sender):
2879
+ final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
2880
+ if logging_enabled():
2881
+ log_event(
2882
+ self,
2883
+ "reply_func_executed",
2884
+ reply_func_module=reply_func.__module__,
2885
+ reply_func_name=reply_func.__name__,
2886
+ final=final,
2887
+ reply=reply,
2888
+ )
2889
+ if final:
2890
+ return reply
2891
+ return self._default_auto_reply
2892
+
2893
+ async def a_generate_reply(
2894
+ self,
2895
+ messages: list[dict[str, Any]] | None = None,
2896
+ sender: Optional["Agent"] = None,
2897
+ exclude: Container[Any] = (),
2898
+ ) -> str | dict[str, Any] | None:
2899
+ """(async) Reply based on the conversation history and the sender.
2900
+
2901
+ Either messages or sender must be provided.
2902
+ Register a reply_func with `None` as one trigger for it to be activated when `messages` is non-empty and `sender` is `None`.
2903
+ Use registered auto reply functions to generate replies.
2904
+ By default, the following functions are checked in order:
2905
+ 1. check_termination_and_human_reply
2906
+ 2. generate_function_call_reply
2907
+ 3. generate_tool_calls_reply
2908
+ 4. generate_code_execution_reply
2909
+ 5. generate_oai_reply
2910
+ Every function returns a tuple (final, reply).
2911
+ When a function returns final=False, the next function will be checked.
2912
+ So by default, termination and human reply will be checked first.
2913
+ If not terminating and human reply is skipped, execute function or code and return the result.
2914
+ AI replies are generated only when no code execution is performed.
2915
+
2916
+ Args:
2917
+ messages: a list of messages in the conversation history.
2918
+ sender: sender of an Agent instance.
2919
+ exclude: A list of reply functions to exclude from
2920
+ the reply generation process. Functions in this list will be skipped even if
2921
+ they would normally be triggered.
2922
+
2923
+ Returns:
2924
+ str or dict or None: reply. None if no reply is generated.
2925
+ """
2926
+ if all((messages is None, sender is None)):
2927
+ error_msg = f"Either {messages=} or {sender=} must be provided."
2928
+ logger.error(error_msg)
2929
+ raise AssertionError(error_msg)
2930
+
2931
+ if messages is None:
2932
+ messages = self._oai_messages[sender]
2933
+
2934
+ # Call the hookable method that gives registered hooks a chance to update agent state, used for their context variables.
2935
+ self.update_agent_state_before_reply(messages)
2936
+
2937
+ # Call the hookable method that gives registered hooks a chance to process the last message.
2938
+ # Message modifications do not affect the incoming messages or self._oai_messages.
2939
+ messages = self.process_last_received_message(messages)
2940
+
2941
+ # Call the hookable method that gives registered hooks a chance to process all messages.
2942
+ # Message modifications do not affect the incoming messages or self._oai_messages.
2943
+ messages = self.process_all_messages_before_reply(messages)
2944
+
2945
+ for reply_func_tuple in self._reply_func_list:
2946
+ reply_func = reply_func_tuple["reply_func"]
2947
+ if reply_func in exclude:
2948
+ continue
2949
+
2950
+ if self._match_trigger(reply_func_tuple["trigger"], sender):
2951
+ if is_coroutine_callable(reply_func):
2952
+ final, reply = await reply_func(
2953
+ self,
2954
+ messages=messages,
2955
+ sender=sender,
2956
+ config=reply_func_tuple["config"],
2957
+ )
2958
+ else:
2959
+ final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
2960
+ if final:
2961
+ return reply
2962
+ return self._default_auto_reply
2963
+
2964
+ def _match_trigger(self, trigger: None | str | type | Agent | Callable | list, sender: Agent | None) -> bool:
2965
+ """Check if the sender matches the trigger.
2966
+
2967
+ Args:
2968
+ trigger (Union[None, str, type, Agent, Callable, List]): The condition to match against the sender.
2969
+ Can be `None`, string, type, `Agent` instance, callable, or a list of these.
2970
+ sender (Agent): The sender object or type to be matched against the trigger.
2971
+
2972
+ Returns:
2973
+ `True` if the sender matches the trigger, otherwise `False`.
2974
+
2975
+ Raises:
2976
+ ValueError: If the trigger type is unsupported.
2977
+ """
2978
+ if trigger is None:
2979
+ return sender is None
2980
+ elif isinstance(trigger, str):
2981
+ if sender is None:
2982
+ raise SenderRequiredError()
2983
+ return trigger == sender.name
2984
+ elif isinstance(trigger, type):
2985
+ return isinstance(sender, trigger)
2986
+ elif isinstance(trigger, Agent):
2987
+ # return True if the sender is the same type (class) as the trigger
2988
+ return trigger == sender
2989
+ elif isinstance(trigger, Callable):
2990
+ rst = trigger(sender)
2991
+ assert isinstance(rst, bool), f"trigger {trigger} must return a boolean value."
2992
+ return rst
2993
+ elif isinstance(trigger, list):
2994
+ return any(self._match_trigger(t, sender) for t in trigger)
2995
+ else:
2996
+ raise ValueError(f"Unsupported trigger type: {type(trigger)}")
2997
+
2998
+ def get_human_input(self, prompt: str, *, iostream: InputStream | None = None) -> str:
2999
+ """Get human input.
3000
+
3001
+ Override this method to customize the way to get human input.
3002
+
3003
+ Args:
3004
+ prompt (str): prompt for the human input.
3005
+ iostream (Optional[InputStream]): The InputStream object to use for sending messages.
3006
+ Returns:
3007
+ str: human input.
3008
+ """
3009
+ iostream = iostream or IOStream.get_default()
3010
+
3011
+ reply = iostream.input(prompt)
3012
+
3013
+ # Process the human input through hooks
3014
+ processed_reply = self._process_human_input(reply)
3015
+ if processed_reply is None:
3016
+ raise ValueError("safeguard_human_inputs hook returned None")
3017
+
3018
+ self._human_input.append(processed_reply)
3019
+ return processed_reply
3020
+
3021
+ async def a_get_human_input(self, prompt: str, *, iostream: AsyncInputStream | None = None) -> str:
3022
+ """(Async) Get human input.
3023
+
3024
+ Override this method to customize the way to get human input.
3025
+
3026
+ Args:
3027
+ prompt (str): prompt for the human input.
3028
+ iostream (Optional[AsyncInputStream]): The AsyncInputStream object to use for sending messages.
3029
+ Returns:
3030
+ str: human input.
3031
+ """
3032
+
3033
+ iostream = iostream or IOStream.get_default()
3034
+ input_func = iostream.input
3035
+
3036
+ if is_coroutine_callable(input_func):
3037
+ reply = await input_func(prompt)
3038
+ else:
3039
+ reply = await asyncio.to_thread(input_func, prompt)
3040
+ self._human_input.append(reply)
3041
+ return reply
3042
+
3043
+ def run_code(self, code: str, **kwargs: Any) -> tuple[int, str, str | None]:
3044
+ """Run the code and return the result.
3045
+
3046
+ Override this function to modify the way to run the code.
3047
+
3048
+ Args:
3049
+ code (str): the code to be executed.
3050
+ **kwargs: other keyword arguments.
3051
+
3052
+ Returns:
3053
+ A tuple of (exitcode, logs, image).
3054
+ exitcode (int): the exit code of the code execution.
3055
+ logs (str): the logs of the code execution.
3056
+ image (str or None): the docker image used for the code execution.
3057
+ """
3058
+ return execute_code(code, **kwargs)
3059
+
3060
+ def execute_code_blocks(self, code_blocks):
3061
+ """Execute the code blocks and return the result."""
3062
+ iostream = IOStream.get_default()
3063
+
3064
+ logs_all = ""
3065
+ for i, code_block in enumerate(code_blocks):
3066
+ lang, code = code_block
3067
+ if not lang:
3068
+ lang = infer_lang(code)
3069
+
3070
+ iostream.send(ExecuteCodeBlockEvent(code=code, language=lang, code_block_count=i, recipient=self))
3071
+
3072
+ if lang in ["bash", "shell", "sh"]:
3073
+ exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config)
3074
+ elif lang in PYTHON_VARIANTS:
3075
+ filename = code[11 : code.find("\n")].strip() if code.startswith("# filename: ") else None
3076
+ exitcode, logs, image = self.run_code(
3077
+ code,
3078
+ lang="python",
3079
+ filename=filename,
3080
+ **self._code_execution_config,
3081
+ )
3082
+ else:
3083
+ # In case the language is not supported, we return an error message.
3084
+ exitcode, logs, image = (
3085
+ 1,
3086
+ f"unknown language {lang}",
3087
+ None,
3088
+ )
3089
+ # raise NotImplementedError
3090
+ if image is not None:
3091
+ self._code_execution_config["use_docker"] = image
3092
+ logs_all += "\n" + logs
3093
+ if exitcode != 0:
3094
+ return exitcode, logs_all
3095
+ return exitcode, logs_all
3096
+
3097
+ @staticmethod
3098
+ def _format_json_str(jstr):
3099
+ """Remove newlines outside of quotes, and handle JSON escape sequences.
3100
+
3101
+ 1. this function removes the newline in the query outside of quotes otherwise json.loads(s) will fail.
3102
+ Ex 1:
3103
+ "{\n"tool": "python",\n"query": "print('hello')\nprint('world')"\n}" -> "{"tool": "python","query": "print('hello')\nprint('world')"}"
3104
+ Ex 2:
3105
+ "{\n \"location\": \"Boston, MA\"\n}" -> "{"location": "Boston, MA"}"
3106
+
3107
+ 2. this function also handles JSON escape sequences inside quotes.
3108
+ Ex 1:
3109
+ '{"args": "a\na\na\ta"}' -> '{"args": "a\\na\\na\\ta"}'
3110
+ """
3111
+ result = []
3112
+ inside_quotes = False
3113
+ last_char = " "
3114
+ for char in jstr:
3115
+ if last_char != "\\" and char == '"':
3116
+ inside_quotes = not inside_quotes
3117
+ last_char = char
3118
+ if not inside_quotes and char == "\n":
3119
+ continue
3120
+ if inside_quotes and char == "\n":
3121
+ char = "\\n"
3122
+ if inside_quotes and char == "\t":
3123
+ char = "\\t"
3124
+ result.append(char)
3125
+ return "".join(result)
3126
+
3127
+ def execute_function(
3128
+ self, func_call: dict[str, Any], call_id: str | None = None, verbose: bool = False
3129
+ ) -> tuple[bool, dict[str, Any]]:
3130
+ """Execute a function call and return the result.
3131
+
3132
+ Override this function to modify the way to execute function and tool calls.
3133
+
3134
+ Args:
3135
+ func_call: a dictionary extracted from openai message at "function_call" or "tool_calls" with keys "name" and "arguments".
3136
+ call_id: a string to identify the tool call.
3137
+ verbose (bool): Whether to send messages about the execution details to the
3138
+ output stream. When True, both the function call arguments and the execution
3139
+ result will be displayed. Defaults to False.
3140
+
3141
+
3142
+ Returns:
3143
+ A tuple of (is_exec_success, result_dict).
3144
+ is_exec_success (boolean): whether the execution is successful.
3145
+ result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".
3146
+
3147
+ "function_call" deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
3148
+ See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
3149
+ """
3150
+ iostream = IOStream.get_default()
3151
+
3152
+ func_name = func_call.get("name", "")
3153
+ func = self._function_map.get(func_name, None)
3154
+
3155
+ is_exec_success = False
3156
+ if func is not None:
3157
+ # Extract arguments from a json-like string and put it into a dict.
3158
+ input_string = self._format_json_str(func_call.get("arguments", "{}"))
3159
+ try:
3160
+ arguments = json.loads(input_string)
3161
+ except json.JSONDecodeError as e:
3162
+ arguments = None
3163
+ content = f"Error: {e}\n The argument must be in JSON format."
3164
+
3165
+ # Try to execute the function
3166
+ if arguments is not None:
3167
+ iostream.send(
3168
+ ExecuteFunctionEvent(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self)
3169
+ )
3170
+ try:
3171
+ content = func(**arguments)
3172
+ if inspect.isawaitable(content):
3173
+
3174
+ async def _await_result(awaitable):
3175
+ return await awaitable
3176
+
3177
+ content = self._run_async_in_thread(_await_result(content))
3178
+ is_exec_success = True
3179
+ except Exception as e:
3180
+ content = f"Error: {e}"
3181
+ else:
3182
+ arguments = {}
3183
+ content = f"Error: Function {func_name} not found."
3184
+
3185
+ iostream.send(
3186
+ ExecutedFunctionEvent(
3187
+ func_name=func_name,
3188
+ call_id=call_id,
3189
+ arguments=arguments,
3190
+ content=content,
3191
+ recipient=self,
3192
+ is_exec_success=is_exec_success,
3193
+ )
3194
+ )
3195
+
3196
+ return is_exec_success, {
3197
+ "name": func_name,
3198
+ "role": "function",
3199
+ "content": content,
3200
+ }
3201
+
3202
+ async def a_execute_function(
3203
+ self, func_call: dict[str, Any], call_id: str | None = None, verbose: bool = False
3204
+ ) -> tuple[bool, dict[str, Any]]:
3205
+ """Execute an async function call and return the result.
3206
+
3207
+ Override this function to modify the way async functions and tools are executed.
3208
+
3209
+ Args:
3210
+ func_call: a dictionary extracted from openai message at key "function_call" or "tool_calls" with keys "name" and "arguments".
3211
+ call_id: a string to identify the tool call.
3212
+ verbose (bool): Whether to send messages about the execution details to the
3213
+ output stream. When True, both the function call arguments and the execution
3214
+ result will be displayed. Defaults to False.
3215
+
3216
+ Returns:
3217
+ A tuple of (is_exec_success, result_dict).
3218
+ is_exec_success (boolean): whether the execution is successful.
3219
+ result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".
3220
+
3221
+ "function_call" deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
3222
+ See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
3223
+ """
3224
+ iostream = IOStream.get_default()
3225
+
3226
+ func_name = func_call.get("name", "")
3227
+ func = self._function_map.get(func_name, None)
3228
+
3229
+ is_exec_success = False
3230
+ if func is not None:
3231
+ # Extract arguments from a json-like string and put it into a dict.
3232
+ input_string = self._format_json_str(func_call.get("arguments", "{}"))
3233
+ try:
3234
+ arguments = json.loads(input_string)
3235
+ except json.JSONDecodeError as e:
3236
+ arguments = None
3237
+ content = f"Error: {e}\n The argument must be in JSON format."
3238
+
3239
+ # Try to execute the function
3240
+ if arguments is not None:
3241
+ iostream.send(
3242
+ ExecuteFunctionEvent(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self)
3243
+ )
3244
+ try:
3245
+ if is_coroutine_callable(func):
3246
+ content = await func(**arguments)
3247
+ else:
3248
+ # Fallback to sync function if the function is not async
3249
+ content = func(**arguments)
3250
+ is_exec_success = True
3251
+ except Exception as e:
3252
+ content = f"Error: {e}"
3253
+ else:
3254
+ arguments = {}
3255
+ content = f"Error: Function {func_name} not found."
3256
+
3257
+ iostream.send(
3258
+ ExecutedFunctionEvent(
3259
+ func_name=func_name,
3260
+ call_id=call_id,
3261
+ arguments=arguments,
3262
+ content=content,
3263
+ recipient=self,
3264
+ is_exec_success=is_exec_success,
3265
+ )
3266
+ )
3267
+
3268
+ return is_exec_success, {
3269
+ "name": func_name,
3270
+ "role": "function",
3271
+ "content": content,
3272
+ }
3273
+
+     def generate_init_message(self, message: dict[str, Any] | str | None, **kwargs: Any) -> str | dict[str, Any]:
+         """Generate the initial message for the agent.
+         If message is None, the human is prompted for input (via get_human_input).
+
+         Args:
+             message (str or None): the message to be processed.
+             **kwargs: any additional information. It has the following reserved fields:
+                 "carryover": a string or a list of strings specifying the carryover information to be passed to this chat.
+                     If provided, we will combine it with the "message" content when generating the initial chat
+                     message.
+
+         Returns:
+             str or dict: the processed message.
+         """
+         if message is None:
+             message = self.get_human_input(">")
+
+         return self._handle_carryover(message, kwargs)
+
+     def _handle_carryover(self, message: str | dict[str, Any], kwargs: dict) -> str | dict[str, Any]:
+         if not kwargs.get("carryover"):
+             return message
+
+         if isinstance(message, str):
+             return self._process_carryover(message, kwargs)
+
+         elif isinstance(message, dict):
+             if isinstance(message.get("content"), str):
+                 # Make sure the original message is not mutated
+                 message = message.copy()
+                 message["content"] = self._process_carryover(message["content"], kwargs)
+             elif isinstance(message.get("content"), list):
+                 # Make sure the original message is not mutated
+                 message = message.copy()
+                 message["content"] = self._process_multimodal_carryover(message["content"], kwargs)
+         else:
+             raise InvalidCarryOverTypeError("Carryover should be a string or a list of strings.")
+
+         return message
+
+     def _process_carryover(self, content: str, kwargs: dict) -> str:
+         # Make sure there's a carryover
+         if not kwargs.get("carryover"):
+             return content
+
+         # If carryover is a string
+         if isinstance(kwargs["carryover"], str):
+             content += "\nContext: \n" + kwargs["carryover"]
+         elif isinstance(kwargs["carryover"], list):
+             content += "\nContext: \n" + "\n".join([_post_process_carryover_item(t) for t in kwargs["carryover"]])
+         else:
+             raise InvalidCarryOverTypeError(
+                 "Carryover should be a string or a list of strings. Not adding carryover to the message."
+             )
+         return content
+
+     def _process_multimodal_carryover(self, content: list[dict[str, Any]], kwargs: dict) -> list[dict[str, Any]]:
+         """Prepends the context to a multimodal message."""
+         # Make sure there's a carryover
+         if not kwargs.get("carryover"):
+             return content
+
+         return [{"type": "text", "text": self._process_carryover("", kwargs)}] + content
+
+     async def a_generate_init_message(
+         self, message: dict[str, Any] | str | None, **kwargs: Any
+     ) -> str | dict[str, Any]:
+         """Generate the initial message for the agent.
+         If message is None, the human is prompted for input (via a_get_human_input).
+
+         Args:
+             message (str or None): the message to be processed.
+             **kwargs: any additional information. It has the following reserved fields:
+                 "carryover": a string or a list of strings specifying the carryover information to be passed to this chat.
+                     If provided, we will combine it with the "message" content when generating the initial chat
+                     message.
+
+         Returns:
+             str or dict: the processed message.
+         """
+         if message is None:
+             message = await self.a_get_human_input(">")
+
+         return self._handle_carryover(message, kwargs)
+
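A sketch of how carryover is folded into the initial message; the agent is hypothetical, and the resulting string follows `_process_carryover` above:

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)

msg = agent.generate_init_message(
    "Summarize the findings.",
    carryover=["Fact A from a prior chat.", "Fact B from a prior chat."],
)
# msg == "Summarize the findings.\nContext: \nFact A from a prior chat.\nFact B from a prior chat."
```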
+     @property
+     def tools(self) -> list[Tool]:
+         """Get the agent's tools (registered for LLM).
+
+         Note that this is a copy of the tools list; use add_tool and remove_tool to modify the tools list.
+         """
+         return self._tools.copy()
+
+     def remove_tool_for_llm(self, tool: Tool) -> None:
+         """Remove a tool (registered for LLM)."""
+         try:
+             self._register_for_llm(tool=tool, api_style="tool", is_remove=True)
+             self._tools.remove(tool)
+         except ValueError:
+             raise ValueError(f"Tool {tool} not found in collection")
+
+     def register_function(self, function_map: dict[str, Callable[..., Any]], silent_override: bool = False):
+         """Register functions to the agent.
+
+         Args:
+             function_map: a dictionary mapping function names to functions. If function_map[name] is None, the function will be removed from the function_map.
+             silent_override: whether to print warnings when overriding functions.
+         """
+         for name, func in function_map.items():
+             self._assert_valid_name(name)
+             if func is None and name not in self._function_map:
+                 warnings.warn(f"The function {name} to remove doesn't exist", UserWarning)
+             if not silent_override and name in self._function_map:
+                 warnings.warn(f"Function '{name}' is being overridden.", UserWarning)
+         self._function_map.update(function_map)
+         self._function_map = {k: v for k, v in self._function_map.items() if v is not None}
+
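For example, registering and removing an executable function via the map (agent and function are hypothetical):

```
from autogen import ConversableAgent

agent = ConversableAgent(name="executor", llm_config=False)

def get_time() -> str:
    """Return a fixed timestamp (illustrative only)."""
    return "2025-01-01T00:00:00Z"

agent.register_function({"get_time": get_time})
assert agent.can_execute_function("get_time")

agent.register_function({"get_time": None})  # mapping a name to None removes it
assert not agent.can_execute_function("get_time")
```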
+     def update_function_signature(
+         self, func_sig: str | dict[str, Any], is_remove: bool = False, silent_override: bool = False
+     ):
+         """Update a function_signature in the LLM configuration for function_call.
+
+         Args:
+             func_sig (str or dict): description/name of the function to update/remove for the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions
+             is_remove: whether to remove the function named 'func_sig' from llm_config
+             silent_override: whether to print warnings when overriding functions.
+
+         Deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
+         See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
+         """
+         if not isinstance(self.llm_config, (dict, LLMConfig)):
+             error_msg = "To update a function signature, agent must have an llm_config"
+             logger.error(error_msg)
+             raise AssertionError(error_msg)
+
+         if is_remove:
+             if "functions" not in self.llm_config or len(self.llm_config["functions"]) == 0:
+                 error_msg = f"The agent config doesn't have function {func_sig}."
+                 logger.error(error_msg)
+                 raise AssertionError(error_msg)
+             else:
+                 self.llm_config["functions"] = [
+                     func for func in self.llm_config["functions"] if func["name"] != func_sig
+                 ]
+         else:
+             if not isinstance(func_sig, dict):
+                 raise ValueError(
+                     f"The function signature must be of the type dict. Received function signature type {type(func_sig)}"
+                 )
+             if "name" not in func_sig:
+                 raise ValueError(f"The function signature must have a 'name' key. Received: {func_sig}")
+             self._assert_valid_name(func_sig["name"])
+             if "functions" in self.llm_config:
+                 if not silent_override and any(
+                     func["name"] == func_sig["name"] for func in self.llm_config["functions"]
+                 ):
+                     warnings.warn(f"Function '{func_sig['name']}' is being overridden.", UserWarning)
+
+                 self.llm_config["functions"] = [
+                     func for func in self.llm_config["functions"] if func.get("name") != func_sig["name"]
+                 ] + [func_sig]
+             else:
+                 self.llm_config["functions"] = [func_sig]
+
+         # Do this only if llm_config is a dict. If llm_config is LLMConfig, LLMConfig will handle this.
+         if len(self.llm_config["functions"]) == 0 and isinstance(self.llm_config, dict):
+             del self.llm_config["functions"]
+
+         self.client = OpenAIWrapper(**self.llm_config)
+
+     def update_tool_signature(self, tool_sig: str | dict[str, Any], is_remove: bool, silent_override: bool = False):
+         """Update a tool_signature in the LLM configuration for tool_call.
+
+         Args:
+             tool_sig (str or dict): description/name of the tool to update/remove for the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+             is_remove: whether to remove the tool named 'tool_sig' from llm_config
+             silent_override: whether to print warnings when overriding functions.
+         """
+         if not self.llm_config:
+             error_msg = "To update a tool signature, agent must have an llm_config"
+             logger.error(error_msg)
+             raise AssertionError(error_msg)
+
+         self.llm_config = self._update_tool_config(
+             self.llm_config,
+             tool_sig=tool_sig,
+             is_remove=is_remove,
+             silent_override=silent_override,
+         )
+
+         self.client = OpenAIWrapper(**self.llm_config)
+
+     def _update_tool_config(
+         self,
+         llm_config: dict[str, Any] | LLMConfig,
+         tool_sig: str | dict[str, Any],
+         is_remove: bool,
+         silent_override: bool = False,
+     ) -> dict[str, Any]:
+         if is_remove:
+             if "tools" not in llm_config or len(llm_config["tools"]) == 0:
+                 error_msg = f"The agent config doesn't have tool {tool_sig}."
+                 logger.error(error_msg)
+                 raise AssertionError(error_msg)
+
+             else:
+                 current_tools = llm_config["tools"]
+                 filtered_tools = []
+
+                 # Loop through and rebuild the tools list without the tool to remove
+                 for tool in current_tools:
+                     tool_name = tool["function"]["name"]
+
+                     # Match by tool name, or by full tool signature
+                     is_different = tool_name != tool_sig if isinstance(tool_sig, str) else tool != tool_sig
+
+                     if is_different:
+                         filtered_tools.append(tool)
+
+                 llm_config["tools"] = filtered_tools
+
+         else:
+             if not isinstance(tool_sig, dict):
+                 raise ValueError(
+                     f"The tool signature must be of the type dict. Received tool signature type {type(tool_sig)}"
+                 )
+
+             self._assert_valid_name(tool_sig["function"]["name"])
+             if "tools" in llm_config and len(llm_config["tools"]) > 0:
+                 if not silent_override and any(
+                     tool["function"]["name"] == tool_sig["function"]["name"] for tool in llm_config["tools"]
+                 ):
+                     warnings.warn(f"Function '{tool_sig['function']['name']}' is being overridden.", UserWarning)
+
+                 llm_config["tools"] = [
+                     tool
+                     for tool in llm_config["tools"]
+                     if tool.get("function", {}).get("name") != tool_sig["function"]["name"]
+                 ] + [tool_sig]
+             else:
+                 llm_config["tools"] = [tool_sig]
+
+         # Do this only if llm_config is a dict. If llm_config is LLMConfig, LLMConfig will handle this.
+         if len(llm_config["tools"]) == 0 and isinstance(llm_config, dict):
+             del llm_config["tools"]
+
+         return llm_config
+
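The `tool_sig` dict follows the OpenAI tools schema; a hypothetical example, assuming `agent` already has an `llm_config`:

```
tool_sig = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

agent.update_tool_signature(tool_sig, is_remove=False)      # add or override
agent.update_tool_signature("get_weather", is_remove=True)  # remove by name
```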
+     def can_execute_function(self, name: list[str] | str) -> bool:
+         """Whether the agent can execute the function."""
+         names = name if isinstance(name, list) else [name]
+         return all(n in self._function_map for n in names)
+
+     @property
+     def function_map(self) -> dict[str, Callable[..., Any]]:
+         """Return the function map."""
+         return self._function_map
+
+     def _wrap_function(self, func: F, inject_params: dict[str, Any] = {}, *, serialize: bool = True) -> F:
+         """Wrap the function to inject chat context parameters and to dump the return value to JSON.
+
+         Handles both sync and async functions.
+
+         Args:
+             func: the function to be wrapped.
+             inject_params: the chat context parameters which will be passed to the function.
+             serialize: whether to serialize the return value
+
+         Returns:
+             The wrapped function.
+         """
+
+         @load_basemodels_if_needed
+         @functools.wraps(func)
+         def _wrapped_func(*args, **kwargs):
+             retval = func(*args, **kwargs, **inject_params)
+             if logging_enabled():
+                 log_function_use(self, func, kwargs, retval)
+             return serialize_to_str(retval) if serialize else retval
+
+         @load_basemodels_if_needed
+         @functools.wraps(func)
+         async def _a_wrapped_func(*args, **kwargs):
+             retval = await func(*args, **kwargs, **inject_params)
+             if logging_enabled():
+                 log_function_use(self, func, kwargs, retval)
+             return serialize_to_str(retval) if serialize else retval
+
+         wrapped_func = _a_wrapped_func if is_coroutine_callable(func) else _wrapped_func
+
+         # needed for testing
+         wrapped_func._origin = func
+
+         return wrapped_func
+
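An illustration of the serialization contract, with a hypothetical agent; `_wrap_function` is internal and is called directly here only to make the behavior concrete:

```
wrapped = agent._wrap_function(lambda: {"answer": 42})
print(wrapped())  # a JSON string such as '{"answer": 42}' rather than a dict

raw = agent._wrap_function(lambda: {"answer": 42}, serialize=False)
print(raw())      # the original dict, unserialized
```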
+     @staticmethod
+     def _create_tool_if_needed(
+         func_or_tool: F | Tool,
+         name: str | None,
+         description: str | None,
+     ) -> Tool:
+         if isinstance(func_or_tool, Tool):
+             tool: Tool = func_or_tool
+             # Create a new tool object if name or description is not None
+             if name or description:
+                 tool = Tool(func_or_tool=tool, name=name, description=description)
+         elif inspect.isfunction(func_or_tool):
+             function: Callable[..., Any] = func_or_tool
+             tool = Tool(func_or_tool=function, name=name, description=description)
+         else:
+             raise TypeError(f"'func_or_tool' must be a function or a Tool object, got '{type(func_or_tool)}' instead.")
+         return tool
+
+     def register_for_llm(
+         self,
+         *,
+         name: str | None = None,
+         description: str | None = None,
+         api_style: Literal["function", "tool"] = "tool",
+         silent_override: bool = False,
+     ) -> Callable[[F | Tool], Tool]:
+         """Decorator factory for registering a function to be used by an agent.
+
+         Its return value is used to decorate a function to be registered to the agent. The function uses type hints to
+         specify the arguments and return type. The function name is used as the default name for the function,
+         but a custom name can be provided. The function description is used to describe the function in the
+         agent's configuration.
+
+         Args:
+             name (optional(str)): name of the function. If None, the function name will be used (default: None).
+             description (optional(str)): description of the function (default: None). It is mandatory
+                 for the initial decorator, but the following ones can omit it.
+             api_style: (literal): the API style for function call.
+                 For Azure OpenAI API, use version 2023-12-01-preview or later.
+                 `"function"` style will be deprecated. For earlier versions, use
+                 `"function"` if `"tool"` doesn't work.
+                 See [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling?tabs=python) for details.
+             silent_override (bool): whether to suppress any override warning messages.
+
+         Returns:
+             The decorator for registering a function to be used by an agent.
+
+         Examples:
+             ```
+             @user_proxy.register_for_execution()
+             @agent2.register_for_llm()
+             @agent1.register_for_llm(description="This is a very useful function")
+             def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
+                 return a + str(b * c)
+             ```
+
+             For Azure OpenAI versions prior to 2023-12-01-preview, set `api_style`
+             to `"function"` if `"tool"` doesn't work:
+             ```
+             @agent2.register_for_llm(api_style="function")
+             def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
+                 return a + str(b * c)
+             ```
+
+         """
+
+         def _decorator(func_or_tool: F | Tool, name: str | None = name, description: str | None = description) -> Tool:
+             """Decorator for registering a function to be used by an agent.
+
+             Args:
+                 func_or_tool: The function or the tool to be registered.
+                 name: The name of the function or the tool.
+                 description: The description of the function or the tool.
+
+             Returns:
+                 The function to be registered, with the _description attribute set to the function description.
+
+             Raises:
+                 ValueError: if the function description is not provided and not propagated by a previous decorator.
+                 RuntimeError: if the LLM config is not set up before registering a function.
+
+             """
+             tool = self._create_tool_if_needed(func_or_tool, name, description)
+
+             self._register_for_llm(tool, api_style, silent_override=silent_override)
+             if tool not in self._tools:
+                 self._tools.append(tool)
+
+             return tool
+
+         return _decorator
+
+     def _register_for_llm(
+         self, tool: Tool, api_style: Literal["tool", "function"], is_remove: bool = False, silent_override: bool = False
+     ) -> None:
+         """Register a tool for LLM.
+
+         Args:
+             tool: the tool to be registered.
+             api_style: the API style for function call ("tool" or "function").
+             is_remove: whether to remove the function or tool.
+             silent_override: whether to suppress any override warning messages.
+
+         Returns:
+             None
+         """
+         # Register the function with the agent if there is an LLM config; raise an exception otherwise
+         if self.llm_config is None:
+             raise RuntimeError("LLM config must be setup before registering a function for LLM.")
+
+         if api_style == "function":
+             self.update_function_signature(tool.function_schema, is_remove=is_remove, silent_override=silent_override)
+         elif api_style == "tool":
+             self.update_tool_signature(tool.tool_schema, is_remove=is_remove, silent_override=silent_override)
+         else:
+             raise ValueError(f"Unsupported API style: {api_style}")
+
+     def set_ui_tools(self, tools: list[Tool]) -> None:
+         """Set the UI tools for the agent.
+
+         Args:
+             tools: a list of tools to be set.
+         """
+         # Unset the previous UI tools
+         self._unset_previous_ui_tools()
+
+         # Set the new UI tools
+         for tool in tools:
+             # Register the tool for the LLM
+             self._register_for_llm(tool, api_style="tool", silent_override=True)
+             if tool not in self._tools:
+                 self._tools.append(tool)
+
+             # Register for execution
+             self.register_for_execution(serialize=False, silent_override=True)(tool)
+
+         # Set the current UI tools
+         self._ui_tools = tools
+
+     def unset_ui_tools(self, tools: list[Tool]) -> None:
+         """Unset the UI tools for the agent.
+
+         Args:
+             tools: a list of tools to be unset.
+         """
+         for tool in tools:
+             self.remove_tool_for_llm(tool)
+
+     def _unset_previous_ui_tools(self) -> None:
+         """Unset the previous UI tools for the agent.
+
+         This is used to remove UI tools that were previously registered for the LLM.
+         """
+         self.unset_ui_tools(self._ui_tools)
+         for tool in self._ui_tools:
+             if tool in self._tools:
+                 self._tools.remove(tool)
+
+             # Unregister the function from the function map
+             if tool.name in self._function_map:
+                 del self._function_map[tool.name]
+
+         self._ui_tools = []
+
+     def register_for_execution(
+         self,
+         name: str | None = None,
+         description: str | None = None,
+         *,
+         serialize: bool = True,
+         silent_override: bool = False,
+     ) -> Callable[[Tool | F], Tool]:
+         """Decorator factory for registering a function to be executed by an agent.
+
+         Its return value is used to decorate a function to be registered to the agent.
+
+         Args:
+             name: name of the function. If None, the function name will be used (default: None).
+             description: description of the function (default: None).
+             serialize: whether to serialize the return value
+             silent_override: whether to suppress any override warning messages
+
+         Returns:
+             The decorator for registering a function to be used by an agent.
+
+         Examples:
+             ```
+             @user_proxy.register_for_execution()
+             @agent2.register_for_llm()
+             @agent1.register_for_llm(description="This is a very useful function")
+             def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14):
+                 return a + str(b * c)
+             ```
+
+         """
+
+         def _decorator(func_or_tool: Tool | F, name: str | None = name, description: str | None = description) -> Tool:
+             """Decorator for registering a function to be used by an agent.
+
+             Args:
+                 func_or_tool: the function or the tool to be registered.
+                 name: the name of the function.
+                 description: the description of the function.
+
+             Returns:
+                 The tool to be registered.
+
+             """
+             tool = self._create_tool_if_needed(func_or_tool, name, description)
+             chat_context = ChatContext(self)
+             chat_context_params = dict.fromkeys(tool._chat_context_param_names, chat_context)
+
+             self.register_function(
+                 {tool.name: self._wrap_function(tool.func, chat_context_params, serialize=serialize)},
+                 silent_override=silent_override,
+             )
+
+             return tool
+
+         return _decorator
+
+     def register_model_client(self, model_client_cls: ModelClient, **kwargs: Any):
+         """Register a model client.
+
+         Args:
+             model_client_cls: A custom client class that follows the Client interface
+             **kwargs: The kwargs for the custom client class to be initialized with
+         """
+         self.client.register_model_client(model_client_cls, **kwargs)
+
+     def register_hook(self, hookable_method: str, hook: Callable):
+         """Registers a hook to be called by a hookable method, in order to add a capability to the agent.
+         Registered hooks are kept in lists (one per hookable method), and are called in their order of registration.
+
+         Args:
+             hookable_method: A hookable method name implemented by ConversableAgent.
+             hook: A method implemented by a subclass of AgentCapability.
+         """
+         assert hookable_method in self.hook_lists, f"{hookable_method} is not a hookable method."
+         hook_list = self.hook_lists[hookable_method]
+         assert hook not in hook_list, f"{hook} is already registered as a hook."
+         hook_list.append(hook)
+
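As a sketch, a hook for the `process_last_received_message` hookable method receives the last message's content (a string, or a list in the multimodal case) and returns the possibly modified version; the agent and hook below are hypothetical:

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)

def add_reminder(user_content):
    # Append a standing instruction to the last received (text) message.
    if isinstance(user_content, str):
        return user_content + "\n(Reply concisely.)"
    return user_content

agent.register_hook("process_last_received_message", add_reminder)
```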
+     def update_agent_state_before_reply(self, messages: list[dict[str, Any]]) -> None:
+         """Calls any registered capability hooks to update the agent's state.
+         Primarily used to update context variables.
+         May, potentially, modify the messages.
+         """
+         hook_list = self.hook_lists["update_agent_state"]
+
+         # Call each hook (in order of registration) to process the messages.
+         for hook in hook_list:
+             hook(self, messages)
+
+     def process_all_messages_before_reply(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """Calls any registered capability hooks to process all messages, potentially modifying the messages."""
+         hook_list = self.hook_lists["process_all_messages_before_reply"]
+         # If no hooks are registered, or if there are no messages to process, return the original message list.
+         if len(hook_list) == 0 or messages is None:
+             return messages
+
+         # Call each hook (in order of registration) to process the messages.
+         processed_messages = messages
+         for hook in hook_list:
+             processed_messages = hook(processed_messages)
+         return processed_messages
+
+     def process_last_received_message(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """Calls any registered capability hooks to use and potentially modify the text of the last message,
+         as long as the last message is not a function call or exit command.
+         """
+         # If any required condition is not met, return the original message list.
+         hook_list = self.hook_lists["process_last_received_message"]
+         if len(hook_list) == 0:
+             return messages  # No hooks registered.
+         if messages is None:
+             return None  # No message to process.
+         if len(messages) == 0:
+             return messages  # No message to process.
+         last_message = messages[-1]
+         if "function_call" in last_message:
+             return messages  # Last message is a function call.
+         if "context" in last_message:
+             return messages  # Last message contains a context key.
+         if "content" not in last_message:
+             return messages  # Last message has no content.
+
+         user_content = last_message["content"]
+         if not isinstance(user_content, (str, list)):
+             # If the user_content is a string, it is for a regular LLM;
+             # if it is a list, it should follow the multimodal LMM format.
+             return messages
+         if user_content == "exit":
+             return messages  # Last message is an exit command.
+
+         # Call each hook (in order of registration) to process the user's message.
+         processed_user_content = user_content
+         for hook in hook_list:
+             processed_user_content = hook(processed_user_content)
+
+         if processed_user_content == user_content:
+             return messages  # No hooks actually modified the user's message.
+
+         # Replace the last user message with the expanded one.
+         messages = messages.copy()
+         messages[-1]["content"] = processed_user_content
+         return messages
+
+     def _process_tool_input(self, tool_input: dict[str, Any]) -> dict[str, Any] | None:
+         """Process tool input through registered hooks."""
+         hook_list = self.hook_lists["safeguard_tool_inputs"]
+
+         # If no hooks are registered, allow the tool input
+         if len(hook_list) == 0:
+             return tool_input
+
+         # Process through each hook
+         processed_input = tool_input
+         for hook in hook_list:
+             processed_input = hook(processed_input)
+             if processed_input is None:
+                 return None
+
+         return processed_input
+
+     def _process_tool_output(self, response: dict[str, Any]) -> dict[str, Any]:
+         """Process tool output through registered hooks."""
+         hook_list = self.hook_lists["safeguard_tool_outputs"]
+
+         # If no hooks are registered, return the original response
+         if len(hook_list) == 0:
+             return response
+
+         # Process through each hook
+         processed_response = response
+         for hook in hook_list:
+             processed_response = hook(processed_response)
+
+         return processed_response
+
+     def _process_llm_input(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]] | None:
+         """Process messages through registered hooks before sending them to the LLM."""
+         hook_list = self.hook_lists["safeguard_llm_inputs"]
+
+         # If no hooks are registered, allow the messages through
+         if len(hook_list) == 0:
+             return messages
+
+         # Process through each hook
+         processed_messages = messages
+         for hook in hook_list:
+             processed_messages = hook(processed_messages)
+             if processed_messages is None:
+                 return None
+
+         return processed_messages
+
+     def _process_llm_output(self, response: str | dict[str, Any]) -> str | dict[str, Any]:
+         """Process the LLM response through registered hooks."""
+         hook_list = self.hook_lists["safeguard_llm_outputs"]
+
+         # If no hooks are registered, return the original response
+         if len(hook_list) == 0:
+             return response
+
+         # Process through each hook
+         processed_response = response
+         for hook in hook_list:
+             processed_response = hook(processed_response)
+
+         return processed_response
+
+     def _process_human_input(self, human_input: str) -> str | None:
+         """Process human input through registered hooks."""
+         hook_list = self.hook_lists["safeguard_human_inputs"]
+
+         # If no hooks are registered, allow the input through
+         if len(hook_list) == 0:
+             return human_input
+
+         # Process through each hook
+         processed_input = human_input
+         for hook in hook_list:
+             processed_input = hook(processed_input)
+             if processed_input is None:
+                 return None
+
+         return processed_input
+
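A sketch of a safeguard hook that vetoes a tool call by returning None, matching the contract `_process_tool_input` implements; the hook, the tool name, and the input shape are hypothetical, and this assumes the safeguard keys are present in `hook_lists` (the processors above index them):

```
from typing import Any

def block_shell_tools(tool_input: dict[str, Any]) -> dict[str, Any] | None:
    # Returning None rejects the tool call; returning the dict allows it through.
    if tool_input.get("name") == "run_shell":
        return None
    return tool_input

agent.register_hook("safeguard_tool_inputs", block_shell_tools)
```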
+     def print_usage_summary(self, mode: str | list[str] = ["actual", "total"]) -> None:
+         """Print the usage summary."""
+         iostream = IOStream.get_default()
+         if self.client is None:
+             iostream.send(ConversableAgentUsageSummaryNoCostIncurredEvent(recipient=self))
+         else:
+             iostream.send(ConversableAgentUsageSummaryEvent(recipient=self))
+
+         if self.client is not None:
+             self.client.print_usage_summary(mode)
+
+     def get_actual_usage(self) -> None | dict[str, int]:
+         """Get the actual usage summary."""
+         if self.client is None:
+             return None
+         else:
+             return self.client.actual_usage_summary
+
+     def get_total_usage(self) -> None | dict[str, int]:
+         """Get the total usage summary."""
+         if self.client is None:
+             return None
+         else:
+             return self.client.total_usage_summary
+
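For example, with a hypothetical agent (without a configured client, both getters return None):

```
summary = agent.get_total_usage()
if summary is not None:
    print(summary)  # token counts (and costs, where known) accumulated by the client

agent.print_usage_summary(mode=["actual", "total"])
```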
+     @contextmanager
+     def _create_or_get_executor(
+         self,
+         executor_kwargs: dict[str, Any] | None = None,
+         tools: Tool | Iterable[Tool] | None = None,
+         agent_name: str = "executor",
+         agent_human_input_mode: str = "NEVER",
+     ) -> Generator["ConversableAgent", None, None]:
+         """Creates a user proxy / tool executor agent.
+
+         Note: Code execution is not enabled by default. Pass the code execution config into executor_kwargs, if needed.
+
+         Args:
+             executor_kwargs: agent's arguments.
+             tools: tools to register for execution with the agent.
+             agent_name: agent's name, defaults to 'executor'.
+             agent_human_input_mode: agent's human input mode, defaults to 'NEVER'.
+         """
+         if executor_kwargs is None:
+             executor_kwargs = {}
+         if "is_termination_msg" not in executor_kwargs:
+             executor_kwargs["is_termination_msg"] = lambda x: "TERMINATE" in (
+                 content_str(x.get("content"))
+                 if isinstance(x.get("content"), (str, list)) or x.get("content") is None
+                 else str(x.get("content"))
+             )
+
+         try:
+             if not self.run_executor:
+                 self.run_executor = ConversableAgent(
+                     name=agent_name,
+                     human_input_mode=agent_human_input_mode,
+                     **executor_kwargs,
+                 )
+
+             # Combine the agent's existing tools with the passed tools
+             agent_tools = self._tools.copy()  # Get the agent's pre-registered tools
+             passed_tools = [] if tools is None else tools
+             passed_tools = [passed_tools] if isinstance(passed_tools, Tool) else passed_tools
+
+             # Combine both sets of tools (avoiding duplicates)
+             all_tools = agent_tools.copy()
+             for tool in passed_tools:
+                 if tool not in all_tools:
+                     all_tools.append(tool)
+
+             # Register all tools with the executor
+             for tool in all_tools:
+                 tool.register_for_execution(self.run_executor)
+
+             # Register only newly passed tools for the LLM (the agent's pre-existing tools are already registered)
+             for tool in passed_tools:
+                 tool.register_for_llm(self)
+             yield self.run_executor
+         finally:
+             # Clean up only newly passed tools (not the agent's pre-existing tools)
+             if "passed_tools" in locals():
+                 for tool in passed_tools:
+                     self.update_tool_signature(tool_sig=tool.tool_schema["function"]["name"], is_remove=True)
+
+     def _deprecated_run(
+         self,
+         message: str,
+         *,
+         tools: Tool | Iterable[Tool] | None = None,
+         executor_kwargs: dict[str, Any] | None = None,
+         max_turns: int | None = None,
+         msg_to: Literal["agent", "user"] = "agent",
+         clear_history: bool = False,
+         user_input: bool = True,
+         summary_method: str | Callable[..., Any] | None = DEFAULT_SUMMARY_METHOD,
+     ) -> ChatResult:
+         """Run a chat with the agent using the given message.
+
+         A second agent will be created to represent the user; it will be known by the name 'user'. This agent does not have code execution enabled by default; if needed, pass the code execution config in with the executor_kwargs parameter.
+
+         The user can terminate the conversation when prompted or, if the agent's reply contains 'TERMINATE', it will terminate.
+
+         Args:
+             message: the message to be processed.
+             tools: the tools to be used by the agent.
+             executor_kwargs: the keyword arguments for the executor.
+             max_turns: maximum number of turns (a turn is equivalent to both agents having replied); defaults to None, which means unlimited. The original message is included.
+             msg_to: which agent is receiving the message and will be the first to reply, defaults to the agent.
+             clear_history: whether to clear the chat history.
+             user_input: the user will be asked for input at their turn.
+             summary_method: the method to summarize the chat.
+         """
+         with self._create_or_get_executor(
+             executor_kwargs=executor_kwargs,
+             tools=tools,
+             agent_name="user",
+             agent_human_input_mode="ALWAYS" if user_input else "NEVER",
+         ) as executor:
+             if msg_to == "agent":
+                 return executor.initiate_chat(
+                     self,
+                     message=message,
+                     clear_history=clear_history,
+                     max_turns=max_turns,
+                     summary_method=summary_method,
+                 )
+             else:
+                 return self.initiate_chat(
+                     executor,
+                     message=message,
+                     clear_history=clear_history,
+                     max_turns=max_turns,
+                     summary_method=summary_method,
+                 )
+
+     async def _deprecated_a_run(
+         self,
+         message: str,
+         *,
+         tools: Tool | Iterable[Tool] | None = None,
+         executor_kwargs: dict[str, Any] | None = None,
+         max_turns: int | None = None,
+         msg_to: Literal["agent", "user"] = "agent",
+         clear_history: bool = False,
+         user_input: bool = True,
+         summary_method: str | Callable[..., Any] | None = DEFAULT_SUMMARY_METHOD,
+     ) -> ChatResult:
+         """Run a chat asynchronously with the agent using the given message.
+
+         A second agent will be created to represent the user; it will be known by the name 'user'.
+
+         The user can terminate the conversation when prompted or, if the agent's reply contains 'TERMINATE', it will terminate.
+
+         Args:
+             message: the message to be processed.
+             tools: the tools to be used by the agent.
+             executor_kwargs: the keyword arguments for the executor.
+             max_turns: maximum number of turns (a turn is equivalent to both agents having replied); defaults to None, which means unlimited. The original message is included.
+             msg_to: which agent is receiving the message and will be the first to reply, defaults to the agent.
+             clear_history: whether to clear the chat history.
+             user_input: the user will be asked for input at their turn.
+             summary_method: the method to summarize the chat.
+         """
+         with self._create_or_get_executor(
+             executor_kwargs=executor_kwargs,
+             tools=tools,
+             agent_name="user",
+             agent_human_input_mode="ALWAYS" if user_input else "NEVER",
+         ) as executor:
+             if msg_to == "agent":
+                 return await executor.a_initiate_chat(
+                     self,
+                     message=message,
+                     clear_history=clear_history,
+                     max_turns=max_turns,
+                     summary_method=summary_method,
+                 )
+             else:
+                 return await self.a_initiate_chat(
+                     executor,
+                     message=message,
+                     clear_history=clear_history,
+                     max_turns=max_turns,
+                     summary_method=summary_method,
+                 )
+
+     def register_handoff(self, condition: Union["OnContextCondition", "OnCondition"]) -> None:
+         """Register a single handoff condition (OnContextCondition or OnCondition).
+
+         Args:
+             condition: The condition to add (OnContextCondition, OnCondition)
+         """
+         self.handoffs.add(condition)
+
+     def register_handoffs(self, conditions: list[Union["OnContextCondition", "OnCondition"]]) -> None:
+         """Register multiple handoff conditions (OnContextCondition or OnCondition).
+
+         Args:
+             conditions: List of conditions to add
+         """
+         self.handoffs.add_many(conditions)
+
+     def register_input_guardrail(self, guardrail: "Guardrail") -> None:
+         """Register a guardrail to be used for input validation.
+
+         Args:
+             guardrail: The guardrail to register.
+         """
+         self.input_guardrails.append(guardrail)
+
+     def register_input_guardrails(self, guardrails: list["Guardrail"]) -> None:
+         """Register multiple guardrails to be used for input validation.
+
+         Args:
+             guardrails: List of guardrails to register.
+         """
+         self.input_guardrails.extend(guardrails)
+
+     def register_output_guardrail(self, guardrail: "Guardrail") -> None:
+         """Register a guardrail to be used for output validation.
+
+         Args:
+             guardrail: The guardrail to register.
+         """
+         self.output_guardrails.append(guardrail)
+
+     def register_output_guardrails(self, guardrails: list["Guardrail"]) -> None:
+         """Register multiple guardrails to be used for output validation.
+
+         Args:
+             guardrails: List of guardrails to register.
+         """
+         self.output_guardrails.extend(guardrails)
+
+     def run_input_guardrails(self, messages: list[dict[str, Any]] | None = None) -> GuardrailResult | None:
+         """Run input guardrails for an agent before the reply is generated.
+
+         Args:
+             messages (Optional[list[dict[str, Any]]]): The messages to check against the guardrails.
+
+         Returns:
+             The result of the first guardrail that activates, or None if none activate.
+         """
+         for guardrail in self.input_guardrails:
+             guardrail_result = guardrail.check(context=messages)
+
+             if guardrail_result.activated:
+                 return guardrail_result
+         return None
+
+     def run_output_guardrails(self, reply: str | dict[str, Any]) -> GuardrailResult | None:
+         """Run output guardrails for an agent after the reply is generated.
+
+         Args:
+             reply (str | dict[str, Any]): The reply generated by the agent.
+
+         Returns:
+             The result of the first guardrail that activates, or None if none activate.
+         """
+         for guardrail in self.output_guardrails:
+             guardrail_result = guardrail.check(context=reply)
+
+             if guardrail_result.activated:
+                 return guardrail_result
+         return None
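Only `check(context=...)` and the result's `activated` flag are exercised above, so a duck-typed stand-in suffices to show the flow; `BannedWordGuardrail` and `SimpleResult` are hypothetical names, not part of the package, and `agent` is assumed:

```
class SimpleResult:
    def __init__(self, activated: bool, justification: str = ""):
        self.activated = activated
        self.justification = justification

class BannedWordGuardrail:
    def check(self, context) -> SimpleResult:
        # Activate when a banned word appears anywhere in the reply.
        return SimpleResult("forbidden" in str(context), "matched a banned word")

agent.register_output_guardrail(BannedWordGuardrail())
result = agent.run_output_guardrails("a forbidden reply")
# result.activated is True, so the caller can block or rewrite the reply
```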
+
+
+ @export_module("autogen")
+ def register_function(
+     f: Callable[..., Any],
+     *,
+     caller: ConversableAgent,
+     executor: ConversableAgent,
+     name: str | None = None,
+     description: str,
+ ) -> None:
+     """Register a function to be proposed by an agent and executed by an executor.
+
+     This function can be used instead of the function decorators `@ConversableAgent.register_for_llm` and
+     `@ConversableAgent.register_for_execution`.
+
+     Args:
+         f: the function to be registered.
+         caller: the agent calling the function, typically an instance of ConversableAgent.
+         executor: the agent executing the function, typically an instance of UserProxy.
+         name: name of the function. If None, the function name will be used (default: None).
+         description: description of the function. The description is used by the LLM to decide whether the function
+             should be called. Make sure the description properly describes what the function does, or it might not be
+             called by the LLM when needed.
+
+     """
+     f = caller.register_for_llm(name=name, description=description)(f)
+     executor.register_for_execution(name=name)(f)
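For example, wiring one function between a proposing agent and an executing agent (the agents, the model entry, and the function are hypothetical):

```
from autogen import ConversableAgent, register_function

llm_config = {"config_list": [{"model": "gpt-4o-mini", "api_type": "openai"}]}  # illustrative
assistant = ConversableAgent(name="assistant", llm_config=llm_config)
user_proxy = ConversableAgent(name="user_proxy", human_input_mode="NEVER", llm_config=False)

def multiply(a: int, b: int) -> int:
    return a * b

register_function(
    multiply,
    caller=assistant,     # the assistant may propose calls to `multiply`
    executor=user_proxy,  # the user proxy executes the proposed calls
    description="Multiply two integers.",
)
```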
+
+
+ def normilize_message_to_oai(
+     message: dict[str, Any] | str,
+     name: str,
+     role: str = "assistant",
+ ) -> tuple[bool, dict[str, Any]]:
+     """Normalize a message into an OpenAI-compatible message dict; returns (is_valid, oai_message)."""
+     message = message_to_dict(message)
+     # Create the oai message to be appended to the oai conversation, so that it can be passed to oai directly.
+     oai_message = {
+         k: message[k]
+         for k in ("content", "function_call", "tool_responses", "tool_call_id", "name", "context")
+         if k in message and message[k] is not None
+     }
+
+     if tools := message.get("tool_calls"):  # check for [], None and a missing key
+         oai_message["tool_calls"] = tools
+
+     if "content" not in oai_message:
+         if "function_call" in oai_message or "tool_calls" in oai_message:
+             oai_message["content"] = None  # if only function_call is provided, content will be set to None.
+         else:
+             return False, oai_message
+
+     if message.get("role") in ["function", "tool"]:
+         oai_message["role"] = message.get("role")
+         if "tool_responses" in oai_message:
+             for tool_response in oai_message["tool_responses"]:
+                 content_value = tool_response.get("content")
+                 tool_response["content"] = (
+                     content_str(content_value)
+                     if isinstance(content_value, (str, list)) or content_value is None
+                     else str(content_value)
+                 )
+     elif "override_role" in message:
+         # If we have a direction to override the role, then set the
+         # role accordingly. Used to customise the role for the
+         # select speaker prompt.
+         oai_message["role"] = message.get("override_role")
+     else:
+         oai_message["role"] = role
+
+     if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
+         oai_message["role"] = "assistant"  # only messages with role 'assistant' can have a function call.
+     elif "name" not in oai_message:
+         # If we don't have a name field, append it
+         oai_message["name"] = name
+
+     return True, oai_message
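A quick illustration of the normalization rules (the values are arbitrary):

```
ok, msg = normilize_message_to_oai("Hello!", name="assistant")
# ok is True; msg == {"content": "Hello!", "role": "assistant", "name": "assistant"}

tool_call_msg = {
    "tool_calls": [{"id": "t1", "type": "function", "function": {"name": "add", "arguments": "{}"}}]
}
ok, msg = normilize_message_to_oai(tool_call_msg, name="assistant")
# ok is True; msg["role"] == "assistant" and msg["content"] is None
```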
+
+
+ def message_to_dict(message: dict[str, Any] | str) -> dict:
+     """Convert a message to a dictionary.
+
+     The message can be a string or a dictionary. The string will be put in the "content" field of the new dictionary.
+     """
+     if isinstance(message, str):
+         return {"content": message}
+     elif isinstance(message, dict):
+         return message
+     else:
+         return dict(message)