ag2 0.9.9__py3-none-any.whl → 0.9.10__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

This release has been flagged as potentially problematic.

Files changed (88)
  1. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/METADATA +232 -210
  2. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/RECORD +88 -80
  3. autogen/_website/generate_mkdocs.py +3 -3
  4. autogen/_website/notebook_processor.py +1 -1
  5. autogen/_website/utils.py +1 -1
  6. autogen/agentchat/assistant_agent.py +15 -15
  7. autogen/agentchat/chat.py +52 -40
  8. autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
  9. autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
  10. autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
  11. autogen/agentchat/contrib/capabilities/transforms.py +1 -1
  12. autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
  13. autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
  14. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
  15. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
  16. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
  17. autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
  18. autogen/agentchat/contrib/rag/query_engine.py +11 -11
  19. autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
  20. autogen/agentchat/contrib/swarm_agent.py +3 -2
  21. autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
  22. autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
  23. autogen/agentchat/contrib/web_surfer.py +1 -1
  24. autogen/agentchat/conversable_agent.py +184 -80
  25. autogen/agentchat/group/context_expression.py +21 -21
  26. autogen/agentchat/group/handoffs.py +11 -11
  27. autogen/agentchat/group/multi_agent_chat.py +3 -2
  28. autogen/agentchat/group/on_condition.py +11 -11
  29. autogen/agentchat/group/safeguards/__init__.py +21 -0
  30. autogen/agentchat/group/safeguards/api.py +224 -0
  31. autogen/agentchat/group/safeguards/enforcer.py +1064 -0
  32. autogen/agentchat/group/safeguards/events.py +119 -0
  33. autogen/agentchat/group/safeguards/validator.py +435 -0
  34. autogen/agentchat/groupchat.py +58 -17
  35. autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
  36. autogen/agentchat/realtime/experimental/function_observer.py +2 -3
  37. autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
  38. autogen/agentchat/realtime/experimental/realtime_swarm.py +21 -10
  39. autogen/agentchat/user_proxy_agent.py +55 -53
  40. autogen/agents/experimental/document_agent/document_agent.py +1 -10
  41. autogen/agents/experimental/document_agent/parser_utils.py +5 -1
  42. autogen/browser_utils.py +4 -4
  43. autogen/cache/abstract_cache_base.py +2 -6
  44. autogen/cache/disk_cache.py +1 -6
  45. autogen/cache/in_memory_cache.py +2 -6
  46. autogen/cache/redis_cache.py +1 -5
  47. autogen/coding/__init__.py +10 -2
  48. autogen/coding/base.py +2 -1
  49. autogen/coding/docker_commandline_code_executor.py +1 -6
  50. autogen/coding/factory.py +9 -0
  51. autogen/coding/jupyter/docker_jupyter_server.py +1 -7
  52. autogen/coding/jupyter/jupyter_client.py +2 -9
  53. autogen/coding/jupyter/jupyter_code_executor.py +2 -7
  54. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  55. autogen/coding/local_commandline_code_executor.py +0 -65
  56. autogen/coding/yepcode_code_executor.py +197 -0
  57. autogen/environments/docker_python_environment.py +3 -3
  58. autogen/environments/system_python_environment.py +5 -5
  59. autogen/environments/venv_python_environment.py +5 -5
  60. autogen/events/agent_events.py +1 -1
  61. autogen/events/client_events.py +1 -1
  62. autogen/fast_depends/utils.py +10 -0
  63. autogen/graph_utils.py +5 -7
  64. autogen/import_utils.py +3 -1
  65. autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
  66. autogen/io/processors/console_event_processor.py +8 -3
  67. autogen/llm_config/config.py +168 -91
  68. autogen/llm_config/entry.py +38 -26
  69. autogen/llm_config/types.py +35 -0
  70. autogen/llm_config/utils.py +223 -0
  71. autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
  72. autogen/messages/agent_messages.py +1 -1
  73. autogen/messages/client_messages.py +1 -1
  74. autogen/oai/__init__.py +8 -1
  75. autogen/oai/client.py +10 -3
  76. autogen/oai/client_utils.py +1 -1
  77. autogen/oai/cohere.py +4 -4
  78. autogen/oai/gemini.py +4 -6
  79. autogen/oai/gemini_types.py +1 -0
  80. autogen/oai/openai_utils.py +44 -115
  81. autogen/tools/dependency_injection.py +4 -8
  82. autogen/tools/experimental/reliable/reliable.py +3 -2
  83. autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
  84. autogen/tools/function_utils.py +2 -1
  85. autogen/version.py +1 -1
  86. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/WHEEL +0 -0
  87. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/licenses/LICENSE +0 -0
  88. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/licenses/NOTICE.md +0 -0
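The diff below covers `{ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/METADATA`, i.e. the package metadata and the bundled README. Among the recorded changes are a dropped `asyncer` pin, a batch of dependency bumps, and a new `yepcode` extra backing the new `autogen/coding/yepcode_code_executor.py` module listed above. As a minimal sketch (assuming only the extra name published in the metadata below), upgrading with that extra enabled would look like:

```bash
# Quote the requirement so the shell does not expand the square brackets
pip install 'ag2[yepcode]==0.9.10'
```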
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ag2
- Version: 0.9.9
+ Version: 0.9.10
  Summary: A programming framework for agentic AI
  Project-URL: Homepage, https://ag2.ai/
  Project-URL: Documentation, https://docs.ag2.ai
@@ -30,7 +30,6 @@ Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: <3.14,>=3.10
  Requires-Dist: anyio<5.0.0,>=3.0.0
- Requires-Dist: asyncer==0.0.8
  Requires-Dist: diskcache
  Requires-Dist: docker
  Requires-Dist: httpx<1,>=0.28.1
@@ -61,7 +60,7 @@ Requires-Dist: cerebras-cloud-sdk>=1.0.0; extra == 'cerebras'
  Provides-Extra: cohere
  Requires-Dist: cohere>=5.13.5; extra == 'cohere'
  Provides-Extra: commsagent-discord
- Requires-Dist: discord-py<2.6,>=2.4.0; extra == 'commsagent-discord'
+ Requires-Dist: discord-py<2.7,>=2.4.0; extra == 'commsagent-discord'
  Provides-Extra: commsagent-slack
  Requires-Dist: slack-sdk<3.40,>=3.33.0; extra == 'commsagent-slack'
  Provides-Extra: commsagent-telegram
@@ -84,10 +83,10 @@ Requires-Dist: mcp>=1.11.0; extra == 'dev'
  Requires-Dist: mdx-include==1.4.2; extra == 'dev'
  Requires-Dist: mike==2.1.3; extra == 'dev'
  Requires-Dist: mkdocs-git-revision-date-localized-plugin==1.4.7; extra == 'dev'
- Requires-Dist: mkdocs-glightbox==0.4.0; extra == 'dev'
+ Requires-Dist: mkdocs-glightbox==0.5.1; extra == 'dev'
  Requires-Dist: mkdocs-literate-nav==0.6.2; extra == 'dev'
- Requires-Dist: mkdocs-macros-plugin==1.3.7; extra == 'dev'
- Requires-Dist: mkdocs-material==9.6.16; extra == 'dev'
+ Requires-Dist: mkdocs-macros-plugin==1.3.9; extra == 'dev'
+ Requires-Dist: mkdocs-material==9.6.19; extra == 'dev'
  Requires-Dist: mkdocs-minify-plugin==0.8.0; extra == 'dev'
  Requires-Dist: mkdocs-redirects==1.2.2; extra == 'dev'
  Requires-Dist: mkdocstrings[python]==0.30.0; extra == 'dev'
@@ -97,26 +96,26 @@ Requires-Dist: nbclient==0.10.2; extra == 'dev'
  Requires-Dist: nbconvert==7.16.6; extra == 'dev'
  Requires-Dist: nbformat==5.10.4; extra == 'dev'
  Requires-Dist: openai>=1.99.3; extra == 'dev'
- Requires-Dist: pandas==2.3.1; extra == 'dev'
+ Requires-Dist: pandas==2.3.2; extra == 'dev'
  Requires-Dist: pdoc3==0.11.6; extra == 'dev'
  Requires-Dist: pillow; extra == 'dev'
  Requires-Dist: pre-commit==4.3.0; extra == 'dev'
  Requires-Dist: pytest-asyncio==1.1.0; extra == 'dev'
- Requires-Dist: pytest-cov==6.2.1; extra == 'dev'
- Requires-Dist: pytest==8.4.1; extra == 'dev'
+ Requires-Dist: pytest-cov==6.3.0; extra == 'dev'
+ Requires-Dist: pytest==8.4.2; extra == 'dev'
  Requires-Dist: pyupgrade-directories==0.3.0; extra == 'dev'
  Requires-Dist: pyyaml==6.0.2; extra == 'dev'
- Requires-Dist: ruff==0.12.8; extra == 'dev'
+ Requires-Dist: ruff==0.12.12; extra == 'dev'
  Requires-Dist: termcolor==3.1.0; extra == 'dev'
  Requires-Dist: toml==0.10.2; extra == 'dev'
- Requires-Dist: typer==0.16.0; extra == 'dev'
+ Requires-Dist: typer==0.17.4; extra == 'dev'
  Requires-Dist: types-decorator; extra == 'dev'
  Requires-Dist: types-pycurl; extra == 'dev'
  Requires-Dist: types-python-dateutil; extra == 'dev'
  Requires-Dist: types-pyyaml; extra == 'dev'
  Requires-Dist: types-requests; extra == 'dev'
  Requires-Dist: types-ujson; extra == 'dev'
- Requires-Dist: uv==0.8.8; extra == 'dev'
+ Requires-Dist: uv==0.8.15; extra == 'dev'
  Provides-Extra: docs
  Requires-Dist: cairosvg; extra == 'docs'
  Requires-Dist: jinja2==3.1.6; extra == 'docs'
@@ -124,10 +123,10 @@ Requires-Dist: mcp>=1.11.0; extra == 'docs'
  Requires-Dist: mdx-include==1.4.2; extra == 'docs'
  Requires-Dist: mike==2.1.3; extra == 'docs'
  Requires-Dist: mkdocs-git-revision-date-localized-plugin==1.4.7; extra == 'docs'
- Requires-Dist: mkdocs-glightbox==0.4.0; extra == 'docs'
+ Requires-Dist: mkdocs-glightbox==0.5.1; extra == 'docs'
  Requires-Dist: mkdocs-literate-nav==0.6.2; extra == 'docs'
- Requires-Dist: mkdocs-macros-plugin==1.3.7; extra == 'docs'
- Requires-Dist: mkdocs-material==9.6.16; extra == 'docs'
+ Requires-Dist: mkdocs-macros-plugin==1.3.9; extra == 'docs'
+ Requires-Dist: mkdocs-material==9.6.19; extra == 'docs'
  Requires-Dist: mkdocs-minify-plugin==0.8.0; extra == 'docs'
  Requires-Dist: mkdocs-redirects==1.2.2; extra == 'docs'
  Requires-Dist: mkdocstrings[python]==0.30.0; extra == 'docs'
@@ -136,7 +135,7 @@ Requires-Dist: pdoc3==0.11.6; extra == 'docs'
  Requires-Dist: pillow; extra == 'docs'
  Requires-Dist: pyyaml==6.0.2; extra == 'docs'
  Requires-Dist: termcolor==3.1.0; extra == 'docs'
- Requires-Dist: typer==0.16.0; extra == 'docs'
+ Requires-Dist: typer==0.17.4; extra == 'docs'
  Provides-Extra: duckduckgo
  Requires-Dist: duckduckgo-search>=8.0.2; extra == 'duckduckgo'
  Provides-Extra: flaml
@@ -171,23 +170,23 @@ Requires-Dist: matplotlib; extra == 'graph'
  Requires-Dist: networkx; extra == 'graph'
  Provides-Extra: graph-rag-falkor-db
  Requires-Dist: falkordb>=1.0.10; extra == 'graph-rag-falkor-db'
- Requires-Dist: graphrag-sdk==0.7.1; extra == 'graph-rag-falkor-db'
+ Requires-Dist: graphrag-sdk==0.8.0; extra == 'graph-rag-falkor-db'
  Provides-Extra: groq
  Requires-Dist: groq>=0.9.0; extra == 'groq'
  Provides-Extra: interop
  Requires-Dist: crewai[tools]<1,>=0.76; (python_version >= '3.10' and python_version < '3.13') and extra == 'interop'
  Requires-Dist: langchain-community<1,>=0.3.12; extra == 'interop'
- Requires-Dist: litellm<=1.75.5.post1; extra == 'interop'
- Requires-Dist: pydantic-ai==0.6.2; extra == 'interop'
+ Requires-Dist: litellm<=1.76.3; extra == 'interop'
+ Requires-Dist: pydantic-ai==1.0.1; extra == 'interop'
  Requires-Dist: weaviate-client<5,>=4; (python_version >= '3.10' and python_version < '3.13') and extra == 'interop'
  Provides-Extra: interop-crewai
  Requires-Dist: crewai[tools]<1,>=0.76; (python_version >= '3.10' and python_version < '3.13') and extra == 'interop-crewai'
- Requires-Dist: litellm<=1.75.5.post1; extra == 'interop-crewai'
+ Requires-Dist: litellm<=1.76.3; extra == 'interop-crewai'
  Requires-Dist: weaviate-client<5,>=4; (python_version >= '3.10' and python_version < '3.13') and extra == 'interop-crewai'
  Provides-Extra: interop-langchain
  Requires-Dist: langchain-community<1,>=0.3.12; extra == 'interop-langchain'
  Provides-Extra: interop-pydantic-ai
- Requires-Dist: pydantic-ai==0.6.2; extra == 'interop-pydantic-ai'
+ Requires-Dist: pydantic-ai==1.0.1; extra == 'interop-pydantic-ai'
  Provides-Extra: jupyter-executor
  Requires-Dist: ipykernel>=6.29.0; extra == 'jupyter-executor'
  Requires-Dist: jupyter-client>=8.6.0; extra == 'jupyter-executor'
@@ -197,7 +196,7 @@ Requires-Dist: websocket-client; extra == 'jupyter-executor'
  Provides-Extra: lint
  Requires-Dist: codespell==2.4.1; extra == 'lint'
  Requires-Dist: pyupgrade-directories==0.3.0; extra == 'lint'
- Requires-Dist: ruff==0.12.8; extra == 'lint'
+ Requires-Dist: ruff==0.12.12; extra == 'lint'
  Provides-Extra: lmm
  Requires-Dist: pillow; extra == 'lmm'
  Requires-Dist: replicate; extra == 'lmm'
@@ -218,9 +217,10 @@ Provides-Extra: mistral
  Requires-Dist: mistralai>=1.0.1; extra == 'mistral'
  Provides-Extra: neo4j
  Requires-Dist: docx2txt==0.9; extra == 'neo4j'
- Requires-Dist: llama-index-graph-stores-neo4j==0.5.0; extra == 'neo4j'
- Requires-Dist: llama-index-readers-web==0.5.0; extra == 'neo4j'
- Requires-Dist: llama-index<1,>=0.12; extra == 'neo4j'
+ Requires-Dist: llama-index-core<0.14,>=0.12; extra == 'neo4j'
+ Requires-Dist: llama-index-graph-stores-neo4j<0.6,>=0.4; extra == 'neo4j'
+ Requires-Dist: llama-index-readers-web<0.6,>=0.4; extra == 'neo4j'
+ Requires-Dist: llama-index<0.14,>=0.12; extra == 'neo4j'
  Provides-Extra: ollama
  Requires-Dist: fix-busted-json>=0.0.18; extra == 'ollama'
  Requires-Dist: ollama>=0.4.7; extra == 'ollama'
@@ -232,11 +232,14 @@ Requires-Dist: openai[realtime]; extra == 'openai-realtime'
  Provides-Extra: rag
  Requires-Dist: chromadb<2,>=0.5; extra == 'rag'
  Requires-Dist: docling<3,>=2.15.1; extra == 'rag'
- Requires-Dist: llama-index-embeddings-huggingface==0.6.0; extra == 'rag'
- Requires-Dist: llama-index-llms-langchain==0.7.0; extra == 'rag'
- Requires-Dist: llama-index-vector-stores-chroma==0.4.1; extra == 'rag'
- Requires-Dist: llama-index-vector-stores-mongodb==0.8.0; extra == 'rag'
- Requires-Dist: llama-index<1,>=0.12; extra == 'rag'
+ Requires-Dist: llama-index-core<0.14,>=0.12; extra == 'rag'
+ Requires-Dist: llama-index-embeddings-huggingface<0.7,>=0.5; extra == 'rag'
+ Requires-Dist: llama-index-embeddings-openai<0.6,>=0.3; extra == 'rag'
+ Requires-Dist: llama-index-llms-langchain<0.8,>=0.6; extra == 'rag'
+ Requires-Dist: llama-index-llms-openai<0.6,>=0.4; extra == 'rag'
+ Requires-Dist: llama-index-vector-stores-chroma<0.6,>=0.4; extra == 'rag'
+ Requires-Dist: llama-index-vector-stores-mongodb<0.9,>=0.6; extra == 'rag'
+ Requires-Dist: llama-index<0.14,>=0.12; extra == 'rag'
  Requires-Dist: requests<3,>=2.32.3; extra == 'rag'
  Requires-Dist: selenium<5,>=4.28.1; extra == 'rag'
  Requires-Dist: webdriver-manager==4.0.2; extra == 'rag'
@@ -244,50 +247,50 @@ Provides-Extra: redis
  Requires-Dist: redis; extra == 'redis'
  Provides-Extra: retrievechat
  Requires-Dist: beautifulsoup4; extra == 'retrievechat'
- Requires-Dist: chromadb==1.0.16; extra == 'retrievechat'
+ Requires-Dist: chromadb==1.0.20; extra == 'retrievechat'
  Requires-Dist: ipython; extra == 'retrievechat'
  Requires-Dist: markdownify; extra == 'retrievechat'
- Requires-Dist: protobuf==6.31.1; extra == 'retrievechat'
+ Requires-Dist: protobuf==6.32.0; extra == 'retrievechat'
  Requires-Dist: pypdf; extra == 'retrievechat'
  Requires-Dist: sentence-transformers<=5.1.0; extra == 'retrievechat'
  Provides-Extra: retrievechat-couchbase
  Requires-Dist: beautifulsoup4; extra == 'retrievechat-couchbase'
- Requires-Dist: chromadb==1.0.16; extra == 'retrievechat-couchbase'
+ Requires-Dist: chromadb==1.0.20; extra == 'retrievechat-couchbase'
  Requires-Dist: couchbase>=4.3.0; extra == 'retrievechat-couchbase'
  Requires-Dist: ipython; extra == 'retrievechat-couchbase'
  Requires-Dist: markdownify; extra == 'retrievechat-couchbase'
  Requires-Dist: numpy; extra == 'retrievechat-couchbase'
- Requires-Dist: protobuf==6.31.1; extra == 'retrievechat-couchbase'
+ Requires-Dist: protobuf==6.32.0; extra == 'retrievechat-couchbase'
  Requires-Dist: pypdf; extra == 'retrievechat-couchbase'
  Requires-Dist: sentence-transformers<=5.1.0; extra == 'retrievechat-couchbase'
  Provides-Extra: retrievechat-mongodb
  Requires-Dist: beautifulsoup4; extra == 'retrievechat-mongodb'
- Requires-Dist: chromadb==1.0.16; extra == 'retrievechat-mongodb'
+ Requires-Dist: chromadb==1.0.20; extra == 'retrievechat-mongodb'
  Requires-Dist: ipython; extra == 'retrievechat-mongodb'
  Requires-Dist: markdownify; extra == 'retrievechat-mongodb'
  Requires-Dist: numpy; extra == 'retrievechat-mongodb'
- Requires-Dist: protobuf==6.31.1; extra == 'retrievechat-mongodb'
+ Requires-Dist: protobuf==6.32.0; extra == 'retrievechat-mongodb'
  Requires-Dist: pymongo>=4.0.0; extra == 'retrievechat-mongodb'
  Requires-Dist: pypdf; extra == 'retrievechat-mongodb'
  Requires-Dist: sentence-transformers<=5.1.0; extra == 'retrievechat-mongodb'
  Provides-Extra: retrievechat-pgvector
  Requires-Dist: beautifulsoup4; extra == 'retrievechat-pgvector'
- Requires-Dist: chromadb==1.0.16; extra == 'retrievechat-pgvector'
+ Requires-Dist: chromadb==1.0.20; extra == 'retrievechat-pgvector'
  Requires-Dist: ipython; extra == 'retrievechat-pgvector'
  Requires-Dist: markdownify; extra == 'retrievechat-pgvector'
  Requires-Dist: pgvector>=0.2.5; extra == 'retrievechat-pgvector'
- Requires-Dist: protobuf==6.31.1; extra == 'retrievechat-pgvector'
+ Requires-Dist: protobuf==6.32.0; extra == 'retrievechat-pgvector'
  Requires-Dist: psycopg>=3.1.18; (platform_system == 'Linux') and extra == 'retrievechat-pgvector'
  Requires-Dist: psycopg[binary]>=3.1.18; (platform_system == 'Windows' or platform_system == 'Darwin') and extra == 'retrievechat-pgvector'
  Requires-Dist: pypdf; extra == 'retrievechat-pgvector'
  Requires-Dist: sentence-transformers<=5.1.0; extra == 'retrievechat-pgvector'
  Provides-Extra: retrievechat-qdrant
  Requires-Dist: beautifulsoup4; extra == 'retrievechat-qdrant'
- Requires-Dist: chromadb==1.0.16; extra == 'retrievechat-qdrant'
+ Requires-Dist: chromadb==1.0.20; extra == 'retrievechat-qdrant'
  Requires-Dist: fastembed>=0.3.1; extra == 'retrievechat-qdrant'
  Requires-Dist: ipython; extra == 'retrievechat-qdrant'
  Requires-Dist: markdownify; extra == 'retrievechat-qdrant'
- Requires-Dist: protobuf==6.31.1; extra == 'retrievechat-qdrant'
+ Requires-Dist: protobuf==6.32.0; extra == 'retrievechat-qdrant'
  Requires-Dist: pypdf; extra == 'retrievechat-qdrant'
  Requires-Dist: qdrant-client; extra == 'retrievechat-qdrant'
  Requires-Dist: sentence-transformers<=5.1.0; extra == 'retrievechat-qdrant'
@@ -303,10 +306,10 @@ Requires-Dist: mcp>=1.11.0; extra == 'test'
  Requires-Dist: mock==5.2.0; extra == 'test'
  Requires-Dist: nbconvert==7.16.6; extra == 'test'
  Requires-Dist: nbformat==5.10.4; extra == 'test'
- Requires-Dist: pandas==2.3.1; extra == 'test'
+ Requires-Dist: pandas==2.3.2; extra == 'test'
  Requires-Dist: pytest-asyncio==1.1.0; extra == 'test'
- Requires-Dist: pytest-cov==6.2.1; extra == 'test'
- Requires-Dist: pytest==8.4.1; extra == 'test'
+ Requires-Dist: pytest-cov==6.3.0; extra == 'test'
+ Requires-Dist: pytest==8.4.2; extra == 'test'
  Provides-Extra: together
  Requires-Dist: together>=1.2; extra == 'together'
  Provides-Extra: twilio
@@ -323,10 +326,10 @@ Requires-Dist: mypy==1.17.1; extra == 'types'
  Requires-Dist: nbconvert==7.16.6; extra == 'types'
  Requires-Dist: nbformat==5.10.4; extra == 'types'
  Requires-Dist: openai>=1.99.3; extra == 'types'
- Requires-Dist: pandas==2.3.1; extra == 'types'
+ Requires-Dist: pandas==2.3.2; extra == 'types'
  Requires-Dist: pytest-asyncio==1.1.0; extra == 'types'
- Requires-Dist: pytest-cov==6.2.1; extra == 'types'
- Requires-Dist: pytest==8.4.1; extra == 'types'
+ Requires-Dist: pytest-cov==6.3.0; extra == 'types'
+ Requires-Dist: pytest==8.4.2; extra == 'types'
  Requires-Dist: types-decorator; extra == 'types'
  Requires-Dist: types-pycurl; extra == 'types'
  Requires-Dist: types-python-dateutil; extra == 'types'
@@ -342,6 +345,9 @@ Requires-Dist: pathvalidate; extra == 'websurfer'
  Requires-Dist: pdfminer-six; extra == 'websurfer'
  Provides-Extra: wikipedia
  Requires-Dist: wikipedia-api<1.0,>=0.8.1; extra == 'wikipedia'
+ Provides-Extra: yepcode
+ Requires-Dist: python-dotenv; extra == 'yepcode'
+ Requires-Dist: yepcode-run>=1.6.1; extra == 'yepcode'
  Description-Content-Type: text/markdown
 
  <a name="readme-top"></a>
@@ -411,9 +417,9 @@ The project is currently maintained by a [dynamic group of volunteers](MAINTAINE
  - [Tools](#tools)
  - [Advanced agentic design patterns](#advanced-agentic-design-patterns)
  - [Announcements](#announcements)
- - [Contributors Wall](#contributors-wall)
  - [Code style and linting](#code-style-and-linting)
  - [Related papers](#related-papers)
+ - [Contributors Wall](#contributors-wall)
  - [Cite the project](#cite-the-project)
  - [License](#license)
 
@@ -425,22 +431,30 @@ For a step-by-step walk through of AG2 concepts and code, see [Basic Concepts](h
 
  AG2 requires **Python version >= 3.10, < 3.14**. AG2 is available via `ag2` (or its alias `autogen`) on PyPI.
 
+ **Windows/Linux:**
  ```bash
  pip install ag2[openai]
  ```
 
+ **Mac:**
+ ```bash
+ pip install 'ag2[openai]'
+ ```
+
  Minimal dependencies are installed by default. You can install extra options based on the features you need.
 
  ### Setup your API keys
 
- To keep your LLM dependencies neat we recommend using the `OAI_CONFIG_LIST` file to store your API keys.
+ To keep your LLM dependencies neat and avoid accidentally checking in code with your API key, we recommend storing your keys in a configuration file.
 
- You can use the sample file `OAI_CONFIG_LIST_sample` as a template.
+ In our examples, we use a file named **`OAI_CONFIG_LIST`** to store API keys. You can choose any filename, but make sure to add it to `.gitignore` so it will not be committed to source control.
+
+ You can use the following content as a template:
 
  ```json
  [
      {
-         "model": "gpt-4o",
+         "model": "gpt-5",
          "api_key": "<your OpenAI API key here>"
      }
  ]
@@ -455,12 +469,11 @@ from autogen import AssistantAgent, UserProxyAgent, LLMConfig
 
  llm_config = LLMConfig.from_json(path="OAI_CONFIG_LIST")
 
+ assistant = AssistantAgent("assistant", llm_config=llm_config)
 
- with llm_config:
-     assistant = AssistantAgent("assistant")
  user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding", "use_docker": False})
- user_proxy.initiate_chat(assistant, message="Plot a chart of NVDA and TESLA stock price change YTD.")
- # This initiates an automated chat between the two agents to solve the task
+
+ user_proxy.run(assistant, message="Summarize the main differences between Python lists and tuples.").process()
  ```
 
  ## Example applications
@@ -484,208 +497,216 @@ We have several agent concepts in AG2 to help you build your AI agents. We intro
 
  The [ConversableAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/ConversableAgent) is the fundamental building block of AG2, designed to enable seamless communication between AI entities. This core agent type handles message exchange and response generation, serving as the base class for all agents in the framework.
 
- In the example below, we'll create a simple information validation workflow with two specialized agents that communicate with each other:
-
- Note: Before running this code, make sure to set your `OPENAI_API_KEY` as an environment variable. This example uses `gpt-4o-mini`, but you can replace it with any other [model](https://docs.ag2.ai/latest/docs/user-guide/models/amazon-bedrock) supported by AG2.
+ Let's begin with a simple example where two agents collaborate:
+ - A **coder agent** that writes Python code.
+ - A **reviewer agent** that critiques the code without rewriting it.
 
  ```python
- # 1. Import ConversableAgent class
+ import logging
  from autogen import ConversableAgent, LLMConfig
 
- # 2. Define our LLM configuration for OpenAI's GPT-4o mini
- # uses the OPENAI_API_KEY environment variable
- llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")
-
-
- # 3. Create our LLM agent
- with llm_config:
-     # Create an AI agent
-     assistant = ConversableAgent(
-         name="assistant",
-         system_message="You are an assistant that responds concisely.",
-     )
-
-     # Create another AI agent
-     fact_checker = ConversableAgent(
-         name="fact_checker",
-         system_message="You are a fact-checking assistant.",
-     )
-
- # 4. Start the conversation
- assistant.initiate_chat(
-     recipient=fact_checker,
-     message="What is AG2?",
-     max_turns=2
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Load LLM configuration
+ llm_config = LLMConfig.from_json(path="OAI_CONFIG_LIST")
+
+ # Define agents
+ coder = ConversableAgent(
+     name="coder",
+     system_message="You are a Python developer. Write short Python scripts.",
+     llm_config=llm_config,
  )
- ```
 
- ### Human in the loop
+ reviewer = ConversableAgent(
+     name="reviewer",
+     system_message="You are a code reviewer. Analyze provided code and suggest improvements. "
+     "Do not generate code, only suggest improvements.",
+     llm_config=llm_config,
+ )
 
- Human oversight is crucial for many AI workflows, especially when dealing with critical decisions, creative tasks, or situations requiring expert judgment. AG2 makes integrating human feedback seamless through its human-in-the-loop functionality.
- You can configure how and when human input is solicited using the `human_input_mode` parameter:
+ # Start a conversation
+ response = reviewer.run(
+     recipient=coder,
+     message="Write a Python function that computes Fibonacci numbers.",
+     max_turns=10
+ )
 
- - `ALWAYS`: Requires human input for every response
- - `NEVER`: Operates autonomously without human involvement
- - `TERMINATE`: Only requests human input to end conversations
+ response.process()
 
- For convenience, AG2 provides the specialized `UserProxyAgent` class that automatically sets `human_input_mode` to `ALWAYS` and supports code execution:
+ logger.info("Final output:\n%s", response.summary)
+ ```
+
+ ---
+ ### Orchestrating Multiple Agents
+
+ AG2 enables sophisticated multi-agent collaboration through flexible orchestration patterns, allowing you to create dynamic systems where specialized agents work together to solve complex problems.
 
- Note: Before running this code, make sure to set your `OPENAI_API_KEY` as an environment variable. This example uses `gpt-4o-mini`, but you can replace it with any other [model](https://docs.ag2.ai/latest/docs/user-guide/models/amazon-bedrock) supported by AG2.
+ Here’s how to build a team of **teacher**, **lesson planner**, and **reviewer** agents working together to design a lesson plan:
 
  ```python
- # 1. Import ConversableAgent and UserProxyAgent classes
- from autogen import ConversableAgent, UserProxyAgent, LLMConfig
+ import logging
+ from autogen import ConversableAgent, LLMConfig
+ from autogen.agentchat import run_group_chat
+ from autogen.agentchat.group.patterns import AutoPattern
 
- # 2. Define our LLM configuration for OpenAI's GPT-4o mini
- # uses the OPENAI_API_KEY environment variable
- llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
 
+ llm_config = LLMConfig.from_json(path="OAI_CONFIG_LIST")
 
- # 3. Create our LLM agent
- with llm_config:
-     assistant = ConversableAgent(
-         name="assistant",
-         system_message="You are a helpful assistant.",
-     )
+ # Define lesson planner and reviewer
+ planner_message = "You are a classroom lesson planner. Given a topic, write a lesson plan for a fourth grade class."
+ reviewer_message = "You are a classroom lesson reviewer. Compare the plan to the curriculum and suggest up to 3 improvements."
 
- # 4. Create a human agent with manual input mode
- human = ConversableAgent(
-     name="human",
-     human_input_mode="ALWAYS"
+ lesson_planner = ConversableAgent(
+     name="planner_agent",
+     system_message=planner_message,
+     description="Creates or revises lesson plans.",
+     llm_config=llm_config,
  )
- # or
- human = UserProxyAgent(name="human", code_execution_config={"work_dir": "coding", "use_docker": False})
 
- # 5. Start the chat
- human.initiate_chat(
-     recipient=assistant,
-     message="Hello! What's 2 + 2?"
+ lesson_reviewer = ConversableAgent(
+     name="reviewer_agent",
+     system_message=reviewer_message,
+     description="Provides one round of feedback to lesson plans.",
+     llm_config=llm_config,
  )
 
- ```
+ teacher_message = "You are a classroom teacher. You decide topics and collaborate with planner and reviewer to finalize lesson plans. When satisfied, output DONE!"
 
- ### Orchestrating multiple agents
+ teacher = ConversableAgent(
+     name="teacher_agent",
+     system_message=teacher_message,
+     is_termination_msg=lambda x: "DONE!" in (x.get("content", "") or "").upper(),
+     llm_config=llm_config,
+ )
 
- AG2 enables sophisticated multi-agent collaboration through flexible orchestration patterns, allowing you to create dynamic systems where specialized agents work together to solve complex problems.
+ auto_selection = AutoPattern(
+     agents=[teacher, lesson_planner, lesson_reviewer],
+     initial_agent=lesson_planner,
+     group_manager_args={"name": "group_manager", "llm_config": llm_config},
+ )
+
+ response = run_group_chat(
+     pattern=auto_selection,
+     messages="Let's introduce our kids to the solar system.",
+     max_rounds=20,
+ )
 
- The framework offers both custom orchestration and several built-in collaboration patterns including `GroupChat` and `Swarm`.
+ response.process()
+
+ logger.info("Final output:\n%s", response.summary)
+ ```
 
- Here's how to implement a collaborative team for curriculum development using GroupChat:
+ ---
 
- Note: Before running this code, make sure to set your `OPENAI_API_KEY` as an environment variable. This example uses `gpt-4o-mini`, but you can replace it with any other [model](https://docs.ag2.ai/latest/docs/user-guide/models/amazon-bedrock) supported by AG2.
+ ### Human in the Loop
+
+ Human oversight is often essential for validating or guiding AI outputs.
+ AG2 provides the `UserProxyAgent` for seamless integration of human feedback.
+
+ Here we extend the **teacher–planner–reviewer** example by introducing a **human agent** who validates the final lesson:
 
  ```python
- from autogen import ConversableAgent, GroupChat, GroupChatManager, LLMConfig
-
- # Put your key in the OPENAI_API_KEY environment variable
- llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")
-
- planner_message = """You are a classroom lesson agent.
- Given a topic, write a lesson plan for a fourth grade class.
- Use the following format:
- <title>Lesson plan title</title>
- <learning_objectives>Key learning objectives</learning_objectives>
- <script>How to introduce the topic to the kids</script>
- """
-
- reviewer_message = """You are a classroom lesson reviewer.
- You compare the lesson plan to the fourth grade curriculum and provide a maximum of 3 recommended changes.
- Provide only one round of reviews to a lesson plan.
- """
-
- # 1. Add a separate 'description' for our planner and reviewer agents
- planner_description = "Creates or revises lesson plans."
-
- reviewer_description = """Provides one round of reviews to a lesson plan
- for the lesson_planner to revise."""
-
- with llm_config:
-     lesson_planner = ConversableAgent(
-         name="planner_agent",
-         system_message=planner_message,
-         description=planner_description,
-     )
-
-     lesson_reviewer = ConversableAgent(
-         name="reviewer_agent",
-         system_message=reviewer_message,
-         description=reviewer_description,
-     )
-
- # 2. The teacher's system message can also be used as a description, so we don't define it
- teacher_message = """You are a classroom teacher.
- You decide topics for lessons and work with a lesson planner.
- and reviewer to create and finalise lesson plans.
- When you are happy with a lesson plan, output "DONE!".
- """
-
- with llm_config:
-     teacher = ConversableAgent(
-         name="teacher_agent",
-         system_message=teacher_message,
-         # 3. Our teacher can end the conversation by saying DONE!
-         is_termination_msg=lambda x: "DONE!" in (x.get("content", "") or "").upper(),
-     )
-
- # 4. Create the GroupChat with agents and selection method
- groupchat = GroupChat(
-     agents=[teacher, lesson_planner, lesson_reviewer],
-     speaker_selection_method="auto",
-     messages=[],
+ import logging
+ from autogen import ConversableAgent, LLMConfig, UserProxyAgent
+ from autogen.agentchat import run_group_chat
+ from autogen.agentchat.group.patterns import AutoPattern
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ llm_config = LLMConfig.from_json(path="OAI_CONFIG_LIST")
+
+ # Same agents as before, but now the human validator will pass to the planner who will check for "APPROVED" and terminate
+ planner_message = "You are a classroom lesson planner. Given a topic, write a lesson plan for a fourth grade class."
+ reviewer_message = "You are a classroom lesson reviewer. Compare the plan to the curriculum and suggest up to 3 improvements."
+ teacher_message = "You are an experienced classroom teacher. You don't prepare plans, you provide simple guidance to the planner to prepare a lesson plan on the key topic."
+
+ lesson_planner = ConversableAgent(
+     name="planner_agent",
+     system_message=planner_message,
+     description="Creates or revises lesson plans before having them reviewed.",
+     is_termination_msg=lambda x: "APPROVED" in (x.get("content", "") or "").upper(),
+     human_input_mode="NEVER",
+     llm_config=llm_config,
  )
 
- # 5. Our GroupChatManager will manage the conversation and uses an LLM to select the next agent
- manager = GroupChatManager(
-     name="group_manager",
-     groupchat=groupchat,
+ lesson_reviewer = ConversableAgent(
+     name="reviewer_agent",
+     system_message=reviewer_message,
+     description="Provides one round of feedback to lesson plans back to the lesson planner before requiring the human validator.",
      llm_config=llm_config,
  )
 
- # 6. Initiate the chat with the GroupChatManager as the recipient
- teacher.initiate_chat(
-     recipient=manager,
-     message="Today, let's introduce our kids to the solar system."
+ teacher = ConversableAgent(
+     name="teacher_agent",
+     system_message=teacher_message,
+     description="Provides guidance on the topic and content, if required.",
+     llm_config=llm_config,
+ )
+
+ human_validator = UserProxyAgent(
+     name="human_validator",
+     system_message="You are a human educator who provides final approval for lesson plans.",
+     description="Evaluates the proposed lesson plan and either approves it or requests revisions, before returning to the planner.",
  )
- ```
 
- When executed, this code creates a collaborative system where the teacher initiates the conversation, and the lesson planner and reviewer agents work together to create and refine a lesson plan. The GroupChatManager orchestrates the conversation, selecting the next agent to respond based on the context of the discussion.
+ auto_selection = AutoPattern(
+     agents=[teacher, lesson_planner, lesson_reviewer],
+     initial_agent=teacher,
+     user_agent=human_validator,
+     group_manager_args={"name": "group_manager", "llm_config": llm_config},
+ )
 
- For workflows requiring more structured processes, explore the Group Chat pattern in the detailed [documentation](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/orchestration/group-chat/introduction).
+ response = run_group_chat(
+     pattern=auto_selection,
+     messages="Let's introduce our kids to the solar system.",
+     max_rounds=20,
+ )
 
- ### Tools
+ response.process()
 
- Agents gain significant utility through tools as they provide access to external data, APIs, and functionality.
+ logger.info("Final output:\n%s", response.summary)
+ ```
+
+ ---
+
+ ### Tools
 
- Note: Before running this code, make sure to set your `OPENAI_API_KEY` as an environment variable. This example uses `gpt-4o-mini`, but you can replace it with any other [model](https://docs.ag2.ai/latest/docs/user-guide/models/amazon-bedrock) supported by AG2.
+ Agents gain significant utility through **tools**, which extend their capabilities with external data, APIs, or functions.
 
  ```python
+ import logging
  from datetime import datetime
  from typing import Annotated
-
  from autogen import ConversableAgent, register_function, LLMConfig
 
- # Put your key in the OPENAI_API_KEY environment variable
- llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ llm_config = LLMConfig.from_json(path="OAI_CONFIG_LIST")
 
- # 1. Our tool, returns the day of the week for a given date
+ # Tool: returns weekday for a given date
  def get_weekday(date_string: Annotated[str, "Format: YYYY-MM-DD"]) -> str:
      date = datetime.strptime(date_string, "%Y-%m-%d")
      return date.strftime("%A")
 
- # 2. Agent for determining whether to run the tool
- with llm_config:
-     date_agent = ConversableAgent(
-         name="date_agent",
-         system_message="You get the day of the week for a given date.",
-     )
+ date_agent = ConversableAgent(
+     name="date_agent",
+     system_message="You find the day of the week for a given date.",
+     llm_config=llm_config,
+ )
 
- # 3. And an agent for executing the tool
  executor_agent = ConversableAgent(
      name="executor_agent",
      human_input_mode="NEVER",
+     llm_config=llm_config,
  )
 
- # 4. Registers the tool with the agents, the description will be used by the LLM
+ # Register tool
  register_function(
      get_weekday,
      caller=date_agent,
@@ -693,14 +714,14 @@ register_function(
      description="Get the day of the week for a given date",
  )
 
- # 5. Two-way chat ensures the executor agent follows the suggesting agent
+ # Use tool in chat
  chat_result = executor_agent.initiate_chat(
      recipient=date_agent,
-     message="I was born on the 25th of March 1995, what day was it?",
+     message="I was born on 1995-03-25, what day was it?",
      max_turns=2,
  )
 
- print(chat_result.chat_history[-1]["content"])
+ logger.info("Final output:\n%s", chat_result.chat_history[-1]["content"])
  ```
 
  ### Advanced agentic design patterns
@@ -712,6 +733,7 @@ AG2 supports more advanced concepts to help you build your AI agent workflows. Y
  - [Retrieval Augmented Generation (RAG)](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/rag/)
  - [Code Execution](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/code-execution)
  - [Tools with Secrets](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/tools/tools-with-secrets/)
+ - [Pattern Cookbook (9 group orchestrations)](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/pattern-cookbook/overview/)
 
  ## Announcements
 
@@ -729,12 +751,6 @@ We adopt the Apache 2.0 license from v0.3. This enhances our commitment to open-
 
  [More Announcements](announcements.md)
 
- ## Contributors Wall
-
- <a href="https://github.com/ag2ai/ag2/graphs/contributors">
-   <img src="https://contrib.rocks/image?repo=ag2ai/ag2&max=204" />
- </a>
-
  ## Code style and linting
 
  This project uses pre-commit hooks to maintain code quality. Before contributing:
@@ -764,6 +780,12 @@ pre-commit run --all-files
 
  - [StateFlow: Enhancing LLM Task-Solving through State-Driven Workflows](https://arxiv.org/abs/2403.11322)
 
+ ## Contributors Wall
+
+ <a href="https://github.com/ag2ai/ag2/graphs/contributors">
+   <img src="https://contrib.rocks/image?repo=ag2ai/ag2&max=204" />
+ </a>
+
  ## Cite the project
 
  ```