ag2 0.9.2__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ag2
3
- Version: 0.9.2
3
+ Version: 0.9.3
4
4
  Summary: A programming framework for agentic AI
5
5
  Project-URL: Homepage, https://ag2.ai/
6
6
  Project-URL: Documentation, https://docs.ag2.ai
@@ -72,7 +72,7 @@ Requires-Dist: azure-cosmos>=4.2.0; extra == 'cosmosdb'
72
72
  Provides-Extra: crawl4ai
73
73
  Requires-Dist: crawl4ai<0.5,>=0.4.247; extra == 'crawl4ai'
74
74
  Provides-Extra: deepseek
75
- Requires-Dist: openai>=1.66.2; extra == 'deepseek'
75
+ Requires-Dist: openai>=1.87.0; extra == 'deepseek'
76
76
  Provides-Extra: dev
77
77
  Requires-Dist: cairosvg; extra == 'dev'
78
78
  Requires-Dist: codespell==2.4.1; extra == 'dev'
@@ -96,7 +96,7 @@ Requires-Dist: mypy==1.15.0; extra == 'dev'
96
96
  Requires-Dist: nbclient==0.10.2; extra == 'dev'
97
97
  Requires-Dist: nbconvert==7.16.6; extra == 'dev'
98
98
  Requires-Dist: nbformat==5.10.4; extra == 'dev'
99
- Requires-Dist: openai>=1.66.2; extra == 'dev'
99
+ Requires-Dist: openai>=1.87.0; extra == 'dev'
100
100
  Requires-Dist: pandas==2.2.3; extra == 'dev'
101
101
  Requires-Dist: pdoc3==0.11.6; extra == 'dev'
102
102
  Requires-Dist: pillow; extra == 'dev'
@@ -130,6 +130,8 @@ Requires-Dist: pillow; extra == 'docs'
130
130
  Requires-Dist: pyyaml==6.0.2; extra == 'docs'
131
131
  Requires-Dist: termcolor==3.0.1; extra == 'docs'
132
132
  Requires-Dist: typer==0.15.2; extra == 'docs'
133
+ Provides-Extra: duckduckgo
134
+ Requires-Dist: duckduckgo-search>=8.0.2; extra == 'duckduckgo'
133
135
  Provides-Extra: flaml
134
136
  Requires-Dist: flaml; extra == 'flaml'
135
137
  Requires-Dist: numpy<2.0.0,>=1.24.0; (python_version < '3.13') and extra == 'flaml'
@@ -138,14 +140,14 @@ Provides-Extra: gemini
138
140
  Requires-Dist: google-api-core; extra == 'gemini'
139
141
  Requires-Dist: google-auth; extra == 'gemini'
140
142
  Requires-Dist: google-cloud-aiplatform; extra == 'gemini'
141
- Requires-Dist: google-genai>=1.2.0; extra == 'gemini'
143
+ Requires-Dist: google-genai>=1.20.0; extra == 'gemini'
142
144
  Requires-Dist: jsonschema; extra == 'gemini'
143
145
  Requires-Dist: pillow; extra == 'gemini'
144
146
  Provides-Extra: gemini-realtime
145
147
  Requires-Dist: google-api-core; extra == 'gemini-realtime'
146
148
  Requires-Dist: google-auth; extra == 'gemini-realtime'
147
149
  Requires-Dist: google-cloud-aiplatform; extra == 'gemini-realtime'
148
- Requires-Dist: google-genai>=1.2.0; extra == 'gemini-realtime'
150
+ Requires-Dist: google-genai>=1.20.0; extra == 'gemini-realtime'
149
151
  Requires-Dist: jsonschema; extra == 'gemini-realtime'
150
152
  Requires-Dist: pillow; extra == 'gemini-realtime'
151
153
  Requires-Dist: websockets<16,>=14.0; extra == 'gemini-realtime'
@@ -198,7 +200,7 @@ Provides-Extra: mathchat
198
200
  Requires-Dist: sympy; extra == 'mathchat'
199
201
  Requires-Dist: wolframalpha; extra == 'mathchat'
200
202
  Provides-Extra: mcp
201
- Requires-Dist: mcp<1.6,>=1.4.0; (python_version >= '3.10') and extra == 'mcp'
203
+ Requires-Dist: mcp>=1.9.4; (python_version >= '3.10') and extra == 'mcp'
202
204
  Provides-Extra: mcp-proxy-gen
203
205
  Requires-Dist: fastapi-code-generator>=0.5.4; extra == 'mcp-proxy-gen'
204
206
  Requires-Dist: fastapi<1,>=0.112; extra == 'mcp-proxy-gen'
@@ -216,9 +218,9 @@ Provides-Extra: ollama
216
218
  Requires-Dist: fix-busted-json>=0.0.18; extra == 'ollama'
217
219
  Requires-Dist: ollama>=0.4.7; extra == 'ollama'
218
220
  Provides-Extra: openai
219
- Requires-Dist: openai>=1.66.2; extra == 'openai'
221
+ Requires-Dist: openai>=1.87.0; extra == 'openai'
220
222
  Provides-Extra: openai-realtime
221
- Requires-Dist: openai>=1.66.2; extra == 'openai-realtime'
223
+ Requires-Dist: openai>=1.87.0; extra == 'openai-realtime'
222
224
  Requires-Dist: openai[realtime]; extra == 'openai-realtime'
223
225
  Provides-Extra: rag
224
226
  Requires-Dist: chromadb<1,>=0.5; extra == 'rag'
@@ -282,6 +284,8 @@ Requires-Dist: protobuf==5.29.3; extra == 'retrievechat-qdrant'
282
284
  Requires-Dist: pypdf; extra == 'retrievechat-qdrant'
283
285
  Requires-Dist: qdrant-client; extra == 'retrievechat-qdrant'
284
286
  Requires-Dist: sentence-transformers<=4.1.0; extra == 'retrievechat-qdrant'
287
+ Provides-Extra: tavily
288
+ Requires-Dist: tavily-python>=0.7.4; extra == 'tavily'
285
289
  Provides-Extra: teachable
286
290
  Requires-Dist: chromadb; extra == 'teachable'
287
291
  Provides-Extra: test
@@ -309,7 +313,7 @@ Requires-Dist: mock==5.2.0; extra == 'types'
309
313
  Requires-Dist: mypy==1.15.0; extra == 'types'
310
314
  Requires-Dist: nbconvert==7.16.6; extra == 'types'
311
315
  Requires-Dist: nbformat==5.10.4; extra == 'types'
312
- Requires-Dist: openai>=1.66.2; extra == 'types'
316
+ Requires-Dist: openai>=1.87.0; extra == 'types'
313
317
  Requires-Dist: pandas==2.2.3; extra == 'types'
314
318
  Requires-Dist: pytest-asyncio==0.26.0; extra == 'types'
315
319
  Requires-Dist: pytest-cov==6.1.1; extra == 'types'
@@ -678,7 +682,7 @@ AG2 supports more advanced concepts to help you build your AI agent workflows. Y
678
682
 
679
683
  - [Structured Output](https://docs.ag2.ai/latest/docs/user-guide/basic-concepts/structured-outputs)
680
684
  - [Ending a conversation](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/orchestration/ending-a-chat/)
681
- - [Retrieval Augmented Generation (RAG)](https://docs.ag2.ai/docs/user-guide/advanced-concepts/rag)
685
+ - [Retrieval Augmented Generation (RAG)](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/rag/)
682
686
  - [Code Execution](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/code-execution)
683
687
  - [Tools with Secrets](https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/tools/tools-with-secrets/)
684
688
 
@@ -14,7 +14,7 @@ autogen/retrieve_utils.py,sha256=R3Yp5d8dH4o9ayLZrGn4rCjIaY4glOHIiyQjwClmdi8,200
14
14
  autogen/runtime_logging.py,sha256=yCmZODvwqYR91m8lX3Q4SoPcY-DK48NF4m56CP6Om3c,4692
15
15
  autogen/token_count_utils.py,sha256=n4wTFVNHwrfjZkrErFr8kNig2K-YCGgMLWsjDRS9D6g,10797
16
16
  autogen/types.py,sha256=qu-7eywhakW2AxQ5lYisLLeIg45UoOW-b3ErIuyRTuw,1000
17
- autogen/version.py,sha256=zvCJ0lHDfEBMWZDWk8uOP6RODc_0sB12xDf8whkwhGc,193
17
+ autogen/version.py,sha256=tN9tkN8TlaYs7tWFTkUc6inNMUFd2YXRCAXmqLFBOI4,193
18
18
  autogen/_website/__init__.py,sha256=c8B9TpO07x9neD0zsJWj6AaEdlcP-WvxrvVOGWLtamk,143
19
19
  autogen/_website/generate_api_references.py,sha256=yKqyeSP_NE27wwLYWsZbTYRceEoxzNxPXqn6vsIzEvk,14789
20
20
  autogen/_website/generate_mkdocs.py,sha256=TkmLnUDv1Ms5cGClXPmenA8nxmwg4kR0E-FHCVjw_og,43246
@@ -26,7 +26,7 @@ autogen/agentchat/agent.py,sha256=HePNJ5BXJTcZtaD2a8CoTeHAoLUUy3scM6Ihm-NsSWk,58
26
26
  autogen/agentchat/assistant_agent.py,sha256=XTJvD66r4qYkdNAJJLr1CC-wTYFJWvhmD5_G0WbbX2I,5741
27
27
  autogen/agentchat/chat.py,sha256=6Gx2t1-Xa8kP6ZoUihHBNGOqNlrGhhqLPKrckL0n-RI,14003
28
28
  autogen/agentchat/conversable_agent.py,sha256=F5OtTxBYb4Cqw5cp6V1gt8tL6EVVFuAKmbCR9SVosT8,191085
29
- autogen/agentchat/groupchat.py,sha256=eh_JNZWU-0gbxO7rIngokh71kpHLkA1Qunn6zXDdM0k,85361
29
+ autogen/agentchat/groupchat.py,sha256=X54uAoAFYxMvxDVCMQJdlVnviOWvjqWp7bbt14PQUmk,85373
30
30
  autogen/agentchat/user_proxy_agent.py,sha256=-gbDblRvE09FGuVB6-y5ZT9Cpvv--rM3FMi8PnPIFBA,7445
31
31
  autogen/agentchat/utils.py,sha256=2ZweUqe4ynxji0jnvd0r9Uxg3n0elif4a-jZOyeeqcg,8238
32
32
  autogen/agentchat/contrib/__init__.py,sha256=tOTe4nwbKj7elHpftAy3zS_embMDzncrKL98XKhY6-c,168
@@ -58,7 +58,7 @@ autogen/agentchat/contrib/capabilities/teachability.py,sha256=tn3o7q-5vC7O-EFy7I
58
58
  autogen/agentchat/contrib/capabilities/text_compressors.py,sha256=tm5WDf0AC0VznFJ44Hy7zHh_Erar2c1OjExVt1MG8j8,2985
59
59
  autogen/agentchat/contrib/capabilities/tools_capability.py,sha256=iSECQqsHp-MBWu6Huo6OAH4ehSI04QYDGQBjUupFsPI,773
60
60
  autogen/agentchat/contrib/capabilities/transform_messages.py,sha256=bFUxDkq0jWLcgccE3Zp4_JD12zIVRE7AMVFwSJqssSY,3783
61
- autogen/agentchat/contrib/capabilities/transforms.py,sha256=Q_LSNQRRECKY1sd9r2KV6q4QukandWZSzVTgfgf1Rnk,25749
61
+ autogen/agentchat/contrib/capabilities/transforms.py,sha256=Ow3zPEPAnUfHoiH96vNxvjWGk5TVCqtttyqlkS3X2Dk,26292
62
62
  autogen/agentchat/contrib/capabilities/transforms_util.py,sha256=XjTkE_i-SArRPBuLmVaZYhJMY2RgNHjz2m_iF-lKUJM,4559
63
63
  autogen/agentchat/contrib/capabilities/vision_capability.py,sha256=kHUeIPvICOR-tLQ6g6AdNWtcbhrUKPIfg42ISqGzrA4,9872
64
64
  autogen/agentchat/contrib/captainagent/__init__.py,sha256=12X-ClPVPXBnN59wduSLhQ-PmUWXO45vvafHPQOUVV8,414
@@ -132,18 +132,18 @@ autogen/agentchat/group/context_expression.py,sha256=zFWPV3yfV-0ayYXfrhRq6iWQZnR
132
132
  autogen/agentchat/group/context_str.py,sha256=EHjpDr8MLMl5AMXktMi9Wp4BIL_1hbIeJPMEXlLkTjs,1270
133
133
  autogen/agentchat/group/context_variables.py,sha256=d2Q31aoV2o_5QSd3Oh1fYDIV0fDaaHOOsjNQ92TC_H0,5649
134
134
  autogen/agentchat/group/group_tool_executor.py,sha256=lCUg0Z_R8u0mRsWE350p6pGNj-6fJUMGqWqs3xmznCQ,9104
135
- autogen/agentchat/group/group_utils.py,sha256=XjVXvJ25Ka4l3kbPOVRgZbxM9PpaoS8ekQyeieJPF4U,25600
136
- autogen/agentchat/group/handoffs.py,sha256=ftGN8pUSQ0Y8Zu7M57kEazFuHG1kte4gqD8LD7gT54s,8463
135
+ autogen/agentchat/group/group_utils.py,sha256=W3zUghHffO0_seZefB68-U0jIufFDxAkd1NuutuaGmM,27240
136
+ autogen/agentchat/group/handoffs.py,sha256=ergiOsXC4z8N44LTUh-abIPrErjkXZWFRMXflinNq1Y,11743
137
137
  autogen/agentchat/group/llm_condition.py,sha256=wfuEET1VhyVVGedYxcyuhX_Vus6uZHxUl_SPpu4YIsc,2951
138
138
  autogen/agentchat/group/multi_agent_chat.py,sha256=oKAWiziaSZ0otwfGhSWaR26dYbfRQj_vau_P5Z1dvfY,7688
139
139
  autogen/agentchat/group/on_condition.py,sha256=ZxLiI4APceCGUcPFnLrsik7DEX9YgRW200k1QIhyaWg,2185
140
- autogen/agentchat/group/on_context_condition.py,sha256=rH4YlKnbUwUQ84lfEgACS9AQz6PCOYMZv2mQVjDSMUE,2008
140
+ autogen/agentchat/group/on_context_condition.py,sha256=GwB2qb2V7RbaOJPeXYXinuxDQ8ou0kvp7-9jZ85KFfk,2084
141
141
  autogen/agentchat/group/reply_result.py,sha256=KUJ2HWpRLEyc5SIVh60-GirsN7jFg-ceAeT4p7I0ZyQ,740
142
142
  autogen/agentchat/group/speaker_selection_result.py,sha256=2pvl-9zJ1al0AbAmLDqGRm9JgfmS-lyEOFiZoKIkUHY,1623
143
143
  autogen/agentchat/group/patterns/__init__.py,sha256=SUw-biSWow_uGHuHcMA3Pu2cq_CTTVeIqY2J8A5fq8A,446
144
144
  autogen/agentchat/group/patterns/auto.py,sha256=h0LjzGHv3yRqDCAnlOfjofkYNKiVzSKVc64T_o8e7wY,6075
145
145
  autogen/agentchat/group/patterns/manual.py,sha256=8ltqGFtt9xfyADl_OZFD0g-HydFQvDdrc35iwT612ok,6324
146
- autogen/agentchat/group/patterns/pattern.py,sha256=Uz7FgUQq5VGg4k_FqSuLoOcTsuLFNJ96lEvahjfrPVE,10721
146
+ autogen/agentchat/group/patterns/pattern.py,sha256=8DS9lMloLPemY10aIqTnttuRTd50YPN2-UFCj7wjhBk,10868
147
147
  autogen/agentchat/group/patterns/random.py,sha256=yrNCYwcodhdcquNEkdaWO_T32jJjiqaslLithAX1dSM,3367
148
148
  autogen/agentchat/group/patterns/round_robin.py,sha256=nS7nsQJKCq9lD0wyCx__gkWK3N9Z8bvlY2stUfE9JxA,3895
149
149
  autogen/agentchat/group/targets/__init__.py,sha256=AJNSbl9iMe2hiDmZojTp8h889o5OYN3V7f2_2nr8px4,145
@@ -158,7 +158,7 @@ autogen/agentchat/realtime/experimental/function_observer.py,sha256=M0cXXJNoBQ8s
158
158
  autogen/agentchat/realtime/experimental/realtime_agent.py,sha256=i8rxU-Tjk2Pz-WOZJ5PuRymaMvVLH66lqH2LJ85PxLM,5713
159
159
  autogen/agentchat/realtime/experimental/realtime_events.py,sha256=zmRr3pwPJpme5VZEADIz5vg9IZoT3Z1NAc3vt1RdWLk,1083
160
160
  autogen/agentchat/realtime/experimental/realtime_observer.py,sha256=nTouVj5-il0q2_P2LTpyb4pnHqyfwP5MJh_QmMJF3e8,3061
161
- autogen/agentchat/realtime/experimental/realtime_swarm.py,sha256=SRxG5jHGf52IGSXAUITPA9I_V2MqsBm-7ZlYVWbd6Ns,17501
161
+ autogen/agentchat/realtime/experimental/realtime_swarm.py,sha256=ENR7URgzaa4roTTLPAbUr44TT9FznG57kAU_0PIb34s,17809
162
162
  autogen/agentchat/realtime/experimental/websockets.py,sha256=bj9b5eq80L3KlGWPP6nn7uyfT_Z47kQqtIRbQkeE5SI,667
163
163
  autogen/agentchat/realtime/experimental/audio_adapters/__init__.py,sha256=rd0pEy91LYq0JMvIk8Fv7ZKIQLK7oZbVdgVAwNZDCmQ,315
164
164
  autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py,sha256=-O10rpqPKZKxZO58rQOxPnwECe-RQJoSUTU_K8i0A98,6110
@@ -185,7 +185,7 @@ autogen/agents/experimental/discord/discord.py,sha256=S5OkCgXJj2AnkEXZ3Z-pRG3_iD
185
185
  autogen/agents/experimental/document_agent/__init__.py,sha256=YNuO2YqxckrfAxcmRcI5JmiE7w52lkyVdLyvWxmbSUw,603
186
186
  autogen/agents/experimental/document_agent/chroma_query_engine.py,sha256=izKOopdDpmMfoAOCChR3evlLgJZkMh-x4XvsxiO9ol4,13864
187
187
  autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py,sha256=3QBqyZxtQCidxEdwwEHE0WRMgW1Q1nnFZnm9Z3ahwZI,5060
188
- autogen/agents/experimental/document_agent/document_agent.py,sha256=gxgbhTWfZSNHqY8SmJ7XdUxlRN_rT3Kgm7GADySCyaU,21797
188
+ autogen/agents/experimental/document_agent/document_agent.py,sha256=3QVW_Gt9IMFmXKtw9oWzUroa0N_lK9S3Ry4lnMzugkA,29628
189
189
  autogen/agents/experimental/document_agent/document_conditions.py,sha256=yahgDlnG6ORw9RQ6GLLrn_ZczHv9XrLcdouo6M9rSZQ,2051
190
190
  autogen/agents/experimental/document_agent/document_utils.py,sha256=g8PBcOFbVWtqP6rnRHJtY97ZJDXADcPzylkKZnhJfNA,14088
191
191
  autogen/agents/experimental/document_agent/inmemory_query_engine.py,sha256=UFv0u2V0QG3mne9BuQksWggEjGSmO4em4VGkU1Zm2to,8966
@@ -297,8 +297,8 @@ autogen/oai/cerebras.py,sha256=8hiSBq88l2yTXUJPV7AvGXRCtwvW0Y9hIYUnYK2S2os,12462
297
297
  autogen/oai/client.py,sha256=BB_6Heny6_7lq8q7ZAPKohHAK63zs9UGzRoUknTxjYY,65051
298
298
  autogen/oai/client_utils.py,sha256=lVbHyff7OnpdM-tXskC23xLdFccj2AalTdWA4DxzxS4,7543
299
299
  autogen/oai/cohere.py,sha256=pRcQWjbzKbZ1RfC1vk9WGjgndwjHbIaOVoKEYdV2L6c,19421
300
- autogen/oai/gemini.py,sha256=c0uA8FKpU1S3GpN7sKwU96uHbA1wEtlWHodwZvwMP58,41884
301
- autogen/oai/gemini_types.py,sha256=jRai-e2Qf73Yga17xm33OwKtkcU1oOKLITzFNB68LSg,5759
300
+ autogen/oai/gemini.py,sha256=45-MiC-m43KgqLHSBsCw7qUymI3Zic3NUeefZ-HeQtI,42398
301
+ autogen/oai/gemini_types.py,sha256=lIrQGcret7213YR07uFfrw1CnFiZ9VwPyb55KyS0GPI,5864
302
302
  autogen/oai/groq.py,sha256=pQWtaAY_AjT30XKbZNHXDzWsawBys3yFWlfy6K4Nqr8,12431
303
303
  autogen/oai/mistral.py,sha256=SlOYPdnNLHuTEHBGCzsemG9sLEgphdUukRurERdMsvI,12677
304
304
  autogen/oai/ollama.py,sha256=t0fIgDCoIfsQZ3hhpseam5N-fbpI7-fw82bG55mA8nU,29103
@@ -306,7 +306,7 @@ autogen/oai/openai_utils.py,sha256=4kEu50WeTGGG2uh1fOeMxRIZkEoov-YkkTgx2n5DhkM,3
306
306
  autogen/oai/together.py,sha256=Sj4LOk9RrBG3Bb5IVsrjBYz-hDECCyCgofsCdtk6PSM,14867
307
307
  autogen/oai/oai_models/__init__.py,sha256=cILDaaCCvSC3aAX85iLwE1RCpNEokA9925Zse5hX2K4,549
308
308
  autogen/oai/oai_models/_models.py,sha256=jr5nlvk7Be4W7wDVnwyjDL6m2CSj0RY1nOL1W3Kq0xI,478
309
- autogen/oai/oai_models/chat_completion.py,sha256=-swXmkd7jNtOK8jGi84KG-kx1zeYL2roBq0Jco7wKWA,3179
309
+ autogen/oai/oai_models/chat_completion.py,sha256=xs6OH9bWDzN5YQjDRmbJSPr19zySRwJOmyPtJJiNC-M,3188
310
310
  autogen/oai/oai_models/chat_completion_audio.py,sha256=a55i5E1EnT8qWdiKxbwF2kmgt4fih6x6HaChjs0ZuZE,950
311
311
  autogen/oai/oai_models/chat_completion_message.py,sha256=6MbrdgmqoAz0dUuyZZtM4NQd80ljVm3zDpP9_-l5zyw,2808
312
312
  autogen/oai/oai_models/chat_completion_message_tool_call.py,sha256=CWuqlwrk8VMSevpOZAMMPyw9KzNVnxEOfYs9y5tN5zw,1206
@@ -399,8 +399,8 @@ autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py,sha256=
399
399
  autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py,sha256=iqgpFJdyBHPPNCqkehSIbeuV8Rabr2eDMilT23Wx7PI,1687
400
400
  autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py,sha256=-6T5r6Er4mONPldRxv3F9tLoE7Og3qmeSeTC7Du_tTg,596
401
401
  autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py,sha256=Xig7K3A3DRnbv-UXfyo5bybGZUQYAQsltthfTYW5eV8,509
402
- ag2-0.9.2.dist-info/METADATA,sha256=w0e1Z82GEyYEh-kPA2PXa-nrXEPobW4vYHXSwHHmo-E,35095
403
- ag2-0.9.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
404
- ag2-0.9.2.dist-info/licenses/LICENSE,sha256=GEFQVNayAR-S_rQD5l8hPdgvgyktVdy4Bx5-v90IfRI,11384
405
- ag2-0.9.2.dist-info/licenses/NOTICE.md,sha256=07iCPQGbth4pQrgkSgZinJGT5nXddkZ6_MGYcBd2oiY,1134
406
- ag2-0.9.2.dist-info/RECORD,,
402
+ ag2-0.9.3.dist-info/METADATA,sha256=tFxYI45V9ZmkhlvptG5JWYFlC61We-KYQXvQdgWrmiI,35268
403
+ ag2-0.9.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
404
+ ag2-0.9.3.dist-info/licenses/LICENSE,sha256=GEFQVNayAR-S_rQD5l8hPdgvgyktVdy4Bx5-v90IfRI,11384
405
+ ag2-0.9.3.dist-info/licenses/NOTICE.md,sha256=07iCPQGbth4pQrgkSgZinJGT5nXddkZ6_MGYcBd2oiY,1134
406
+ ag2-0.9.3.dist-info/RECORD,,
@@ -60,15 +60,23 @@ class MessageHistoryLimiter:
60
60
  It trims the conversation history by removing older messages, retaining only the most recent messages.
61
61
  """
62
62
 
63
- def __init__(self, max_messages: Optional[int] = None, keep_first_message: bool = False):
63
+ def __init__(
64
+ self,
65
+ max_messages: Optional[int] = None,
66
+ keep_first_message: bool = False,
67
+ exclude_names: Optional[list[str]] = None,
68
+ ):
64
69
  """Args:
65
70
  max_messages Optional[int]: Maximum number of messages to keep in the context. Must be greater than 0 if not None.
66
71
  keep_first_message bool: Whether to keep the original first message in the conversation history.
67
72
  Defaults to False.
73
+ exclude_names Optional[list[str]]: List of message sender names to exclude from the message history.
74
+ Messages from these senders will be filtered out before applying the message limit. Defaults to None.
68
75
  """
69
76
  self._validate_max_messages(max_messages)
70
77
  self._max_messages = max_messages
71
78
  self._keep_first_message = keep_first_message
79
+ self._exclude_names = exclude_names
72
80
 
73
81
  def apply_transform(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
74
82
  """Truncates the conversation history to the specified maximum number of messages.
@@ -83,25 +91,30 @@ class MessageHistoryLimiter:
83
91
  Returns:
84
92
  List[Dict]: A new list containing the most recent messages up to the specified maximum.
85
93
  """
86
- if self._max_messages is None or len(messages) <= self._max_messages:
87
- return messages
94
+
95
+ exclude_names = getattr(self, "_exclude_names", None)
96
+
97
+ filtered = [msg for msg in messages if msg.get("name") not in exclude_names] if exclude_names else messages
98
+
99
+ if self._max_messages is None or len(filtered) <= self._max_messages:
100
+ return filtered
88
101
 
89
102
  truncated_messages = []
90
103
  remaining_count = self._max_messages
91
104
 
92
105
  # Start with the first message if we need to keep it
93
- if self._keep_first_message:
94
- truncated_messages = [messages[0]]
106
+ if self._keep_first_message and filtered:
107
+ truncated_messages = [filtered[0]]
95
108
  remaining_count -= 1
96
109
 
97
110
  # Loop through messages in reverse
98
- for i in range(len(messages) - 1, 0, -1):
111
+ for i in range(len(filtered) - 1, 0, -1):
99
112
  if remaining_count > 1:
100
- truncated_messages.insert(1 if self._keep_first_message else 0, messages[i])
113
+ truncated_messages.insert(1 if self._keep_first_message else 0, filtered[i])
101
114
  if remaining_count == 1: # noqa: SIM102
102
115
  # If there's only 1 slot left and it's a 'tools' message, ignore it.
103
- if messages[i].get("role") != "tool":
104
- truncated_messages.insert(1, messages[i])
116
+ if filtered[i].get("role") != "tool":
117
+ truncated_messages.insert(1, filtered[i])
105
118
 
106
119
  remaining_count -= 1
107
120
  if remaining_count == 0:
@@ -82,6 +82,46 @@ def link_agents_to_group_manager(agents: list[Agent], group_chat_manager: Agent)
82
82
  agent._group_manager = group_chat_manager # type: ignore[attr-defined]
83
83
 
84
84
 
85
+ def _evaluate_after_works_conditions(
86
+ agent: "ConversableAgent",
87
+ groupchat: GroupChat,
88
+ user_agent: Optional["ConversableAgent"],
89
+ ) -> Optional[Union[Agent, str]]:
90
+ """Evaluate after_works context conditions for an agent.
91
+
92
+ Args:
93
+ agent: The agent to evaluate after_works conditions for
94
+ groupchat: The current group chat
95
+ user_agent: Optional user proxy agent
96
+
97
+ Returns:
98
+ The resolved speaker selection result if a condition matches, None otherwise
99
+ """
100
+ if not hasattr(agent, "handoffs") or not agent.handoffs.after_works: # type: ignore[attr-defined]
101
+ return None
102
+
103
+ for after_work_condition in agent.handoffs.after_works: # type: ignore[attr-defined]
104
+ # Check if condition is available
105
+ is_available = (
106
+ after_work_condition.available.is_available(agent, groupchat.messages)
107
+ if after_work_condition.available
108
+ else True
109
+ )
110
+
111
+ # Evaluate the condition (None condition means always true)
112
+ if is_available and (
113
+ after_work_condition.condition is None or after_work_condition.condition.evaluate(agent.context_variables)
114
+ ):
115
+ # Condition matched, resolve and return
116
+ return after_work_condition.target.resolve(
117
+ groupchat,
118
+ agent,
119
+ user_agent,
120
+ ).get_speaker_selection_result(groupchat)
121
+
122
+ return None
123
+
124
+
85
125
  def _run_oncontextconditions(
86
126
  agent: "ConversableAgent",
87
127
  messages: Optional[list[dict[str, Any]]] = None,
@@ -94,7 +134,9 @@ def _run_oncontextconditions(
94
134
  on_condition.available.is_available(agent, messages if messages else []) if on_condition.available else True
95
135
  )
96
136
 
97
- if is_available and on_condition.condition.evaluate(agent.context_variables):
137
+ if is_available and (
138
+ on_condition.condition is None or on_condition.condition.evaluate(agent.context_variables)
139
+ ):
98
140
  # Condition has been met, we'll set the Tool Executor's next target
99
141
  # attribute and that will be picked up on the next iteration when
100
142
  # _determine_next_agent is called
@@ -161,12 +203,13 @@ def ensure_handoff_agents_in_group(agents: list["ConversableAgent"]) -> None:
161
203
  and context_conditions.target.agent_name not in agent_names
162
204
  ):
163
205
  raise ValueError("Agent in OnContextCondition Hand-offs must be in the agents list")
164
- if (
165
- agent.handoffs.after_work is not None
166
- and isinstance(agent.handoffs.after_work, (AgentTarget, AgentNameTarget))
167
- and agent.handoffs.after_work.agent_name not in agent_names
168
- ):
169
- raise ValueError("Agent in after work target Hand-offs must be in the agents list")
206
+ # Check after_works targets
207
+ for after_work_condition in agent.handoffs.after_works:
208
+ if (
209
+ isinstance(after_work_condition.target, (AgentTarget, AgentNameTarget))
210
+ and after_work_condition.target.agent_name not in agent_names
211
+ ):
212
+ raise ValueError("Agent in after work target Hand-offs must be in the agents list")
170
213
 
171
214
 
172
215
  def prepare_exclude_transit_messages(agents: list["ConversableAgent"]) -> None:
@@ -320,17 +363,19 @@ def setup_context_variables(
320
363
  tool_execution: "ConversableAgent",
321
364
  agents: list["ConversableAgent"],
322
365
  manager: GroupChatManager,
366
+ user_agent: Optional["ConversableAgent"],
323
367
  context_variables: ContextVariables,
324
368
  ) -> None:
325
- """Assign a common context_variables reference to all agents in the group, including the tool executor and group chat manager.
369
+ """Assign a common context_variables reference to all agents in the group, including the tool executor, group chat manager, and user proxy agent.
326
370
 
327
371
  Args:
328
372
  tool_execution: The tool execution agent.
329
373
  agents: List of all agents in the conversation.
330
374
  manager: GroupChatManager instance.
375
+ user_agent: Optional user proxy agent.
331
376
  context_variables: Context variables to assign to all agents.
332
377
  """
333
- for agent in agents + [tool_execution] + [manager]:
378
+ for agent in agents + [tool_execution] + [manager] + ([user_agent] if user_agent else []):
334
379
  agent.context_variables = context_variables
335
380
 
336
381
 
@@ -426,22 +471,25 @@ def determine_next_agent(
426
471
 
427
472
  # If the user last spoke, return to the agent prior to them (if they don't have an after work, otherwise it's treated like any other agent)
428
473
  if user_agent and last_speaker == user_agent:
429
- if user_agent.handoffs.after_work is None:
474
+ if not user_agent.handoffs.after_works:
430
475
  return last_agent_speaker
431
476
  else:
432
477
  last_agent_speaker = user_agent
433
478
 
434
479
  # AFTER WORK:
435
480
 
436
- # Get the appropriate After Work condition (from the agent if they have one, otherwise the group level one)
437
- after_work_condition = (
438
- last_agent_speaker.handoffs.after_work # type: ignore[attr-defined]
439
- if last_agent_speaker.handoffs.after_work is not None # type: ignore[attr-defined]
440
- else group_after_work
481
+ # First, try to evaluate after_works context conditions
482
+ after_works_result = _evaluate_after_works_conditions(
483
+ last_agent_speaker, # type: ignore[arg-type]
484
+ groupchat,
485
+ user_agent,
441
486
  )
487
+ if after_works_result is not None:
488
+ return after_works_result
442
489
 
490
+ # If no after_works conditions matched, use the group-level after_work
443
491
  # Resolve the next agent, termination, or speaker selection method
444
- resolved_speaker_selection_result = after_work_condition.resolve(
492
+ resolved_speaker_selection_result = group_after_work.resolve(
445
493
  groupchat,
446
494
  last_agent_speaker, # type: ignore[arg-type]
447
495
  user_agent,
@@ -525,10 +573,7 @@ def create_group_manager(
525
573
  if (
526
574
  len(agent.handoffs.get_context_conditions_by_target_type(GroupManagerTarget)) > 0
527
575
  or len(agent.handoffs.get_llm_conditions_by_target_type(GroupManagerTarget)) > 0
528
- or (
529
- agent.handoffs.after_work is not None
530
- and isinstance(agent.handoffs.after_work, GroupManagerTarget)
531
- )
576
+ or any(isinstance(aw.target, GroupManagerTarget) for aw in agent.handoffs.after_works)
532
577
  ):
533
578
  has_group_manager_target = True
534
579
  break
@@ -2,7 +2,7 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
- from typing import Optional, Union, overload
5
+ from typing import Union, overload
6
6
 
7
7
  from pydantic import BaseModel, Field
8
8
 
@@ -30,7 +30,7 @@ class Handoffs(BaseModel):
30
30
 
31
31
  context_conditions: list[OnContextCondition] = Field(default_factory=list)
32
32
  llm_conditions: list[OnCondition] = Field(default_factory=list)
33
- after_work: Optional[TransitionTarget] = None
33
+ after_works: list[OnContextCondition] = Field(default_factory=list)
34
34
 
35
35
  def add_context_condition(self, condition: OnContextCondition) -> "Handoffs":
36
36
  """
@@ -102,7 +102,9 @@ class Handoffs(BaseModel):
102
102
 
103
103
  def set_after_work(self, target: TransitionTarget) -> "Handoffs":
104
104
  """
105
- Set the after work target (only one allowed).
105
+ Set the after work target (replaces all after_works with single entry).
106
+
107
+ For backward compatibility, this creates an OnContextCondition with no condition (always true).
106
108
 
107
109
  Args:
108
110
  target: The after work TransitionTarget to set
@@ -113,7 +115,81 @@ class Handoffs(BaseModel):
113
115
  if not isinstance(target, TransitionTarget):
114
116
  raise TypeError(f"Expected a TransitionTarget instance, got {type(target).__name__}")
115
117
 
116
- self.after_work = target
118
+ # Create OnContextCondition with no condition (always true)
119
+ after_work_condition = OnContextCondition(target=target, condition=None)
120
+ self.after_works = [after_work_condition]
121
+ return self
122
+
123
+ def add_after_work(self, condition: OnContextCondition) -> "Handoffs":
124
+ """
125
+ Add a single after-work condition.
126
+
127
+ If the condition has condition=None, it will replace any existing
128
+ condition=None entry and be placed at the end.
129
+
130
+ Args:
131
+ condition: The OnContextCondition to add
132
+
133
+ Returns:
134
+ Self for method chaining
135
+ """
136
+ if not isinstance(condition, OnContextCondition):
137
+ raise TypeError(f"Expected an OnContextCondition instance, got {type(condition).__name__}")
138
+
139
+ if condition.condition is None:
140
+ # Remove any existing condition=None entries
141
+ self.after_works = [c for c in self.after_works if c.condition is not None]
142
+ # Add the new one at the end
143
+ self.after_works.append(condition)
144
+ else:
145
+ # For regular conditions, check if we need to move condition=None to the end
146
+ none_conditions = [c for c in self.after_works if c.condition is None]
147
+ if none_conditions:
148
+ # Remove the None condition temporarily
149
+ self.after_works = [c for c in self.after_works if c.condition is not None]
150
+ # Add the new regular condition
151
+ self.after_works.append(condition)
152
+ # Re-add the None condition at the end
153
+ self.after_works.append(none_conditions[0])
154
+ else:
155
+ # No None condition exists, just append
156
+ self.after_works.append(condition)
157
+
158
+ return self
159
+
160
+ def add_after_works(self, conditions: list[OnContextCondition]) -> "Handoffs":
161
+ """
162
+ Add multiple after-work conditions.
163
+
164
+ Special handling for condition=None entries:
165
+ - Only one condition=None entry is allowed (the fallback)
166
+ - It will always be placed at the end of the list
167
+ - If multiple condition=None entries are provided, only the last one is kept
168
+
169
+ Args:
170
+ conditions: List of OnContextConditions to add
171
+
172
+ Returns:
173
+ Self for method chaining
174
+ """
175
+ # Validate that it is a list of OnContextConditions
176
+ if not all(isinstance(condition, OnContextCondition) for condition in conditions):
177
+ raise TypeError("All conditions must be of type OnContextCondition")
178
+
179
+ # Separate conditions with None and without None
180
+ none_conditions = [c for c in conditions if c.condition is None]
181
+ regular_conditions = [c for c in conditions if c.condition is not None]
182
+
183
+ # Remove any existing condition=None entries
184
+ self.after_works = [c for c in self.after_works if c.condition is not None]
185
+
186
+ # Add regular conditions
187
+ self.after_works.extend(regular_conditions)
188
+
189
+ # Add at most one None condition at the end
190
+ if none_conditions:
191
+ self.after_works.append(none_conditions[-1]) # Use the last one if multiple provided
192
+
117
193
  return self
118
194
 
119
195
  @overload
@@ -186,7 +262,7 @@ class Handoffs(BaseModel):
186
262
  """
187
263
  self.context_conditions.clear()
188
264
  self.llm_conditions.clear()
189
- self.after_work = None
265
+ self.after_works.clear()
190
266
  return self
191
267
 
192
268
  def get_llm_conditions_by_target_type(self, target_type: type) -> list[OnCondition]:
@@ -24,12 +24,12 @@ class OnContextCondition(BaseModel): # noqa: N801
24
24
 
25
25
  Args:
26
26
  target (TransitionTarget): The transition (essentially an agent) to hand off to.
27
- condition (ContextCondition): The context variable based condition for transitioning to the target agent.
27
+ condition (Optional[ContextCondition]): The context variable based condition for transitioning to the target agent. If None, the condition always evaluates to True.
28
28
  available (AvailableCondition): Optional condition to determine if this OnCondition is included for the LLM to evaluate based on context variables using classes like StringAvailableCondition and ContextExpressionAvailableCondition.
29
29
  """
30
30
 
31
31
  target: TransitionTarget
32
- condition: ContextCondition
32
+ condition: Optional[ContextCondition] = None
33
33
  available: Optional[AvailableCondition] = None
34
34
 
35
35
  def has_target_type(self, target_type: type) -> bool:
@@ -152,7 +152,13 @@ class Pattern(ABC):
152
152
  manager = create_group_manager(groupchat, self.group_manager_args, self.agents, self.group_after_work)
153
153
 
154
154
  # Point all agent's context variables to this function's context_variables
155
- setup_context_variables(tool_executor, self.agents, manager, self.context_variables)
155
+ setup_context_variables(
156
+ tool_execution=tool_executor,
157
+ agents=self.agents,
158
+ manager=manager,
159
+ user_agent=self.user_agent,
160
+ context_variables=self.context_variables,
161
+ )
156
162
 
157
163
  # Link all agents with the GroupChatManager to allow access to the group chat
158
164
  link_agents_to_group_manager(groupchat.agents, manager)
@@ -1489,10 +1489,10 @@ class GroupChatManager(ConversableAgent):
1489
1489
  for agent in self._groupchat.agents:
1490
1490
  if agent.name == message["name"]:
1491
1491
  # An agent`s message is sent to the Group Chat Manager
1492
- agent.a_send(message, self, request_reply=False, silent=True)
1492
+ await agent.a_send(message, self, request_reply=False, silent=True)
1493
1493
  else:
1494
1494
  # Otherwise, messages are sent from the Group Chat Manager to the agent
1495
- self.a_send(message, agent, request_reply=False, silent=True)
1495
+ await self.a_send(message, agent, request_reply=False, silent=True)
1496
1496
 
1497
1497
  # Add previous message to the new groupchat, if it's an admin message the name may not match so add the message directly
1498
1498
  if message_speaker_agent:
@@ -104,7 +104,7 @@ def parse_oai_message(message: Union[dict[str, Any], str], role: str, adressee:
104
104
  return oai_message
105
105
 
106
106
 
107
- class SwarmableAgent:
107
+ class SwarmableAgent(Agent):
108
108
  """A class for an agent that can participate in a swarm chat."""
109
109
 
110
110
  def __init__(
@@ -239,7 +239,7 @@ class SwarmableAgent:
239
239
  sender: Optional["Agent"] = None,
240
240
  **kwargs: Any,
241
241
  ) -> Union[str, dict[str, Any], None]:
242
- raise NotImplementedError
242
+ return self.generate_reply(messages=messages, sender=sender, **kwargs)
243
243
 
244
244
  async def a_receive(
245
245
  self,
@@ -247,7 +247,7 @@ class SwarmableAgent:
247
247
  sender: "Agent",
248
248
  request_reply: Optional[bool] = None,
249
249
  ) -> None:
250
- raise NotImplementedError
250
+ self.receive(message, sender, request_reply)
251
251
 
252
252
  async def a_send(
253
253
  self,
@@ -255,7 +255,7 @@ class SwarmableAgent:
255
255
  recipient: "Agent",
256
256
  request_reply: Optional[bool] = None,
257
257
  ) -> None:
258
- raise NotImplementedError
258
+ self.send(message, recipient, request_reply)
259
259
 
260
260
  @property
261
261
  def chat_messages(self) -> dict[Agent, list[dict[str, Any]]]:
@@ -293,6 +293,14 @@ class SwarmableAgent:
293
293
  def _raise_exception_on_async_reply_functions(self) -> None:
294
294
  pass
295
295
 
296
+ def set_ui_tools(self, tools: Optional[list] = None) -> None:
297
+ """Set UI tools for the agent."""
298
+ pass
299
+
300
+ def unset_ui_tools(self) -> None:
301
+ """Unset UI tools for the agent."""
302
+ pass
303
+
296
304
  @staticmethod
297
305
  def _last_msg_as_summary(sender: Agent, recipient: Agent, summary_args: Optional[dict[str, Any]]) -> str:
298
306
  """Get a chat summary from the last message of the recipient."""
@@ -42,7 +42,8 @@ TASK_MANAGER_NAME = "TaskManagerAgent"
42
42
  TASK_MANAGER_SYSTEM_MESSAGE = """
43
43
  You are a task manager agent. You have 2 priorities:
44
44
  1. You initiate the tasks which updates the context variables based on the task decisions (DocumentTask) from the DocumentTriageAgent.
45
- If the DocumentTriageAgent has suggested any ingestions or queries, call initiate_tasks to record them.
45
+ ALWAYS call initiate_tasks first when you receive a message from the DocumentTriageAgent, even if you think there are no new tasks.
46
+ This ensures that any new ingestions or queries from the triage agent are properly recorded.
46
47
  Put all ingestion and query tasks into the one tool call.
47
48
  i.e. output
48
49
  {
@@ -75,7 +76,7 @@ TASK_MANAGER_SYSTEM_MESSAGE = """
75
76
  Transfer to the summary agent if all ingestion and query tasks are done.
76
77
  """
77
78
 
78
- DEFAULT_ERROR_SWARM_MESSAGE: str = """
79
+ DEFAULT_ERROR_GROUP_CHAT_MESSAGE: str = """
79
80
  Document Agent failed to perform task.
80
81
  """
81
82
 
@@ -147,7 +148,7 @@ class DocAgent(ConversableAgent):
147
148
  """
148
149
  The DocAgent is responsible for ingest and querying documents.
149
150
 
150
- Internally, it generates a group of swarm agents to solve tasks.
151
+ Internally, it generates a group chat with a set of agents to ingest, query, and summarize.
151
152
  """
152
153
 
153
154
  def __init__(
@@ -196,7 +197,7 @@ class DocAgent(ConversableAgent):
196
197
  llm_config=llm_config,
197
198
  human_input_mode="NEVER",
198
199
  )
199
- self.register_reply([ConversableAgent, None], self.generate_inner_swarm_reply, position=0)
200
+ self.register_reply([ConversableAgent, None], self.generate_inner_group_chat_reply, position=0)
200
201
 
201
202
  self.context_variables: ContextVariables = ContextVariables(
202
203
  data={
@@ -210,7 +211,15 @@ class DocAgent(ConversableAgent):
210
211
  self._triage_agent = DocumentTriageAgent(llm_config=llm_config)
211
212
 
212
213
def create_error_agent_prompt(agent: ConversableAgent, messages: list[dict[str, Any]]) -> str:
    """Create the error agent prompt, primarily used to update ingested documents for ending.

    Args:
        agent: The conversable agent requesting the prompt (unused; required by the
            update-system-message callback signature)
        messages: List of conversation messages (unused; required by the callback signature)

    Returns:
        str: The error manager system message
    """
    # Side effect: refresh the persisted list of ingested documents before the
    # error agent terminates the flow, so the outer DocAgent keeps an accurate record.
    update_ingested_documents()

    return ERROR_MANAGER_SYSTEM_MESSAGE
@@ -223,7 +232,11 @@ class DocAgent(ConversableAgent):
223
232
  )
224
233
 
225
234
  def update_ingested_documents() -> None:
226
- """Updates the list of ingested documents, persisted so we can keep a list over multiple replies"""
235
+ """Updates the list of ingested documents, persisted so we can keep a list over multiple replies.
236
+
237
+ This function updates self.documents_ingested with any new documents that have been ingested
238
+ by the triage agent, ensuring persistence across multiple DocAgent interactions.
239
+ """
227
240
  agent_documents_ingested = self._triage_agent.context_variables.get("DocumentsIngested", [])
228
241
  # Update self.documents_ingested with any new documents ingested
229
242
  for doc in agent_documents_ingested: # type: ignore[union-attr]
@@ -234,21 +247,162 @@ class DocAgent(ConversableAgent):
234
247
  ingestions: Annotated[list[Ingest], Field(description="List of documents, files, and URLs to ingest")]
235
248
  queries: Annotated[list[Query], Field(description="List of queries to run")]
236
249
 
250
+ def _deduplicate_ingestions(
251
+ new_ingestions: list[Ingest], existing_ingestions: list[Ingest], documents_ingested: list[str]
252
+ ) -> tuple[list[Ingest], list[str]]:
253
+ """Deduplicate ingestions against existing pending and already ingested documents.
254
+
255
+ Args:
256
+ new_ingestions: List of new ingestion requests to process
257
+ existing_ingestions: List of ingestions already pending
258
+ documents_ingested: List of document paths already ingested
259
+
260
+ Returns:
261
+ tuple: (new_unique_ingestions, ignored_duplicate_paths)
262
+ """
263
+ unique_ingestions = []
264
+ ignored_paths = []
265
+
266
+ for ingestion in new_ingestions:
267
+ ingestion_path = ingestion.path_or_url
268
+ # Check if already in pending ingestions
269
+ already_pending = any(existing.path_or_url == ingestion_path for existing in existing_ingestions)
270
+ # Check if already ingested
271
+ already_ingested = ingestion_path in documents_ingested
272
+
273
+ if already_pending or already_ingested:
274
+ ignored_paths.append(ingestion_path)
275
+ else:
276
+ unique_ingestions.append(ingestion)
277
+
278
+ return unique_ingestions, ignored_paths
279
+
280
+ def _deduplicate_queries(
281
+ new_queries: list[Query], existing_queries: list[Query]
282
+ ) -> tuple[list[Query], list[str]]:
283
+ """Deduplicate queries against existing pending queries.
284
+
285
+ Args:
286
+ new_queries: List of new query requests to process
287
+ existing_queries: List of queries already pending
288
+
289
+ Returns:
290
+ tuple: (new_unique_queries, ignored_duplicate_query_texts)
291
+ """
292
+ unique_queries = []
293
+ ignored_query_texts = []
294
+
295
+ for query in new_queries:
296
+ query_text = query.query
297
+ # Check if query already exists in pending queries
298
+ already_pending = any(existing.query == query_text for existing in existing_queries)
299
+
300
+ if already_pending:
301
+ ignored_query_texts.append(query_text)
302
+ else:
303
+ unique_queries.append(query)
304
+
305
+ return unique_queries, ignored_query_texts
306
+
307
+ def _build_response_message(
308
+ added_ingestions: int, ignored_ingestions: list[str], added_queries: int, ignored_queries: list[str]
309
+ ) -> str:
310
+ """Build a descriptive response message about what was added/ignored.
311
+
312
+ Args:
313
+ added_ingestions: Number of unique ingestions added
314
+ ignored_ingestions: List of duplicate ingestion paths ignored
315
+ added_queries: Number of unique queries added
316
+ ignored_queries: List of duplicate query texts ignored
317
+
318
+ Returns:
319
+ str: Formatted message describing the results
320
+ """
321
+ messages = []
322
+
323
+ if added_ingestions > 0:
324
+ messages.append(f"Added {added_ingestions} new document(s) for ingestion")
325
+
326
+ if ignored_ingestions:
327
+ messages.append(
328
+ f"Ignored {len(ignored_ingestions)} duplicate document(s): {', '.join(ignored_ingestions)}"
329
+ )
330
+
331
+ if added_queries > 0:
332
+ messages.append(f"Added {added_queries} new query/queries")
333
+
334
+ if ignored_queries:
335
+ messages.append(f"Ignored {len(ignored_queries)} duplicate query/queries: {', '.join(ignored_queries)}")
336
+
337
+ if messages:
338
+ return "; ".join(messages)
339
+ else:
340
+ return "All requested tasks were duplicates and ignored"
341
+
237
342
def initiate_tasks(
    task_init_info: Annotated[TaskInitInfo, "Documents, Files, URLs to ingest and the queries to run"],
    context_variables: Annotated[ContextVariables, "Context variables"],
) -> ReplyResult:
    """Add documents to ingest and queries to answer when received.

    On the first call, records all ingestions/queries verbatim and sets the
    "TaskInitiated" flag. On follow-up calls (flag present), deduplicates the
    incoming tasks against pending and already-completed work and reports what
    was added versus ignored. Always routes control back to the task manager.

    Args:
        task_init_info: Information about documents to ingest and queries to run
        context_variables: The current context variables containing task state

    Returns:
        ReplyResult: Contains response message, updated context, and target agent
    """
    ingestions = task_init_info.ingestions
    queries = task_init_info.queries

    # Membership test relies on ContextVariables supporting `in` for key presence.
    if "TaskInitiated" in context_variables:
        # Handle follow-up tasks with deduplication
        added_ingestions_count = 0
        ignored_ingestions = []
        added_queries_count = 0
        ignored_queries = []

        if ingestions:
            existing_ingestions: list[Ingest] = context_variables.get("DocumentsToIngest", [])  # type: ignore[assignment]
            documents_ingested: list[str] = context_variables.get("DocumentsIngested", [])  # type: ignore[assignment]

            unique_ingestions, ignored_ingestion_paths = _deduplicate_ingestions(
                ingestions, existing_ingestions, documents_ingested
            )

            if unique_ingestions:
                # Rebind (rather than mutate) so the context variable is replaced atomically.
                context_variables["DocumentsToIngest"] = existing_ingestions + unique_ingestions
                added_ingestions_count = len(unique_ingestions)

            ignored_ingestions = ignored_ingestion_paths

        if queries:
            existing_queries: list[Query] = context_variables.get("QueriesToRun", [])  # type: ignore[assignment]

            unique_queries, ignored_query_texts = _deduplicate_queries(queries, existing_queries)

            if unique_queries:
                context_variables["QueriesToRun"] = existing_queries + unique_queries
                added_queries_count = len(unique_queries)

            ignored_queries = ignored_query_texts

        # Both input lists empty: nothing to record, short-circuit with a distinct message.
        if not ingestions and not queries:
            return ReplyResult(message="No new tasks to initiate", context_variables=context_variables)

        response_message = _build_response_message(
            added_ingestions_count, ignored_ingestions, added_queries_count, ignored_queries
        )

    else:
        # First time initialization - no deduplication needed
        context_variables["DocumentsToIngest"] = ingestions
        context_variables["QueriesToRun"] = [query for query in queries]
        context_variables["TaskInitiated"] = True
        response_message = "Updated context variables with task decisions"

    # Hand control back to the task manager agent, which drives ingestion/query execution.
    return ReplyResult(
        message=response_message,
        context_variables=context_variables,
        target=AgentNameTarget(agent_name=TASK_MANAGER_NAME),
    )
@@ -271,7 +425,14 @@ class DocAgent(ConversableAgent):
271
425
  )
272
426
 
273
427
  def execute_rag_query(context_variables: ContextVariables) -> ReplyResult: # type: ignore[type-arg]
274
- """Execute outstanding RAG queries, call the tool once for each outstanding query. Call this tool with no arguments."""
428
+ """Execute outstanding RAG queries, call the tool once for each outstanding query. Call this tool with no arguments.
429
+
430
+ Args:
431
+ context_variables: The current context variables containing queries to run
432
+
433
+ Returns:
434
+ ReplyResult: Contains query answer, updated context, and target agent
435
+ """
275
436
  if len(context_variables["QueriesToRun"]) == 0:
276
437
  return ReplyResult(
277
438
  target=AgentNameTarget(agent_name=TASK_MANAGER_NAME),
@@ -303,6 +464,9 @@ class DocAgent(ConversableAgent):
303
464
  context_variables["QueriesToRun"].pop(0)
304
465
  context_variables["CompletedTaskCount"] += 1
305
466
  context_variables["QueryResults"].append({"query": query, "answer": answer, "citations": txt_citations})
467
+
468
+ # Query completed
469
+
306
470
  return ReplyResult(message=answer, context_variables=context_variables)
307
471
  except Exception as e:
308
472
  return ReplyResult(
@@ -322,9 +486,17 @@ class DocAgent(ConversableAgent):
322
486
  functions=[execute_rag_query],
323
487
  )
324
488
 
325
- # Summary agent prompt will include the results of the ingestions and swarms
489
+ # Summary agent prompt will include the results of the ingestions and queries
326
490
  def create_summary_agent_prompt(agent: ConversableAgent, messages: list[dict[str, Any]]) -> str:
327
- """Create the summary agent prompt and updates ingested documents"""
491
+ """Create the summary agent prompt and updates ingested documents.
492
+
493
+ Args:
494
+ agent: The conversable agent requesting the prompt
495
+ messages: List of conversation messages
496
+
497
+ Returns:
498
+ str: The summary agent system message with context information
499
+ """
328
500
  update_ingested_documents()
329
501
 
330
502
  documents_to_ingest: list[Ingest] = cast(list[Ingest], agent.context_variables.get("DocumentsToIngest", []))
@@ -368,14 +540,7 @@ class DocAgent(ConversableAgent):
368
540
  expression=ContextExpression(expression="len(${QueriesToRun}) > 0")
369
541
  ),
370
542
  ),
371
- OnContextCondition( # Go to Summary agent if no documents or queries left to run and we have query results
372
- target=AgentTarget(agent=self._summary_agent),
373
- condition=ExpressionContextCondition(
374
- expression=ContextExpression(
375
- expression="len(${DocumentsToIngest}) == 0 and len(${QueriesToRun}) == 0 and len(${QueryResults}) > 0"
376
- )
377
- ),
378
- ),
543
+ # Removed automatic context condition - let task manager decide when to summarize
379
544
  OnCondition(
380
545
  target=AgentTarget(agent=self._summary_agent),
381
546
  condition=StringLLMCondition(
@@ -396,28 +561,45 @@ class DocAgent(ConversableAgent):
396
561
  # The Error Agent always terminates the DocumentAgent
397
562
  self._error_agent.handoffs.set_after_work(target=TerminateTarget())
398
563
 
399
- self.register_reply([Agent, None], DocAgent.generate_inner_swarm_reply)
564
+ self.register_reply([Agent, None], DocAgent.generate_inner_group_chat_reply)
400
565
 
401
566
  self.documents_ingested: list[str] = []
567
+ self._group_chat_context_variables: Optional[ContextVariables] = None
402
568
 
403
- def generate_inner_swarm_reply(
569
+ def generate_inner_group_chat_reply(
404
570
  self,
405
571
  messages: Optional[Union[list[dict[str, Any]], str]] = None,
406
572
  sender: Optional[Agent] = None,
407
573
  config: Optional[OpenAIWrapper] = None,
408
574
  ) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
409
- """Reply function that generates the inner swarm reply for the DocAgent."""
410
- context_variables: ContextVariables = ContextVariables(
411
- data={
412
- "CompletedTaskCount": 0,
413
- "DocumentsToIngest": [],
414
- "DocumentsIngested": self.documents_ingested,
415
- "QueriesToRun": [],
416
- "QueryResults": [],
417
- }
418
- )
575
+ """Reply function that generates the inner group chat reply for the DocAgent.
576
+
577
+ Args:
578
+ messages: Input messages to process
579
+ sender: The agent that sent the message
580
+ config: OpenAI wrapper configuration
581
+
582
+ Returns:
583
+ tuple: (should_terminate, reply_message)
584
+ """
585
+ # Use existing context_variables if available, otherwise create new ones
586
+ if hasattr(self, "_group_chat_context_variables") and self._group_chat_context_variables is not None:
587
+ context_variables = self._group_chat_context_variables
588
+ # Reset for the new run
589
+ context_variables["DocumentsToIngest"] = [] # type: ignore[index]
590
+ else:
591
+ context_variables = ContextVariables(
592
+ data={
593
+ "CompletedTaskCount": 0,
594
+ "DocumentsToIngest": [],
595
+ "DocumentsIngested": self.documents_ingested,
596
+ "QueriesToRun": [],
597
+ "QueryResults": [],
598
+ }
599
+ )
600
+ self._group_chat_context_variables = context_variables
419
601
 
420
- swarm_agents = [
602
+ group_chat_agents = [
421
603
  self._triage_agent,
422
604
  self._task_manager_agent,
423
605
  self._data_ingestion_agent,
@@ -428,7 +610,7 @@ class DocAgent(ConversableAgent):
428
610
 
429
611
  agent_pattern = DefaultPattern(
430
612
  initial_agent=self._triage_agent,
431
- agents=swarm_agents,
613
+ agents=group_chat_agents,
432
614
  context_variables=context_variables,
433
615
  group_after_work=TerminateTarget(),
434
616
  )
@@ -441,13 +623,23 @@ class DocAgent(ConversableAgent):
441
623
  # If we finish with the error agent, we return their message which contains the error
442
624
  return True, chat_result.summary
443
625
  if last_speaker != self._summary_agent:
444
- # If the swarm finished but not with the summary agent, we assume something has gone wrong with the flow
445
- return True, DEFAULT_ERROR_SWARM_MESSAGE
626
+ # If the group chat finished but not with the summary agent, we assume something has gone wrong with the flow
627
+ return True, DEFAULT_ERROR_GROUP_CHAT_MESSAGE
446
628
 
447
629
  return True, chat_result.summary
448
630
 
449
631
  def _get_document_input_message(self, messages: Optional[Union[list[dict[str, Any]], str]]) -> str: # type: ignore[type-arg]
450
- """Gets and validates the input message(s) for the document agent."""
632
+ """Gets and validates the input message(s) for the document agent.
633
+
634
+ Args:
635
+ messages: Input messages as string or list of message dictionaries
636
+
637
+ Returns:
638
+ str: The extracted message content
639
+
640
+ Raises:
641
+ NotImplementedError: If messages format is invalid
642
+ """
451
643
  if isinstance(messages, str):
452
644
  return messages
453
645
  elif (
autogen/oai/gemini.py CHANGED
@@ -574,7 +574,16 @@ class GeminiClient:
574
574
  if self.use_vertexai
575
575
  else rst.append(Content(parts=parts, role=role))
576
576
  )
577
- elif part_type == "tool" or part_type == "tool_call":
577
+ elif part_type == "tool":
578
+ # Function responses should be assigned "model" role to keep them separate from function calls
579
+ role = "function" if version.parse(genai.__version__) < version.parse("1.4.0") else "model"
580
+ rst.append(
581
+ VertexAIContent(parts=parts, role=role)
582
+ if self.use_vertexai
583
+ else rst.append(Content(parts=parts, role=role))
584
+ )
585
+ elif part_type == "tool_call":
586
+ # Function calls should be assigned "user" role
578
587
  role = "function" if version.parse(genai.__version__) < version.parse("1.4.0") else "user"
579
588
  rst.append(
580
589
  VertexAIContent(parts=parts, role=role)
@@ -141,6 +141,7 @@ class RetrievalConfig(CommonBaseModel):
141
141
  """Retrieval config."""
142
142
 
143
143
  lat_lng: Optional[LatLng] = Field(default=None, description="""Optional. The location of the user.""")
144
+ language_code: Optional[str] = Field(default=None, description="""The language code of the user.""")
144
145
 
145
146
 
146
147
  class ToolConfig(CommonBaseModel):
@@ -66,7 +66,7 @@ class ChatCompletion(BaseModel):
66
66
  object: Literal["chat.completion"]
67
67
  """The object type, which is always `chat.completion`."""
68
68
 
69
- service_tier: Optional[Literal["auto", "default", "flex"]] = None
69
+ service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None
70
70
  """The service tier used for processing the request."""
71
71
 
72
72
  system_fingerprint: Optional[str] = None
autogen/version.py CHANGED
@@ -4,4 +4,4 @@
4
4
 
5
5
  __all__ = ["__version__"]
6
6
 
7
- __version__ = "0.9.2"
7
+ __version__ = "0.9.3"
File without changes