ag2 0.8.0b1__tar.gz → 0.8.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ag2
-Version: 0.8.0b1
+Version: 0.8.1
 Summary: Alias package for pyautogen
 Home-page: https://github.com/ag2ai/ag2
 Author: Chi Wang & Qingyun Wu
@@ -39,6 +39,7 @@ Provides-Extra: teachable
 Provides-Extra: lmm
 Provides-Extra: graph
 Provides-Extra: gemini
+Provides-Extra: gemini-realtime
 Provides-Extra: together
 Provides-Extra: websurfer
 Provides-Extra: redis
@@ -52,6 +53,7 @@ Provides-Extra: groq
 Provides-Extra: cohere
 Provides-Extra: ollama
 Provides-Extra: bedrock
+Provides-Extra: deepseek
 Provides-Extra: commsagent-discord
 Provides-Extra: commsagent-slack
 Provides-Extra: commsagent-telegram
The same three metadata hunks appear a second time in the diff: the sdist ships a second copy of the package metadata, and it receives the identical version bump plus the two new Provides-Extra entries (gemini-realtime and deepseek).
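The metadata now advertises two new extras, gemini-realtime and deepseek. As a quick, illustrative check (standard-library importlib.metadata only, nothing ag2-specific), the extras declared by an installed ag2 distribution can be listed like this:

import importlib.metadata

# Read the Provides-Extra fields from the installed ag2 distribution's metadata.
md = importlib.metadata.metadata("ag2")
extras = md.get_all("Provides-Extra") or []
print("gemini-realtime" in extras)  # True once ag2 0.8.1 is installed
print("deepseek" in extras)         # likewise True for 0.8.1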
@@ -0,0 +1,151 @@
+pyautogen==0.8.1
+
+[anthropic]
+pyautogen[anthropic]==0.8.1
+
+[autobuild]
+pyautogen[autobuild]==0.8.1
+
+[bedrock]
+pyautogen[bedrock]==0.8.1
+
+[blendsearch]
+pyautogen[blendsearch]==0.8.1
+
+[browser-use]
+pyautogen[browser-use]==0.8.1
+
+[captainagent]
+pyautogen[captainagent]==0.8.1
+
+[cerebras]
+pyautogen[cerebras]==0.8.1
+
+[cohere]
+pyautogen[cohere]==0.8.1
+
+[commsagent-discord]
+pyautogen[commsagent-discord]==0.8.1
+
+[commsagent-slack]
+pyautogen[commsagent-slack]==0.8.1
+
+[commsagent-telegram]
+pyautogen[commsagent-telegram]==0.8.1
+
+[cosmosdb]
+pyautogen[cosmosdb]==0.8.1
+
+[crawl4ai]
+pyautogen[crawl4ai]==0.8.1
+
+[deepseek]
+pyautogen[deepseek]==0.8.1
+
+[dev]
+pyautogen[dev]==0.8.1
+
+[docs]
+pyautogen[docs]==0.8.1
+
+[flaml]
+pyautogen[flaml]==0.8.1
+
+[gemini]
+pyautogen[gemini]==0.8.1
+
+[gemini-realtime]
+pyautogen[gemini-realtime]==0.8.1
+
+[graph]
+pyautogen[graph]==0.8.1
+
+[graph-rag-falkor-db]
+pyautogen[graph-rag-falkor-db]==0.8.1
+
+[groq]
+pyautogen[groq]==0.8.1
+
+[interop]
+pyautogen[interop]==0.8.1
+
+[interop-crewai]
+pyautogen[interop-crewai]==0.8.1
+
+[interop-langchain]
+pyautogen[interop-langchain]==0.8.1
+
+[interop-pydantic-ai]
+pyautogen[interop-pydantic-ai]==0.8.1
+
+[jupyter-executor]
+pyautogen[jupyter-executor]==0.8.1
+
+[lint]
+pyautogen[lint]==0.8.1
+
+[lmm]
+pyautogen[lmm]==0.8.1
+
+[long-context]
+pyautogen[long-context]==0.8.1
+
+[mathchat]
+pyautogen[mathchat]==0.8.1
+
+[mistral]
+pyautogen[mistral]==0.8.1
+
+[neo4j]
+pyautogen[neo4j]==0.8.1
+
+[ollama]
+pyautogen[ollama]==0.8.1
+
+[openai]
+pyautogen[openai]==0.8.1
+
+[openai-realtime]
+pyautogen[openai-realtime]==0.8.1
+
+[rag]
+pyautogen[rag]==0.8.1
+
+[redis]
+pyautogen[redis]==0.8.1
+
+[retrievechat]
+pyautogen[retrievechat]==0.8.1
+
+[retrievechat-couchbase]
+pyautogen[retrievechat-couchbase]==0.8.1
+
+[retrievechat-mongodb]
+pyautogen[retrievechat-mongodb]==0.8.1
+
+[retrievechat-pgvector]
+pyautogen[retrievechat-pgvector]==0.8.1
+
+[retrievechat-qdrant]
+pyautogen[retrievechat-qdrant]==0.8.1
+
+[teachable]
+pyautogen[teachable]==0.8.1
+
+[test]
+pyautogen[test]==0.8.1
+
+[together]
+pyautogen[together]==0.8.1
+
+[twilio]
+pyautogen[twilio]==0.8.1
+
+[types]
+pyautogen[types]==0.8.1
+
+[websockets]
+pyautogen[websockets]==0.8.1
+
+[websurfer]
+pyautogen[websurfer]==0.8.1
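Every section of this newly added requires file forwards an ag2 extra to the identically named pyautogen extra, pinned to the same 0.8.1 version. A small, hedged check of that forwarding at runtime (the exact requirement strings depend on the metadata of the installed distribution):

import importlib.metadata

# Requires-Dist entries carry an environment marker naming the extra they belong to.
requirements = importlib.metadata.requires("ag2") or []
print([r for r in requirements if 'extra == "deepseek"' in r])
# Expected to show something like: ['pyautogen[deepseek]==0.8.1; extra == "deepseek"']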
@@ -80,7 +80,7 @@ openai = [
 ]
 
 openai-realtime = [
-    "autogen[openai]",
+    "pyautogen[openai]",
     "openai[realtime]",
 ]
 
@@ -139,6 +139,10 @@ rag = [
     "chromadb>=0.5,<1",
     "llama-index>=0.12,<1",
     "llama-index-vector-stores-chroma==0.4.1",
+    "llama-index-vector-stores-mongodb==0.6.0",
+    "llama-index-embeddings-huggingface==0.5.2",
+    "llama-index-llms-langchain==0.6.0",
+    "requests>=2.32.3,<3",
 ]
 
 
@@ -154,10 +158,10 @@ browser-use = [
 
 neo4j = [
     "docx2txt==0.8",
-    "llama-index==0.12.19",
+    "llama-index==0.12.22",
     "llama-index-graph-stores-neo4j==0.4.6",
-    "llama-index-core==0.12.19",
-    "llama-index-readers-web==0.3.5",
+    "llama-index-core==0.12.22",
+    "llama-index-readers-web==0.3.7",
 ]
 
 # used for agentchat_realtime_swarm notebook and realtime agent twilio demo
@@ -172,7 +176,7 @@ interop-crewai = [
     "weaviate-client>=4,<5; python_version>='3.10' and python_version<'3.13'",
 ]
 interop-langchain = ["langchain-community>=0.3.12,<1"]
-interop-pydantic-ai = ["pydantic-ai==0.0.24"]
+interop-pydantic-ai = ["pydantic-ai==0.0.31"]
 interop =[
     "pyautogen[interop-crewai, interop-langchain, interop-pydantic-ai]",
 ]
@@ -188,7 +192,7 @@ lmm = ["replicate", "pillow"]
 graph = ["networkx", "matplotlib"]
 gemini = [
     "google-api-core",
-    "google-genai>=1.2.0,<2.0",
+    "google-genai>=1.2.0",
     "google-cloud-aiplatform",
     "google-auth",
     "pillow",
@@ -196,11 +200,16 @@ gemini = [
     "jsonref>=1,<2",
 ]
 
+gemini-realtime = [
+    "pyautogen[gemini]",
+    "pyautogen[websockets]",
+]
+
 together = ["together>=1.2"]
 websurfer = ["beautifulsoup4", "markdownify", "pdfminer.six", "pathvalidate"]
 redis = ["redis"]
 cosmosdb = ["azure-cosmos>=4.2.0"]
-websockets = ["websockets>=14.0,<15"]
+websockets = ["websockets>=14.0,<16"]
 long-context = ["llmlingua<0.3"]
 anthropic = ["anthropic[vertex]>=0.23.1"]
 cerebras = ["cerebras_cloud_sdk>=1.0.0"]
@@ -209,8 +218,9 @@ groq = ["groq>=0.9.0"]
 cohere = ["cohere>=5.13.5"]
 ollama = ["ollama>=0.4.5", "fix_busted_json>=0.0.18"]
 bedrock = ["boto3>=1.34.149"]
+deepseek = ["pyautogen[openai]"]
 
-commsagent-discord = ["discord.py>=2.4.0,<2.5"]
+commsagent-discord = ["discord.py>=2.4.0,<2.6"]
 commsagent-slack = ["slack_sdk>=3.33.0,<3.40"]
 commsagent-telegram = ["telethon>=1.38.1, <2"]
 
@@ -223,23 +233,24 @@ test = [
     "nbformat==5.10.4",
     "pytest-cov==6.0.0",
     "pytest-asyncio==0.25.3",
-    "pytest==8.3.4",
+    "pytest==8.3.5",
     "mock==5.1.0",
     "pandas==2.2.3",
-    "fastapi==0.115.8",
+    "fastapi==0.115.11",
 ]
 
 docs = [
-    "mkdocs-material==9.6.4",
-    "mkdocstrings[python]==0.28.1",
+    "mkdocs-material==9.6.7",
+    "mkdocstrings[python]==0.28.2",
     "mkdocs-literate-nav==0.6.1",
     "mdx-include==1.4.2",
     "mkdocs-git-revision-date-localized-plugin==1.3.0",
     "mike==2.1.3",
-    "typer==0.15.1",
+    "typer==0.15.2",
     "mkdocs-minify-plugin==0.8.0",
     "mkdocs-macros-plugin==1.3.7", # includes with variables
     "mkdocs-glightbox==0.4.0", # img zoom
+    "mkdocs-ezlinks-plugin==0.1.14", # converts abs links to rel links
     "pillow", # required for mkdocs-glightbo
     "cairosvg", # required for mkdocs-glightbo
     "pdoc3==0.11.5",
@@ -255,7 +266,7 @@ types = [
 ]
 
 lint = [
-    "ruff==0.9.7",
+    "ruff==0.9.9",
     "codespell==2.4.1",
     "pyupgrade-directories==0.3.0",
 ]
@@ -265,7 +276,7 @@ dev = [
     "pyautogen[lint,test,types,docs]",
     "pre-commit==4.1.0",
     "detect-secrets==1.5.0",
-    "uv==0.6.2",
+    "uv==0.6.4",
 ]
 
 
@@ -294,23 +305,31 @@ exclude = ["test", "notebook"]
 "autogen" = "autogen"
 "autogen/agentchat/contrib/captainagent/tools" = "autogen/agentchat/contrib/captainagent/tools"
 
-
 [tool.pytest.ini_options]
 addopts = '--cov=autogen --cov-append --cov-branch --cov-report=xml -m "not conda"'
 testpaths = [
     "test",
 ]
+
 markers = [
     "conda: test related to conda forge distribution",
     "all",
     "openai",
+    "openai_realtime",
     "gemini",
+    "gemini_realtime",
     "anthropic",
     "deepseek",
+    "cosmosdb",
+    "ollama",
+    "bedrock",
+    "cerebras",
+    "aux_neg_flag",
+    # optional deps
     "redis",
     "docker",
     "docs",
-    # optional deps
+    "rag",
     "jupyter_executor",
     "retrievechat",
     "retrievechat_pgvector",
@@ -323,6 +342,10 @@ markers = [
     "browser_use",
     "crawl4ai",
     "websockets",
+    "commsagent_discord",
+    "commsagent_slack",
+    "commsagent-telegram",
+    "lmm",
 ]
 
 [tool.black]
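The marker list registers the new names (openai_realtime, gemini_realtime, cosmosdb, ollama, bedrock, cerebras, aux_neg_flag, rag, the commsagent markers and lmm) so pytest does not warn about unknown marks and so runs can select or exclude them with -m expressions, just as the existing addopts line already excludes conda-only tests. A hedged sketch of how a test would carry one of these marks (the test itself is hypothetical, not taken from the repository):

import pytest


@pytest.mark.gemini_realtime
def test_realtime_handshake() -> None:
    # Placeholder body; the real suite exercises the realtime agent instead.
    assert True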
@@ -379,6 +402,8 @@ select = [
     # "UP", # pyupgrade https://docs.astral.sh/ruff/rules/#pydocstyle-d
 ]
 
+extend-select = ["D417"]
+
 ignore = ["E501", "F403", "C901",
     "E402",
     "E721",
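D417 is the pydocstyle rule (enforced here through ruff) that reports documented functions whose docstrings leave some parameters undescribed; with the google convention configured further down in this file, it applies to Args sections. A minimal illustration around a hypothetical function:

def scale(value: float, factor: float) -> float:
    """Scale a value by a factor.

    Args:
        value: The number to scale.
        factor: The multiplier to apply.
    """
    # If the "factor:" entry above were missing, the newly enabled D417 check
    # would flag this function for an undocumented argument.
    return value * factor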
@@ -404,9 +429,12 @@ convention = "google"
 
 [tool.mypy]
 files = [
+    "autogen/agentchat/agent.py",
     "autogen/agentchat/contrib/rag",
     "autogen/agentchat/contrib/graph_rag",
+    "autogen/agentchat/contrib/swarm_agent.py",
     "autogen/agentchat/realtime_agent",
+    "autogen/agentchat/utils.py",
     "autogen/agents",
     "autogen/coding",
     "autogen/exception_utils.py",
@@ -418,9 +446,11 @@ files = [
     "autogen/oai/oai_models",
     "autogen/oai/openai_utils.py",
     "autogen/tools",
+    "autogen/_website",
     "website/*.py",
+    "test/agentchat/contrib/rag",
     "test/agentchat/contrib/graph_rag",
-    # "test/agentchat/contrib/rag",
+    "test/agentchat/contrib/test_swarm.py",
     "test/agentchat/realtime_agent",
     "test/agents",
     "test/conftest.py",
@@ -55,6 +55,7 @@ setuptools.setup(
         "lmm": ["pyautogen[lmm]==" + __version__],
         "graph": ["pyautogen[graph]==" + __version__],
         "gemini": ["pyautogen[gemini]==" + __version__],
+        "gemini-realtime": ["pyautogen[gemini-realtime]==" + __version__],
         "together": ["pyautogen[together]==" + __version__],
         "websurfer": ["pyautogen[websurfer]==" + __version__],
         "redis": ["pyautogen[redis]==" + __version__],
@@ -68,6 +69,7 @@ setuptools.setup(
         "cohere": ["pyautogen[cohere]==" + __version__],
         "ollama": ["pyautogen[ollama]==" + __version__],
         "bedrock": ["pyautogen[bedrock]==" + __version__],
+        "deepseek": ["pyautogen[deepseek]==" + __version__],
         "commsagent-discord": ["pyautogen[commsagent-discord]==" + __version__],
         "commsagent-slack": ["pyautogen[commsagent-slack]==" + __version__],
         "commsagent-telegram": ["pyautogen[commsagent-telegram]==" + __version__],
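ag2 remains a thin alias: each extra in setup.py simply forwards to the same-named pyautogen extra pinned to the alias's own version, and 0.8.1 adds the gemini-realtime and deepseek entries to that map. A simplified sketch of the pattern (not the actual setup script, which lists many more extras and other metadata):

import setuptools

__version__ = "0.8.1"

# Each alias extra installs the identically named pyautogen extra at the same version.
extra_names = ["openai", "gemini", "gemini-realtime", "deepseek", "websockets"]

setuptools.setup(
    name="ag2-alias-sketch",  # hypothetical name, for illustration only
    version=__version__,
    install_requires=["pyautogen==" + __version__],
    extras_require={name: ["pyautogen[" + name + "]==" + __version__] for name in extra_names},
)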
@@ -8,7 +8,6 @@
 
 import os
 import tempfile
-import unittest
 from io import StringIO
 from types import SimpleNamespace
 from unittest.mock import patch
@@ -24,15 +23,10 @@ from autogen.code_utils import (
     execute_code,
     extract_code,
     get_powershell_command,
-    improve_code,
-    improve_function,
     in_docker_container,
     infer_lang,
     is_docker_running,
 )
-from autogen.import_utils import skip_on_missing_imports
-
-from .conftest import Credentials
 
 here = os.path.abspath(os.path.dirname(__file__))
 
@@ -387,69 +381,40 @@ def test_create_virtual_env_with_extra_args():
     assert venv_context.env_name == os.path.split(temp_dir)[1]
 
 
-@skip_on_missing_imports(["openai"])
-def _test_improve(credentials_all: Credentials):
-    config_list = credentials_all.config_list
-    improved, _ = improve_function(
-        "autogen/math_utils.py",
-        "solve_problem",
-        "Solve math problems accurately, by avoiding calculation errors and reduce reasoning errors.",
-        config_list=config_list,
-    )
-    with open(f"{here}/math_utils.py.improved", "w") as f:
-        f.write(improved)
-    suggestion, _ = improve_code(
-        ["autogen/code_utils.py", "autogen/math_utils.py"],
-        "leverage generative AI smartly and cost-effectively",
-        config_list=config_list,
-    )
-    print(suggestion)
-    improvement, cost = improve_code(
-        ["autogen/code_utils.py", "autogen/math_utils.py"],
-        "leverage generative AI smartly and cost-effectively",
-        suggest_only=False,
-        config_list=config_list,
-    )
-    print(cost)
-    with open(f"{here}/suggested_improvement.txt", "w") as f:
-        f.write(improvement)
-
-
-class TestContentStr(unittest.TestCase):
+class TestContentStr:
     def test_string_content(self):
-        self.assertEqual(content_str("simple string"), "simple string")
+        assert content_str("simple string") == "simple string"
 
     def test_list_of_text_content(self):
         content = [{"type": "text", "text": "hello"}, {"type": "text", "text": " world"}]
-        self.assertEqual(content_str(content), "hello world")
+        assert content_str(content) == "hello world"
 
     def test_mixed_content(self):
         content = [{"type": "text", "text": "hello"}, {"type": "image_url", "url": "http://example.com/image.png"}]
-        self.assertEqual(content_str(content), "hello<image>")
+        assert content_str(content) == "hello<image>"
 
     def test_invalid_content(self):
         content = [{"type": "text", "text": "hello"}, {"type": "wrong_type", "url": "http://example.com/image.png"}]
-        with self.assertRaises(ValueError) as context:
+        with pytest.raises(ValueError):
             content_str(content)
-        self.assertIn("Wrong content format", str(context.exception))
 
     def test_empty_list(self):
-        self.assertEqual(content_str([]), "")
+        assert content_str([]) == ""
 
     def test_non_dict_in_list(self):
         content = ["string", {"type": "text", "text": "text"}]
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             content_str(content)
 
 
-class TestGetPowerShellCommand(unittest.TestCase):
+class TestGetPowerShellCommand:
     @patch("subprocess.run")
     def test_get_powershell_command_powershell(self, mock_subprocess_run):
         # Set up the mock to return a successful result for 'powershell'
         mock_subprocess_run.return_value.returncode = 0
         mock_subprocess_run.return_value.stdout = StringIO("5")
 
-        self.assertEqual(get_powershell_command(), "powershell")
+        assert get_powershell_command() == "powershell"
 
     @patch("subprocess.run")
     def test_get_powershell_command_pwsh(self, mock_subprocess_run):
@@ -458,18 +423,18 @@ class TestGetPowerShellCommand(unittest.TestCase):
         mock_subprocess_run.return_value.returncode = 0
         mock_subprocess_run.return_value.stdout = StringIO("7")
 
-        self.assertEqual(get_powershell_command(), "pwsh")
+        assert get_powershell_command() == "pwsh"
 
     @patch("subprocess.run")
     def test_get_powershell_command_not_found(self, mock_subprocess_run):
         mock_subprocess_run.side_effect = [FileNotFoundError, FileNotFoundError]
-        with self.assertRaises(FileNotFoundError):
+        with pytest.raises(FileNotFoundError):
             get_powershell_command()
 
     @patch("subprocess.run")
     def test_get_powershell_command_no_permission(self, mock_subprocess_run):
         mock_subprocess_run.side_effect = [PermissionError, FileNotFoundError]
-        with self.assertRaises(PermissionError):
+        with pytest.raises(PermissionError):
             get_powershell_command()
 
 
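The same file also drops its unittest scaffolding: test classes no longer inherit from unittest.TestCase, assertEqual becomes a plain assert, and assertRaises becomes pytest.raises. The conversion pattern, reduced to a self-contained example around a hypothetical helper:

import pytest


def parse_positive(text: str) -> int:
    value = int(text)
    if value <= 0:
        raise ValueError("expected a positive integer")
    return value


class TestParsePositive:  # no unittest.TestCase base class required
    def test_valid(self):
        assert parse_positive("7") == 7  # was: self.assertEqual(parse_positive("7"), 7)

    def test_invalid(self):
        with pytest.raises(ValueError):  # was: with self.assertRaises(ValueError):
            parse_positive("-1")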
@@ -14,7 +14,7 @@ from unittest.mock import Mock, patch
 
 import pytest
 
-from autogen.import_utils import optional_import_block, skip_on_missing_imports
+from autogen.import_utils import optional_import_block, run_for_optional_imports
 
 with optional_import_block() as result:
     import openai  # noqa: F401
@@ -160,7 +160,7 @@ def test_log_function_use(db_connection):
     assert row["returns"] == json.dumps(returns)
 
 
-@skip_on_missing_imports(["openai"], "openai")
+@run_for_optional_imports(["openai"], "openai")
 def test_log_new_agent(db_connection):
     from autogen import AssistantAgent
 
@@ -186,7 +186,7 @@ def test_log_new_agent(db_connection):
     assert row["init_args"] == json.dumps(init_args)
 
 
-@skip_on_missing_imports(["openai"], "openai")
+@run_for_optional_imports(["openai"], "openai")
 def test_log_oai_wrapper(db_connection):
     from autogen import OpenAIWrapper
 
@@ -213,7 +213,7 @@ def test_log_oai_wrapper(db_connection):
     assert "base_config" in saved_init_args
 
 
-@skip_on_missing_imports(["openai"], "openai")
+@run_for_optional_imports(["openai"], "openai")
 def test_log_oai_client(db_connection):
     cur = db_connection.cursor()
 
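Throughout the test suite the skip_on_missing_imports decorator is replaced by run_for_optional_imports, called with the same argument shape, e.g. run_for_optional_imports(["openai"], "openai"). A hedged sketch of a decorated test after the rename (the decorator's exact behaviour is an assumption inferred from its name and from how these tests use it):

from autogen.import_utils import run_for_optional_imports


@run_for_optional_imports(["openai"], "openai")
def test_needs_openai() -> None:
    # Hypothetical body; the real tests cover runtime logging, notebooks, etc.
    import openai  # only reached when the optional dependency is available

    assert openai is not None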
@@ -11,7 +11,7 @@ import sys
 
 import pytest
 
-from autogen.import_utils import skip_on_missing_imports
+from autogen.import_utils import run_for_optional_imports
 
 here = os.path.abspath(os.path.dirname(__file__))
 
@@ -44,102 +44,102 @@ def run_notebook(input_nb, output_nb="executed_openai_notebook.ipynb", save=Fals
     nbformat.write(nb, nb_executed_file)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.13"),
     reason="do not run if py!=3.13",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_auto_feedback_from_code(save=False):
     run_notebook("agentchat_auto_feedback_from_code_execution.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.11"),
     reason="do not run if py!=3.11",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def _test_oai_completion(save=False):
     run_notebook("oai_completion.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.12"),
     reason="do not run if py!=3.12",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_function_call(save=False):
     run_notebook("agentchat_function_call.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.10"),
     reason="do not run if py!=3.10",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_function_call_currency_calculator(save=False):
     run_notebook("agentchat_function_call_currency_calculator.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.13"),
     reason="do not run if py!=3.13",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_function_call_async(save=False):
     run_notebook("agentchat_function_call_async.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.12"),
     reason="do not run if py!=3.12",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def _test_agentchat_MathChat(save=False):  # noqa: N802
     run_notebook("agentchat_MathChat.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.10"),
     reason="do not run if py!=3.10",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def _test_oai_chatgpt_gpt4(save=False):
     run_notebook("oai_chatgpt_gpt4.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.12"),
     reason="do not run if py!=3.12",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_groupchat_finite_state_machine(save=False):
     run_notebook("agentchat_groupchat_finite_state_machine.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.11"),
     reason="do not run if py!=3.11",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_cost_token_tracking(save=False):
     run_notebook("agentchat_cost_token_tracking.ipynb", save=save)
 
 
-@pytest.mark.openai
+@run_for_optional_imports("openai", "openai")
 @pytest.mark.skipif(
     not sys.version.startswith("3.11"),
     reason="do not run if py!=3.11",
 )
-@skip_on_missing_imports(["openai"])
+@run_for_optional_imports(["openai"], "openai")
 def test_agentchat_groupchat_stateflow(save=False):
     run_notebook("agentchat_groupchat_stateflow.ipynb", save=save)
 
@@ -13,7 +13,7 @@ from contextlib import suppress
 
 import pytest
 
-from autogen.import_utils import optional_import_block, skip_on_missing_imports
+from autogen.import_utils import optional_import_block, run_for_optional_imports
 from autogen.retrieve_utils import (
     create_vector_db_from_dir,
     extract_text_from_pdf,
@@ -36,7 +36,7 @@ simplify the process of building applications that leverage the power of LLMs, a
 integration, testing, and deployment."""
 
 
-@skip_on_missing_imports(["bs4", "chromadb", "markdownify", "pypdf"], "retrievechat")
+@run_for_optional_imports(["bs4", "chromadb", "markdownify", "pypdf"], "retrievechat")
 class TestRetrieveUtils:
     def test_split_text_to_chunks(self):
         long_text = "A" * 10000
@@ -127,7 +127,10 @@ class TestRetrieveUtils:
         results = query_vector_db(["autogen"], client=client)
         assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))
 
-    @skip_on_missing_imports(["lancedb"], "unknown")
+    @pytest.mark.skip(
+        reason="This test is failing due to lancedb missing in project install, TODO: add lancedb installation to CI?"
+    )
+    @run_for_optional_imports(["lancedb"], "unknown")
     def test_custom_vector_db(self):
         with optional_import_block() as result:
             import lancedb
@@ -231,7 +234,10 @@ class TestRetrieveUtils:
         print(results["ids"][0])
         assert len(results["ids"][0]) > 0
 
-    @skip_on_missing_imports(["unstructured"], "unknown")
+    @pytest.mark.skip(
+        reason="This test is failing due to unstructured missing in project install, TODO: add unstructured installation to CI?"
+    )
+    @run_for_optional_imports(["unstructured"], "unknown")
     def test_unstructured(self):
         pdf_file_path = os.path.join(test_dir, "example.pdf")
         txt_file_path = os.path.join(test_dir, "example.txt")
@@ -8,7 +8,7 @@
 
 import pytest
 
-from autogen.import_utils import skip_on_missing_imports
+from autogen.import_utils import run_for_optional_imports
 from autogen.token_count_utils import (
     _num_token_from_messages,
     count_token,
@@ -89,7 +89,7 @@ def test_num_token_from_messages(model: str, expected_count: int) -> None:
     assert _num_token_from_messages(messages=messages, model=model) == expected_count
 
 
-@skip_on_missing_imports("PIL", "unknown")
+@run_for_optional_imports("PIL", "unknown")
 def test_num_tokens_from_gpt_image():
     # mock num_tokens_from_gpt_image function
     base64_encoded_image = (
@@ -1,145 +0,0 @@
-pyautogen==0.8.0b1
-
-[anthropic]
-pyautogen[anthropic]==0.8.0b1
-
-[autobuild]
-pyautogen[autobuild]==0.8.0b1
-
-[bedrock]
-pyautogen[bedrock]==0.8.0b1
-
-[blendsearch]
-pyautogen[blendsearch]==0.8.0b1
-
-[browser-use]
-pyautogen[browser-use]==0.8.0b1
-
-[captainagent]
-pyautogen[captainagent]==0.8.0b1
-
-[cerebras]
-pyautogen[cerebras]==0.8.0b1
-
-[cohere]
-pyautogen[cohere]==0.8.0b1
-
-[commsagent-discord]
-pyautogen[commsagent-discord]==0.8.0b1
-
-[commsagent-slack]
-pyautogen[commsagent-slack]==0.8.0b1
-
-[commsagent-telegram]
-pyautogen[commsagent-telegram]==0.8.0b1
-
-[cosmosdb]
-pyautogen[cosmosdb]==0.8.0b1
-
-[crawl4ai]
-pyautogen[crawl4ai]==0.8.0b1
-
-[dev]
-pyautogen[dev]==0.8.0b1
-
-[docs]
-pyautogen[docs]==0.8.0b1
-
-[flaml]
-pyautogen[flaml]==0.8.0b1
-
-[gemini]
-pyautogen[gemini]==0.8.0b1
-
-[graph]
-pyautogen[graph]==0.8.0b1
-
-[graph-rag-falkor-db]
-pyautogen[graph-rag-falkor-db]==0.8.0b1
-
-[groq]
-pyautogen[groq]==0.8.0b1
-
-[interop]
-pyautogen[interop]==0.8.0b1
-
-[interop-crewai]
-pyautogen[interop-crewai]==0.8.0b1
-
-[interop-langchain]
-pyautogen[interop-langchain]==0.8.0b1
-
-[interop-pydantic-ai]
-pyautogen[interop-pydantic-ai]==0.8.0b1
-
-[jupyter-executor]
-pyautogen[jupyter-executor]==0.8.0b1
-
-[lint]
-pyautogen[lint]==0.8.0b1
-
-[lmm]
-pyautogen[lmm]==0.8.0b1
-
-[long-context]
-pyautogen[long-context]==0.8.0b1
-
-[mathchat]
-pyautogen[mathchat]==0.8.0b1
-
-[mistral]
-pyautogen[mistral]==0.8.0b1
-
-[neo4j]
-pyautogen[neo4j]==0.8.0b1
-
-[ollama]
-pyautogen[ollama]==0.8.0b1
-
-[openai]
-pyautogen[openai]==0.8.0b1
-
-[openai-realtime]
-pyautogen[openai-realtime]==0.8.0b1
-
-[rag]
-pyautogen[rag]==0.8.0b1
-
-[redis]
-pyautogen[redis]==0.8.0b1
-
-[retrievechat]
-pyautogen[retrievechat]==0.8.0b1
-
-[retrievechat-couchbase]
-pyautogen[retrievechat-couchbase]==0.8.0b1
-
-[retrievechat-mongodb]
-pyautogen[retrievechat-mongodb]==0.8.0b1
-
-[retrievechat-pgvector]
-pyautogen[retrievechat-pgvector]==0.8.0b1
-
-[retrievechat-qdrant]
-pyautogen[retrievechat-qdrant]==0.8.0b1
-
-[teachable]
-pyautogen[teachable]==0.8.0b1
-
-[test]
-pyautogen[test]==0.8.0b1
-
-[together]
-pyautogen[together]==0.8.0b1
-
-[twilio]
-pyautogen[twilio]==0.8.0b1
-
-[types]
-pyautogen[types]==0.8.0b1
-
-[websockets]
-pyautogen[websockets]==0.8.0b1
-
-[websurfer]
-pyautogen[websurfer]==0.8.0b1
11 files without changes.