ag2 0.9.8.post1__py3-none-any.whl → 0.9.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic; see the registry's release report for more details.

Files changed (88)
  1. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/METADATA +232 -210
  2. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/RECORD +88 -80
  3. autogen/_website/generate_mkdocs.py +3 -3
  4. autogen/_website/notebook_processor.py +1 -1
  5. autogen/_website/utils.py +1 -1
  6. autogen/agentchat/assistant_agent.py +15 -15
  7. autogen/agentchat/chat.py +52 -40
  8. autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
  9. autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
  10. autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
  11. autogen/agentchat/contrib/capabilities/transforms.py +1 -1
  12. autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
  13. autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
  14. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
  15. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
  16. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
  17. autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
  18. autogen/agentchat/contrib/rag/query_engine.py +11 -11
  19. autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
  20. autogen/agentchat/contrib/swarm_agent.py +3 -2
  21. autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
  22. autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
  23. autogen/agentchat/contrib/web_surfer.py +1 -1
  24. autogen/agentchat/conversable_agent.py +184 -80
  25. autogen/agentchat/group/context_expression.py +21 -21
  26. autogen/agentchat/group/handoffs.py +11 -11
  27. autogen/agentchat/group/multi_agent_chat.py +3 -2
  28. autogen/agentchat/group/on_condition.py +11 -11
  29. autogen/agentchat/group/safeguards/__init__.py +21 -0
  30. autogen/agentchat/group/safeguards/api.py +224 -0
  31. autogen/agentchat/group/safeguards/enforcer.py +1064 -0
  32. autogen/agentchat/group/safeguards/events.py +119 -0
  33. autogen/agentchat/group/safeguards/validator.py +435 -0
  34. autogen/agentchat/groupchat.py +60 -19
  35. autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
  36. autogen/agentchat/realtime/experimental/function_observer.py +2 -3
  37. autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
  38. autogen/agentchat/realtime/experimental/realtime_swarm.py +21 -10
  39. autogen/agentchat/user_proxy_agent.py +55 -53
  40. autogen/agents/experimental/document_agent/document_agent.py +1 -10
  41. autogen/agents/experimental/document_agent/parser_utils.py +5 -1
  42. autogen/browser_utils.py +4 -4
  43. autogen/cache/abstract_cache_base.py +2 -6
  44. autogen/cache/disk_cache.py +1 -6
  45. autogen/cache/in_memory_cache.py +2 -6
  46. autogen/cache/redis_cache.py +1 -5
  47. autogen/coding/__init__.py +10 -2
  48. autogen/coding/base.py +2 -1
  49. autogen/coding/docker_commandline_code_executor.py +1 -6
  50. autogen/coding/factory.py +9 -0
  51. autogen/coding/jupyter/docker_jupyter_server.py +1 -7
  52. autogen/coding/jupyter/jupyter_client.py +2 -9
  53. autogen/coding/jupyter/jupyter_code_executor.py +2 -7
  54. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  55. autogen/coding/local_commandline_code_executor.py +0 -65
  56. autogen/coding/yepcode_code_executor.py +197 -0
  57. autogen/environments/docker_python_environment.py +3 -3
  58. autogen/environments/system_python_environment.py +5 -5
  59. autogen/environments/venv_python_environment.py +5 -5
  60. autogen/events/agent_events.py +1 -1
  61. autogen/events/client_events.py +1 -1
  62. autogen/fast_depends/utils.py +10 -0
  63. autogen/graph_utils.py +5 -7
  64. autogen/import_utils.py +28 -15
  65. autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
  66. autogen/io/processors/console_event_processor.py +8 -3
  67. autogen/llm_config/config.py +168 -91
  68. autogen/llm_config/entry.py +38 -26
  69. autogen/llm_config/types.py +35 -0
  70. autogen/llm_config/utils.py +223 -0
  71. autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
  72. autogen/messages/agent_messages.py +1 -1
  73. autogen/messages/client_messages.py +1 -1
  74. autogen/oai/__init__.py +8 -1
  75. autogen/oai/client.py +10 -3
  76. autogen/oai/client_utils.py +1 -1
  77. autogen/oai/cohere.py +4 -4
  78. autogen/oai/gemini.py +4 -6
  79. autogen/oai/gemini_types.py +1 -0
  80. autogen/oai/openai_utils.py +44 -115
  81. autogen/tools/dependency_injection.py +4 -8
  82. autogen/tools/experimental/reliable/reliable.py +3 -2
  83. autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
  84. autogen/tools/function_utils.py +2 -1
  85. autogen/version.py +1 -1
  86. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/WHEEL +0 -0
  87. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/licenses/LICENSE +0 -0
  88. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/licenses/NOTICE.md +0 -0
@@ -48,6 +48,7 @@ SELECT_SPEAKER_PROMPT_TEMPLATE = (
48
48
  @export_module("autogen")
49
49
  class GroupChat:
50
50
  """(In preview) A group chat class that contains the following data fields:
51
+
51
52
  - agents: a list of participating agents.
52
53
  - messages: a list of messages in the group chat.
53
54
  - max_round: the maximum number of rounds.
@@ -103,9 +104,9 @@ class GroupChat:
103
104
  Default is 2.
104
105
  - select_speaker_transform_messages: (optional) the message transformations to apply to the nested select speaker agent-to-agent chat messages.
105
106
  Takes a TransformMessages object, defaults to None and is only utilised when the speaker selection method is "auto".
106
- - select_speaker_auto_verbose: whether to output the select speaker responses and selections
107
+ - select_speaker_auto_verbose: whether to output the select speaker responses and selections.
107
108
  If set to True, the outputs from the two agents in the nested select speaker chat will be output, along with
108
- whether the responses were successful, or not, in selecting an agent
109
+ whether the responses were successful, or not, in selecting an agent.
109
110
  Applies only to "auto" speaker selection method.
110
111
  - allow_repeat_speaker: whether to allow the same speaker to speak consecutively.
111
112
  Default is True, in which case all speakers are allowed to speak consecutively.
@@ -174,6 +175,7 @@ class GroupChat:
174
175
  )
175
176
 
176
177
  allowed_speaker_transitions_dict: dict[str, list[Agent]] = field(init=False)
178
+ _inter_agent_guardrails: list = field(default_factory=list, init=False)
177
179
 
178
180
  def __post_init__(self):
179
181
  # Post init steers clears of the automatically generated __init__ method from dataclass
@@ -575,7 +577,7 @@ class GroupChat:
575
577
  return self.next_agent(last_speaker)
576
578
 
577
579
  # auto speaker selection with 2-agent chat
578
- return self._auto_select_speaker(last_speaker, selector, messages, agents)
580
+ return self._auto_select_speaker(last_speaker, selector, messages if messages else self.messages, agents)
579
581
 
580
582
  async def a_select_speaker(self, last_speaker: Agent, selector: ConversableAgent) -> Agent:
581
583
  """Select the next speaker (with requery), asynchronously."""
@@ -587,7 +589,7 @@ class GroupChat:
587
589
  return self.next_agent(last_speaker)
588
590
 
589
591
  # auto speaker selection with 2-agent chat
590
- return await self.a_auto_select_speaker(last_speaker, selector, messages, agents)
592
+ return await self.a_auto_select_speaker(last_speaker, selector, messages if messages else self.messages, agents)
591
593
 
592
594
  def _finalize_speaker(self, last_speaker: Agent, final: bool, name: str, agents: list[Agent] | None) -> Agent:
593
595
  if not final:
@@ -1000,7 +1002,7 @@ class GroupChat:
1000
1002
  message_content = message_content["content"]
1001
1003
  message_content = content_str(message_content)
1002
1004
 
1003
- mentions = dict()
1005
+ mentions = {}
1004
1006
  for agent in agents:
1005
1007
  # Finds agent mentions, taking word boundaries into account,
1006
1008
  # accommodates escaping underscores and underscores as spaces
@@ -1052,6 +1054,30 @@ class GroupChat:
1052
1054
  return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
1053
1055
  return None
1054
1056
 
1057
+ def _run_inter_agent_guardrails(
1058
+ self,
1059
+ *,
1060
+ src_agent_name: str,
1061
+ dst_agent_name: str,
1062
+ message_content: str,
1063
+ ) -> str | None:
1064
+ """Run policy-driven inter-agent guardrails, if any are configured.
1065
+
1066
+ Returns optional replacement content when a guardrail triggers.
1067
+ """
1068
+ guardrails = getattr(self, "_inter_agent_guardrails", None)
1069
+ if not guardrails:
1070
+ return None
1071
+ for gr in guardrails:
1072
+ reply = gr.check_and_act(
1073
+ src_agent_name=src_agent_name,
1074
+ dst_agent_name=dst_agent_name,
1075
+ message_content=message_content,
1076
+ )
1077
+ if reply is not None:
1078
+ return reply
1079
+ return None
1080
+
1055
1081
 
1056
1082
  @export_module("autogen")
1057
1083
  class GroupChatManager(ConversableAgent):
@@ -1208,7 +1234,21 @@ class GroupChatManager(ConversableAgent):
1208
1234
  # broadcast the message to all agents except the speaker
1209
1235
  for agent in groupchat.agents:
1210
1236
  if agent != speaker:
1211
- self.send(message, agent, request_reply=False, silent=True)
1237
+ inter_reply = groupchat._run_inter_agent_guardrails(
1238
+ src_agent_name=speaker.name,
1239
+ dst_agent_name=agent.name,
1240
+ message_content=message,
1241
+ )
1242
+ if inter_reply is not None:
1243
+ replacement = (
1244
+ {"content": inter_reply, "name": speaker.name}
1245
+ if not isinstance(inter_reply, dict)
1246
+ else inter_reply
1247
+ )
1248
+ self.send(replacement, agent, request_reply=False, silent=True)
1249
+ else:
1250
+ self.send(message, agent, request_reply=False, silent=True)
1251
+
1212
1252
  if self._is_termination_msg(message):
1213
1253
  # The conversation is over
1214
1254
  termination_reason = f"Termination message condition on the GroupChatManager '{self.name}' met"
@@ -1707,19 +1747,20 @@ class GroupChatManager(ConversableAgent):
1707
1747
  agent._raise_exception_on_async_reply_functions()
1708
1748
 
1709
1749
  def clear_agents_history(self, reply: dict[str, Any], groupchat: GroupChat) -> str:
1710
- """Clears history of messages for all agents or selected one. Can preserve selected number of last messages.
1711
- That function is called when user manually provide "clear history" phrase in his reply.
1712
- When "clear history" is provided, the history of messages for all agents is cleared.
1713
- When "clear history `<agent_name>`" is provided, the history of messages for selected agent is cleared.
1714
- When "clear history `<nr_of_messages_to_preserve>`" is provided, the history of messages for all agents is cleared
1715
- except last `<nr_of_messages_to_preserve>` messages.
1716
- When "clear history `<agent_name>` `<nr_of_messages_to_preserve>`" is provided, the history of messages for selected
1717
- agent is cleared except last `<nr_of_messages_to_preserve>` messages.
1718
- Phrase "clear history" and optional arguments are cut out from the reply before it passed to the chat.
1719
-
1720
- Args:
1721
- reply (dict): reply message dict to analyze.
1722
- groupchat (GroupChat): GroupChat object.
1750
+ """Clears history of messages for all agents or a selected one. Can preserve a selected number of last messages.\n
1751
+ \n
1752
+ This function is called when the user manually provides the "clear history" phrase in their reply.\n
1753
+ When "clear history" is provided, the history of messages for all agents is cleared.\n
1754
+ When "clear history `<agent_name>`" is provided, the history of messages for the selected agent is cleared.\n
1755
+ When "clear history `<nr_of_messages_to_preserve>`" is provided, the history of messages for all agents is cleared\n
1756
+ except for the last `<nr_of_messages_to_preserve>` messages.\n
1757
+ When "clear history `<agent_name>` `<nr_of_messages_to_preserve>`" is provided, the history of messages for the selected\n
1758
+ agent is cleared except for the last `<nr_of_messages_to_preserve>` messages.\n
1759
+ The phrase "clear history" and optional arguments are cut out from the reply before it is passed to the chat.\n
1760
+ \n
1761
+ Args:\n
1762
+ reply (dict): reply message dict to analyze.\n
1763
+ groupchat (GroupChat): GroupChat object.\n
1723
1764
  """
1724
1765
  iostream = IOStream.get_default()
1725
1766
 
@@ -8,7 +8,7 @@ from contextlib import AbstractAsyncContextManager
8
8
  from logging import Logger
9
9
  from typing import Any, Literal, Protocol, TypeVar, runtime_checkable
10
10
 
11
- from asyncer import create_task_group
11
+ from anyio import create_task_group
12
12
 
13
13
  from .....doc_utils import export_module
14
14
  from .....llm_config import LLMConfig
@@ -140,7 +140,7 @@ class RealtimeClientBase:
140
140
  Args:
141
141
  audio (str): The audio.
142
142
  """
143
- await self.add_event(InputAudioBufferDelta(delta=audio, item_id=None, raw_message=dict()))
143
+ await self.add_event(InputAudioBufferDelta(delta=audio, item_id=None, raw_message={}))
144
144
 
145
145
 
146
146
  _realtime_client_classes: dict[str, type[RealtimeClientProtocol]] = {}
@@ -2,14 +2,13 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
- import asyncio
6
5
  import json
7
6
  from typing import TYPE_CHECKING, Any, Optional
8
7
 
9
- from asyncer import asyncify
10
8
  from pydantic import BaseModel
11
9
 
12
10
  from ....doc_utils import export_module
11
+ from ....fast_depends.utils import asyncify
13
12
  from .realtime_events import FunctionCall, RealtimeEvent
14
13
  from .realtime_observer import RealtimeObserver
15
14
 
@@ -49,7 +48,7 @@ class FunctionObserver(RealtimeObserver):
49
48
  """
50
49
  if name in self.agent.registered_realtime_tools:
51
50
  func = self.agent.registered_realtime_tools[name].func
52
- func = func if asyncio.iscoroutinefunction(func) else asyncify(func)
51
+ func = asyncify(func)
53
52
  try:
54
53
  result = await func(**kwargs)
55
54
  except Exception:
@@ -7,8 +7,7 @@ from dataclasses import dataclass
7
7
  from logging import Logger, getLogger
8
8
  from typing import Any, TypeVar
9
9
 
10
- from anyio import lowlevel
11
- from asyncer import create_task_group
10
+ from anyio import create_task_group, lowlevel
12
11
 
13
12
  from ....doc_utils import export_module
14
13
  from ....llm_config import LLMConfig
@@ -102,7 +101,7 @@ class RealtimeAgent:
102
101
 
103
102
  async def start_observers(self) -> None:
104
103
  for observer in self._observers:
105
- self._tg.soonify(observer.run)(self)
104
+ self._tg.start_soon(observer.run, self)
106
105
 
107
106
  # wait for the observers to be ready
108
107
  for observer in self._observers:
@@ -3,18 +3,21 @@
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
5
  import logging
6
+ import uuid
6
7
  import warnings
7
8
  from collections import defaultdict
8
9
  from collections.abc import Callable
10
+ from functools import partial
9
11
  from typing import TYPE_CHECKING, Any, Optional, TypeVar
10
12
 
11
13
  import anyio
12
- from asyncer import asyncify, create_task_group, syncify
14
+ from anyio import create_task_group, from_thread
13
15
 
14
16
  from ....agentchat.contrib.swarm_agent import AfterWorkOption, initiate_swarm_chat
15
17
  from ....cache import AbstractCache
16
18
  from ....code_utils import content_str
17
19
  from ....doc_utils import export_module
20
+ from ....fast_depends.utils import asyncify
18
21
  from ... import Agent, ChatResult, ConversableAgent, LLMAgent
19
22
  from ...utils import consolidate_chat_info, gather_usage_summary
20
23
 
@@ -211,6 +214,7 @@ class SwarmableAgent(Agent):
211
214
  summary_args: dict[str, Any] | None = {},
212
215
  **kwargs: dict[str, Any],
213
216
  ) -> ChatResult:
217
+ chat_id = uuid.uuid4().int
214
218
  _chat_info = locals().copy()
215
219
  _chat_info["sender"] = self
216
220
  consolidate_chat_info(_chat_info, uniform_sender=self)
@@ -226,6 +230,7 @@ class SwarmableAgent(Agent):
226
230
  recipient.previous_cache = None # type: ignore[attr-defined]
227
231
 
228
232
  chat_result = ChatResult(
233
+ chat_id=chat_id,
229
234
  chat_history=self.chat_messages[recipient],
230
235
  summary=summary,
231
236
  cost=gather_usage_summary([self, recipient]), # type: ignore[arg-type]
@@ -349,7 +354,7 @@ class SwarmableRealtimeAgent(SwarmableAgent):
349
354
  self._agents = agents
350
355
  self._realtime_agent = realtime_agent
351
356
 
352
- self._answer_event: anyio.Event = anyio.Event()
357
+ self._answer_event = anyio.Event()
353
358
  self._answer: str = ""
354
359
  self.question_message = question_message or QUESTION_MESSAGE
355
360
 
@@ -419,12 +424,13 @@ class SwarmableRealtimeAgent(SwarmableAgent):
419
424
 
420
425
  async def get_input() -> None:
421
426
  async with create_task_group() as tg:
422
- tg.soonify(self.ask_question)(
427
+ tg.start_soon(
428
+ self.ask_question,
423
429
  self.question_message.format(messages[-1]["content"]),
424
430
  question_timeout=QUESTION_TIMEOUT_SECONDS,
425
431
  )
426
432
 
427
- syncify(get_input)()
433
+ from_thread.run_sync(get_input)
428
434
 
429
435
  return True, {"role": "user", "content": self._answer} # type: ignore[return-value]
430
436
 
@@ -449,12 +455,17 @@ class SwarmableRealtimeAgent(SwarmableAgent):
449
455
  )(self.set_answer)
450
456
 
451
457
  async def on_observers_ready() -> None:
452
- self._realtime_agent._tg.soonify(asyncify(initiate_swarm_chat))(
453
- initial_agent=self._initial_agent,
454
- agents=self._agents,
455
- user_agent=self, # type: ignore[arg-type]
456
- messages="Find out what the user wants.",
457
- after_work=AfterWorkOption.REVERT_TO_USER,
458
+ self._realtime_agent._tg.start_soon(
459
+ asyncify(
460
+ partial(
461
+ initiate_swarm_chat,
462
+ initial_agent=self._initial_agent,
463
+ agents=self._agents,
464
+ user_agent=self, # type: ignore[arg-type]
465
+ messages="Find out what the user wants.",
466
+ after_work=AfterWorkOption.REVERT_TO_USER,
467
+ )
468
+ )
458
469
  )
459
470
 
460
471
  self._realtime_agent.callbacks.on_observers_ready = on_observers_ready
@@ -15,15 +15,15 @@ from .conversable_agent import ConversableAgent
15
15
 
16
16
  @export_module("autogen")
17
17
  class UserProxyAgent(ConversableAgent):
18
- """(In preview) A proxy agent for the user, that can execute code and provide feedback to the other agents.
19
-
20
- UserProxyAgent is a subclass of ConversableAgent configured with `human_input_mode` to ALWAYS
21
- and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
22
- Code execution is enabled by default. LLM-based auto reply is disabled by default.
23
- To modify auto reply, register a method with [`register_reply`](../ConversableAgent#register-reply).
24
- To modify the way to get human input, override `get_human_input` method.
25
- To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
26
- `run_code`, and `execute_function` methods respectively.
18
+ """(In preview) A proxy agent for the user, that can execute code and provide feedback to the other agents.\n
19
+ \n
20
+ UserProxyAgent is a subclass of ConversableAgent configured with `human_input_mode` to ALWAYS\n
21
+ and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.\n
22
+ Code execution is enabled by default. LLM-based auto reply is disabled by default.\n
23
+ To modify auto reply, register a method with [`register_reply`](../ConversableAgent#register-reply).\n
24
+ To modify the way to get human input, override `get_human_input` method.\n
25
+ To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,\n
26
+ `run_code`, and `execute_function` methods respectively.\n
27
27
  """
28
28
 
29
29
  # Default UserProxyAgent.description values, based on human_input_mode
@@ -47,50 +47,52 @@ class UserProxyAgent(ConversableAgent):
47
47
  description: str | None = None,
48
48
  **kwargs: Any,
49
49
  ):
50
- """Args:
51
- name (str): name of the agent.
52
- is_termination_msg (function): a function that takes a message in the form of a dictionary
53
- and returns a boolean value indicating if this received message is a termination message.
54
- The dict can contain the following keys: "content", "role", "name", "function_call".
55
- max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
56
- default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
57
- The limit only plays a role when human_input_mode is not "ALWAYS".
58
- human_input_mode (str): whether to ask for human inputs every time a message is received.
59
- Possible values are "ALWAYS", "TERMINATE", "NEVER".
60
- (1) When "ALWAYS", the agent prompts for human input every time a message is received.
61
- Under this mode, the conversation stops when the human input is "exit",
62
- or when is_termination_msg is True and there is no human input.
63
- (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
64
- the number of auto reply reaches the max_consecutive_auto_reply.
65
- (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
66
- when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
67
- function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions.
68
- code_execution_config (dict or False): config for the code execution.
69
- To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:
70
- - work_dir (Optional, str): The working directory for the code execution.
71
- If None, a default working directory will be used.
72
- The default working directory is the "extensions" directory under
73
- "path_to_autogen".
74
- - use_docker (Optional, list, str or bool): The docker image to use for code execution.
75
- Default is True, which means the code will be executed in a docker container. A default list of images will be used.
76
- If a list or a str of image name(s) is provided, the code will be executed in a docker container
77
- with the first image successfully pulled.
78
- If False, the code will be executed in the current environment.
79
- We strongly recommend using docker for code execution.
80
- - timeout (Optional, int): The maximum execution time in seconds.
81
- - last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
82
- default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
83
- llm_config (LLMConfig or dict or False or None): llm inference configuration.
84
- Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)
85
- for available options.
86
- Default to False, which disables llm-based auto reply.
87
- When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
88
- system_message (str or List): system message for ChatCompletion inference.
89
- Only used when llm_config is not False. Use it to reprogram the agent.
90
- description (str): a short description of the agent. This description is used by other agents
91
- (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
92
- **kwargs (dict): Please refer to other kwargs in
93
- [ConversableAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/ConversableAgent).
50
+ """Initialize a UserProxyAgent.
51
+
52
+ Args:
53
+ name (str): name of the agent.\n
54
+ is_termination_msg (function): a function that takes a message in the form of a dictionary\n
55
+ and returns a boolean value indicating if this received message is a termination message.\n
56
+ The dict can contain the following keys: "content", "role", "name", "function_call".\n
57
+ max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.\n
58
+ default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).\n
59
+ The limit only plays a role when human_input_mode is not "ALWAYS".\n
60
+ human_input_mode (str): whether to ask for human inputs every time a message is received.\n
61
+ Possible values are "ALWAYS", "TERMINATE", "NEVER".\n
62
+ (1) When "ALWAYS", the agent prompts for human input every time a message is received.\n
63
+ Under this mode, the conversation stops when the human input is "exit",\n
64
+ or when is_termination_msg is True and there is no human input.\n
65
+ (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or\n
66
+ the number of auto reply reaches the max_consecutive_auto_reply.\n
67
+ (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops\n
68
+ when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.\n
69
+ function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions.\n
70
+ code_execution_config (dict or False): config for the code execution.\n
71
+ To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:\n
72
+ - work_dir (Optional, str): The working directory for the code execution.\n
73
+ If None, a default working directory will be used.\n
74
+ The default working directory is the "extensions" directory under\n
75
+ "path_to_autogen".\n
76
+ - use_docker (Optional, list, str or bool): The docker image to use for code execution.\n
77
+ Default is True, which means the code will be executed in a docker container. A default list of images will be used.\n
78
+ If a list or a str of image name(s) is provided, the code will be executed in a docker container\n
79
+ with the first image successfully pulled.\n
80
+ If False, the code will be executed in the current environment.\n
81
+ We strongly recommend using docker for code execution.\n
82
+ - timeout (Optional, int): The maximum execution time in seconds.\n
83
+ - last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.\n
84
+ default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.\n
85
+ llm_config (LLMConfig or dict or False or None): llm inference configuration.\n
86
+ Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)\n
87
+ for available options.\n
88
+ Default to False, which disables llm-based auto reply.\n
89
+ When set to None, will use self.DEFAULT_CONFIG, which defaults to False.\n
90
+ system_message (str or List): system message for ChatCompletion inference.\n
91
+ Only used when llm_config is not False. Use it to reprogram the agent.\n
92
+ description (str): a short description of the agent. This description is used by other agents\n
93
+ (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)\n
94
+ **kwargs (dict): Please refer to other kwargs in\n
95
+ [ConversableAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/ConversableAgent).\n
94
96
  """
95
97
  super().__init__(
96
98
  name=name,
@@ -198,15 +198,6 @@ class DocAgent(ConversableAgent):
198
198
  )
199
199
  self.register_reply([ConversableAgent, None], self.generate_inner_group_chat_reply, position=0)
200
200
 
201
- self.context_variables: ContextVariables = ContextVariables(
202
- data={
203
- "DocumentsToIngest": [],
204
- "DocumentsIngested": [],
205
- "QueriesToRun": [],
206
- "QueryResults": [],
207
- }
208
- )
209
-
210
201
  self._triage_agent = DocumentTriageAgent(llm_config=llm_config)
211
202
 
212
203
  def create_error_agent_prompt(agent: ConversableAgent, messages: list[dict[str, Any]]) -> str:
@@ -396,7 +387,7 @@ class DocAgent(ConversableAgent):
396
387
  else:
397
388
  # First time initialization - no deduplication needed
398
389
  context_variables["DocumentsToIngest"] = ingestions
399
- context_variables["QueriesToRun"] = [query for query in queries]
390
+ context_variables["QueriesToRun"] = list(queries)
400
391
  context_variables["TaskInitiated"] = True
401
392
  response_message = "Updated context variables with task decisions"
402
393
 
@@ -15,7 +15,11 @@ from .document_utils import handle_input
15
15
 
16
16
  with optional_import_block():
17
17
  from docling.datamodel.base_models import InputFormat
18
- from docling.datamodel.pipeline_options import AcceleratorDevice, AcceleratorOptions, PdfPipelineOptions
18
+ from docling.datamodel.pipeline_options import ( # type: ignore[attr-defined]
19
+ AcceleratorDevice,
20
+ AcceleratorOptions,
21
+ PdfPipelineOptions,
22
+ )
19
23
  from docling.document_converter import DocumentConverter, PdfFormatOption
20
24
 
21
25
  __all__ = ["docling_parse_docs"]
autogen/browser_utils.py CHANGED
@@ -58,10 +58,10 @@ class SimpleTextBrowser:
58
58
  self.start_page: str = start_page if start_page else "about:blank"
59
59
  self.viewport_size = viewport_size # Applies only to the standard uri types
60
60
  self.downloads_folder = downloads_folder
61
- self.history: list[str] = list()
61
+ self.history: list[str] = []
62
62
  self.page_title: str | None = None
63
63
  self.viewport_current_page = 0
64
- self.viewport_pages: list[tuple[int, int]] = list()
64
+ self.viewport_pages: list[tuple[int, int]] = []
65
65
  self.set_address(self.start_page)
66
66
  self.bing_base_url = bing_base_url
67
67
  self.bing_api_key = bing_api_key
@@ -182,7 +182,7 @@ class SimpleTextBrowser:
182
182
  def _bing_search(self, query: str) -> None:
183
183
  results = self._bing_api_call(query)
184
184
 
185
- web_snippets: list[str] = list()
185
+ web_snippets: list[str] = []
186
186
  idx = 0
187
187
  for page in results["webPages"]["value"]:
188
188
  idx += 1
@@ -194,7 +194,7 @@ class SimpleTextBrowser:
194
194
  f"{idx}. [{dl['name']}]({dl['url']})\n{dl.get('snippet', '')}" # type: ignore[index]
195
195
  )
196
196
 
197
- news_snippets = list()
197
+ news_snippets = []
198
198
  if "news" in results:
199
199
  for page in results["news"]["value"]:
200
200
  idx += 1
@@ -4,16 +4,12 @@
4
4
  #
5
5
  # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
6
  # SPDX-License-Identifier: MIT
7
- import sys
8
7
  from types import TracebackType
9
8
  from typing import Any, Protocol
10
9
 
11
- from ..doc_utils import export_module
10
+ from typing_extensions import Self
12
11
 
13
- if sys.version_info >= (3, 11):
14
- from typing import Self
15
- else:
16
- from typing_extensions import Self
12
+ from ..doc_utils import export_module
17
13
 
18
14
 
19
15
  @export_module("autogen.cache")
@@ -4,19 +4,14 @@
4
4
  #
5
5
  # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
6
  # SPDX-License-Identifier: MIT
7
- import sys
8
7
  from types import TracebackType
9
8
  from typing import Any
10
9
 
11
10
  import diskcache
11
+ from typing_extensions import Self
12
12
 
13
13
  from .abstract_cache_base import AbstractCache
14
14
 
15
- if sys.version_info >= (3, 11):
16
- from typing import Self
17
- else:
18
- from typing_extensions import Self
19
-
20
15
 
21
16
  class DiskCache(AbstractCache):
22
17
  """Implementation of AbstractCache using the DiskCache library.
@@ -4,16 +4,12 @@
4
4
  #
5
5
  # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
6
  # SPDX-License-Identifier: MIT
7
- import sys
8
7
  from types import TracebackType
9
8
  from typing import Any
10
9
 
11
- from .abstract_cache_base import AbstractCache
10
+ from typing_extensions import Self
12
11
 
13
- if sys.version_info >= (3, 11):
14
- from typing import Self
15
- else:
16
- from typing_extensions import Self
12
+ from .abstract_cache_base import AbstractCache
17
13
 
18
14
 
19
15
  class InMemoryCache(AbstractCache):
@@ -5,14 +5,10 @@
5
5
  # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
6
  # SPDX-License-Identifier: MIT
7
7
  import pickle
8
- import sys
9
8
  from types import TracebackType
10
9
  from typing import Any
11
10
 
12
- if sys.version_info >= (3, 11):
13
- from typing import Self
14
- else:
15
- from typing_extensions import Self
11
+ from typing_extensions import Self
16
12
 
17
13
  from ..import_utils import optional_import_block, require_optional_import
18
14
  from .abstract_cache_base import AbstractCache
@@ -10,7 +10,7 @@ from .factory import CodeExecutorFactory
10
10
  from .local_commandline_code_executor import LocalCommandLineCodeExecutor
11
11
  from .markdown_code_extractor import MarkdownCodeExtractor
12
12
 
13
- __all__ = (
13
+ __all__ = [
14
14
  "CodeBlock",
15
15
  "CodeExecutor",
16
16
  "CodeExecutorFactory",
@@ -19,4 +19,12 @@ __all__ = (
19
19
  "DockerCommandLineCodeExecutor",
20
20
  "LocalCommandLineCodeExecutor",
21
21
  "MarkdownCodeExtractor",
22
- )
22
+ ]
23
+
24
+ # Try to import YepCode executor and add to __all__ if available
25
+ try:
26
+ from .yepcode_code_executor import YepCodeCodeExecutor, YepCodeCodeResult # noqa: F401
27
+
28
+ __all__.extend(["YepCodeCodeExecutor", "YepCodeCodeResult"])
29
+ except ImportError:
30
+ pass
autogen/coding/base.py CHANGED
@@ -98,13 +98,14 @@ class IPythonCodeResult(CodeResult):
98
98
  CodeExecutionConfig = TypedDict(
99
99
  "CodeExecutionConfig",
100
100
  {
101
- "executor": Literal["ipython-embedded", "commandline-local"] | CodeExecutor,
101
+ "executor": Literal["ipython-embedded", "commandline-local", "yepcode"] | CodeExecutor,
102
102
  "last_n_messages": int | Literal["auto"],
103
103
  "timeout": int,
104
104
  "use_docker": bool | str | list[str],
105
105
  "work_dir": str,
106
106
  "ipython-embedded": Mapping[str, Any],
107
107
  "commandline-local": Mapping[str, Any],
108
+ "yepcode": Mapping[str, Any],
108
109
  },
109
110
  total=False,
110
111
  )