ag2 0.9.9__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ag2 might be problematic.
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/METADATA +243 -214
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/RECORD +113 -87
- autogen/_website/generate_mkdocs.py +3 -3
- autogen/_website/notebook_processor.py +1 -1
- autogen/_website/utils.py +1 -1
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +105 -0
- autogen/a2a/client.py +280 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +165 -0
- autogen/agentchat/__init__.py +3 -0
- autogen/agentchat/agent.py +0 -2
- autogen/agentchat/assistant_agent.py +15 -15
- autogen/agentchat/chat.py +57 -41
- autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
- autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
- autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
- autogen/agentchat/contrib/capabilities/transforms.py +1 -1
- autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
- autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
- autogen/agentchat/contrib/llava_agent.py +1 -13
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
- autogen/agentchat/contrib/rag/query_engine.py +11 -11
- autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
- autogen/agentchat/contrib/swarm_agent.py +3 -2
- autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
- autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
- autogen/agentchat/contrib/web_surfer.py +1 -1
- autogen/agentchat/conversable_agent.py +359 -150
- autogen/agentchat/group/context_expression.py +21 -21
- autogen/agentchat/group/group_tool_executor.py +46 -15
- autogen/agentchat/group/guardrails.py +41 -33
- autogen/agentchat/group/handoffs.py +11 -11
- autogen/agentchat/group/multi_agent_chat.py +56 -2
- autogen/agentchat/group/on_condition.py +11 -11
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +119 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/groupchat.py +102 -49
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
- autogen/agentchat/realtime/experimental/function_observer.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +22 -13
- autogen/agentchat/user_proxy_agent.py +55 -53
- autogen/agents/experimental/document_agent/document_agent.py +1 -10
- autogen/agents/experimental/document_agent/parser_utils.py +5 -1
- autogen/browser_utils.py +4 -4
- autogen/cache/abstract_cache_base.py +2 -6
- autogen/cache/disk_cache.py +1 -6
- autogen/cache/in_memory_cache.py +2 -6
- autogen/cache/redis_cache.py +1 -5
- autogen/coding/__init__.py +10 -2
- autogen/coding/base.py +2 -1
- autogen/coding/docker_commandline_code_executor.py +1 -6
- autogen/coding/factory.py +9 -0
- autogen/coding/jupyter/docker_jupyter_server.py +1 -7
- autogen/coding/jupyter/jupyter_client.py +2 -9
- autogen/coding/jupyter/jupyter_code_executor.py +2 -7
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +0 -65
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/environments/docker_python_environment.py +3 -3
- autogen/environments/system_python_environment.py +5 -5
- autogen/environments/venv_python_environment.py +5 -5
- autogen/events/agent_events.py +1 -1
- autogen/events/client_events.py +1 -1
- autogen/fast_depends/utils.py +10 -0
- autogen/graph_utils.py +5 -7
- autogen/import_utils.py +3 -1
- autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
- autogen/io/processors/console_event_processor.py +8 -3
- autogen/llm_config/client.py +3 -2
- autogen/llm_config/config.py +168 -91
- autogen/llm_config/entry.py +38 -26
- autogen/llm_config/types.py +35 -0
- autogen/llm_config/utils.py +223 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
- autogen/messages/agent_messages.py +1 -1
- autogen/messages/client_messages.py +1 -1
- autogen/oai/__init__.py +8 -1
- autogen/oai/bedrock.py +0 -13
- autogen/oai/client.py +25 -11
- autogen/oai/client_utils.py +31 -1
- autogen/oai/cohere.py +4 -14
- autogen/oai/gemini.py +4 -6
- autogen/oai/gemini_types.py +1 -0
- autogen/oai/openai_utils.py +44 -115
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +142 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/tools/dependency_injection.py +4 -8
- autogen/tools/experimental/reliable/reliable.py +3 -2
- autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
- autogen/tools/function_utils.py +2 -1
- autogen/version.py +1 -1
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/oai/openai_utils.py
CHANGED

@@ -7,7 +7,6 @@
 
 import importlib
 import importlib.metadata
-import inspect
 import json
 import logging
 import os
@@ -22,6 +21,10 @@ from typing import TYPE_CHECKING, Any, Union
 from dotenv import find_dotenv, load_dotenv
 from packaging.version import parse
 from pydantic_core import to_jsonable_python
+from typing_extensions import deprecated
+
+from ..llm_config.utils import config_list_from_json as latest_config_list_from_json
+from ..llm_config.utils import filter_config as latest_filter
 
 if TYPE_CHECKING:
     from openai import OpenAI
@@ -472,6 +475,11 @@ def config_list_gpt4_gpt35(
 
 
 @export_module("autogen")
+@deprecated(
+    "`autogen.filter_config(...)` is deprecated. "
+    'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST").where(model="gpt-4o")" method instead. '
+    "Scheduled for removal in 0.11.0 version."
+)
 def filter_config(
     config_list: list[dict[str, Any]],
     filter_dict: dict[str, list[str | None] | set[str | None]] | None,
@@ -489,7 +497,9 @@ def filter_config(
 
         filter_dict (dict, optional): A dictionary specifying filter criteria where:
             - Keys are field names to check in each configuration dictionary
-            - Values
+            - Values can be:
+                * a single string value (e.g., {"model": "gpt-4o"})
+                * a list or set of acceptable values for that field (e.g., {"model": ["gpt-4o", "gpt-4o-mini"]})
             - A configuration matches if ALL filter keys are satisfied AND for each key,
               the config's field value matches at least one acceptable value
             - If a filter value includes None, configurations missing that field will match
@@ -517,22 +527,27 @@ def filter_config(
             {"model": "gpt-4", "tags": ["premium", "latest"]},
         ]
 
-        # Example 1: Single criterion
+        # Example 1: Single criterion with single string
+        filter_dict = {"model": "gpt-4o"}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-4o", "api_type": "openai"}] if present
+
+        # Example 2: Single criterion - matches any model in the list
         filter_dict = {"model": ["gpt-4", "gpt-4o"]}
         result = filter_config(configs, filter_dict)
         # Returns: [{"model": "gpt-4", "api_type": "openai"}, {"model": "gpt-4", "tags": ["premium", "latest"]}]
 
-        # Example
+        # Example 3: Multiple criteria - must satisfy ALL the conditions
         filter_dict = {"model": ["gpt-3.5-turbo"], "api_type": ["azure"]}
         result = filter_config(configs, filter_dict)
         # Returns: [{"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"}]
 
-        # Example
+        # Example 4: Tag filtering with list intersection
        filter_dict = {"tags": ["premium"]}
         result = filter_config(configs, filter_dict)
         # Returns: [{"model": "gpt-4", "tags": ["premium", "latest"]}]
 
-        # Example
+        # Example 5: Exclude matching configurations
         filter_dict = {"api_type": ["openai"]}
         result = filter_config(configs, filter_dict, exclude=True)
         # Returns configs that do NOT have api_type="openai"
@@ -543,92 +558,22 @@ def filter_config(
     it is considered a non-match and is excluded from the result.
 
     """
-
-
-
-
-
-
-
-    if filter_dict:
-        return [
-            item
-            for item in config_list
-            if all(_satisfies_criteria(item.get(key), values) != exclude for key, values in filter_dict.items())
-        ]
-    return config_list
-
-
-def _satisfies_criteria(config_value: Any, criteria_values: Any) -> bool:
-    """Check if a configuration field value satisfies the filter criteria.
-
-    This helper function implements the matching logic between a single configuration
-    field value and the acceptable values specified in the filter criteria. It handles
-    both scalar and list-type configuration values with appropriate matching strategies.
-
-    Args:
-        config_value (Any): The value from a configuration dictionary field.
-            Can be None, a scalar value, or a list of values.
-        criteria_values (Any): The acceptable values from the filter dictionary.
-            Can be a single value or a list/set of acceptable values.
-
-    Returns:
-        bool: True if the config_value satisfies the criteria, False otherwise.
-
-    Matching Logic:
-    - **None config values**: Always return False (missing fields don't match)
-    - **List config values**:
-      - If criteria is a list: Match if there's any intersection (set overlap)
-      - If criteria is scalar: Match if the scalar is contained in the config list
-    - **Scalar config values**:
-      - If criteria is a list: Match if the config value is in the criteria list
-      - If criteria is scalar: Match if the values are exactly equal
-
-    Examples:
-        ```python
-        # List config value with list criteria (intersection matching)
-        _satisfies_criteria(["gpt-4", "gpt-3.5"], ["gpt-4", "claude"])  # True (gpt-4 intersects)
-        _satisfies_criteria(["tag1", "tag2"], ["tag3", "tag4"])  # False (no intersection)
-
-        # List config value with scalar criteria (containment matching)
-        _satisfies_criteria(["premium", "latest"], "premium")  # True (premium is in list)
-        _satisfies_criteria(["tag1", "tag2"], "tag3")  # False (tag3 not in list)
-
-        # Scalar config value with list criteria (membership matching)
-        _satisfies_criteria("gpt-4", ["gpt-4", "gpt-3.5"])  # True (gpt-4 in criteria)
-        _satisfies_criteria("claude", ["gpt-4", "gpt-3.5"])  # False (claude not in criteria)
-
-        # Scalar config value with scalar criteria (equality matching)
-        _satisfies_criteria("openai", "openai")  # True (exact match)
-        _satisfies_criteria("openai", "azure")  # False (different values)
-
-        # None config values (missing fields)
-        _satisfies_criteria(None, ["gpt-4"])  # False (missing field)
-        _satisfies_criteria(None, "gpt-4")  # False (missing field)
-        ```
+    warnings.warn(
+        "`autogen.filter_config(...)` is deprecated. "
+        'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST").where(model="gpt-4o")" method instead. '
+        "Scheduled for removal in 0.11.0 version.",
+        DeprecationWarning,
+    )
 
-
-    This is an internal helper function used by `filter_config()`. The function
-    assumes that both parameters can be of various types and handles type
-    checking internally to determine the appropriate matching strategy.
-    """
-    if config_value is None:
-        return False
-
-    if isinstance(config_value, list):
-        if isinstance(criteria_values, list):
-            return bool(set(config_value) & set(criteria_values))  # Non-empty intersection
-        else:
-            return criteria_values in config_value
-    else:
-        # In filter_dict, filter could be either a list of values or a single value.
-        # For example, filter_dict = {"model": ["gpt-3.5-turbo"]} or {"model": "gpt-3.5-turbo"}
-        if isinstance(criteria_values, list):
-            return config_value in criteria_values
-        return bool(config_value == criteria_values)
+    return latest_filter(config_list=config_list, filter_dict=filter_dict, exclude=exclude)
 
 
 @export_module("autogen")
+@deprecated(
+    "`autogen.config_list_from_json(...)` is deprecated. "
+    'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST")" method instead. '
+    "Scheduled for removal in 0.11.0 version."
+)
 def config_list_from_json(
     env_or_file: str,
     file_location: str | None = "",
@@ -669,34 +614,18 @@ def config_list_from_json(
     Raises:
        FileNotFoundError: if env_or_file is neither found as an environment variable nor a file
    """
-
-
-
-
-
-
-
-    env_str = os.environ.get(env_or_file)
-
-    if env_str:
-        # The environment variable exists. We should use information from it.
-        if os.path.exists(env_str):
-            # It is a file location, and we need to load the json from the file.
-            with open(env_str) as file:
-                json_str = file.read()
-        else:
-            # Else, it should be a JSON string by itself.
-            json_str = env_str
-        config_list = json.loads(json_str)
-    else:
-        # The environment variable does not exist.
-        # So, `env_or_file` is a filename. We should use the file location.
-        config_list_path = os.path.join(file_location, env_or_file) if file_location is not None else env_or_file
-
-        with open(config_list_path) as json_file:
-            config_list = json.load(json_file)
+    warnings.warn(
+        "`autogen.config_list_from_json(...)` is deprecated. "
+        'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST")" method instead. '
+        "Scheduled for removal in 0.11.0 version.",
+        DeprecationWarning,
+    )
 
-    return
+    return latest_config_list_from_json(
+        env_or_file=env_or_file,
+        file_location=file_location,
+        filter_dict=filter_dict,
+    )
 
 
 def get_config(
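Net effect of these changes: `filter_config` and `config_list_from_json` keep their signatures but become deprecation shims that emit a `DeprecationWarning` and delegate to the new implementations in `autogen.llm_config.utils`. A hedged migration sketch, based only on the deprecation messages above (the exact `LLMConfig` fluent API may differ):

```python
# Migration sketch; .from_json(path=...) and .where(model=...) are taken
# verbatim from the deprecation messages, not from the LLMConfig source.
from autogen import LLMConfig

# Before (deprecated, scheduled for removal in 0.11.0):
#   config_list = config_list_from_json("OAI_CONFIG_LIST")
#   config_list = filter_config(config_list, {"model": "gpt-4o"})

# After, per the warning text:
llm_config = LLMConfig.from_json(path="OAI_CONFIG_LIST").where(model="gpt-4o")
```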
autogen/remote/__init__.py
ADDED

@@ -0,0 +1,18 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+try:
+    import httpx  # noqa: F401
+except ImportError as e:
+    raise ImportError("httpx is not installed. Please install it with:\npip install httpx") from e
+
+from .agent import HTTPRemoteAgent
+from .httpx_client_factory import HttpxClientFactory
+from .runtime import HTTPAgentBus
+
+__all__ = (
+    "HTTPAgentBus",
+    "HTTPRemoteAgent",
+    "HttpxClientFactory",
+)
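The guarded import above makes httpx an optional dependency for the new `autogen.remote` package. Minimal usage, mirroring the `HTTPRemoteAgent` docstring in the next file (the URL is a placeholder):

```python
# Requires the optional dependency: pip install httpx
from autogen.remote import HTTPRemoteAgent

remote_agent = HTTPRemoteAgent(url="http://api.example.com/agents", name="my_remote_agent")
```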
autogen/remote/agent.py
ADDED

@@ -0,0 +1,199 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from typing import Any
+
+import httpx
+
+from autogen import ConversableAgent
+from autogen.agentchat.group import ContextVariables
+from autogen.oai.client import OpenAIWrapper
+
+from .errors import RemoteAgentError, RemoteAgentNotFoundError
+from .httpx_client_factory import ClientFactory, EmptyClientFactory
+from .protocol import RequestMessage, ResponseMessage
+from .retry import NoRetryPolicy, RetryPolicy
+
+
+class HTTPRemoteAgent(ConversableAgent):
+    """A remote agent that communicates with other agents via HTTP long-polling.
+
+    This agent forwards messages to a remote endpoint and handles the response
+    through HTTP requests. It supports both synchronous and asynchronous operations.
+
+    Example:
+        >>> remote_agent = HTTPRemoteAgent(url="http://api.example.com/agents", name="my_remote_agent")
+    """
+
+    def __init__(
+        self,
+        url: str,
+        name: str,
+        *,
+        silent: bool = False,
+        client: ClientFactory | None = None,
+        retry_policy: RetryPolicy | None = None,
+    ) -> None:
+        """Initialize the HTTPRemoteAgent.
+
+        Args:
+            url (str): The base URL of the remote agent service.
+            name (str): The name of this agent.
+            silent (bool): If True, suppresses logging output.
+            client (ClientFactory | None): HTTP client factory. If None, uses EmptyClientFactory.
+            retry_policy (RetryPolicy | None): Retry policy for HTTP requests. If None, uses NoRetryPolicy.
+        """
+
+        self.url = url
+        self.retry_policy: RetryPolicy = retry_policy or NoRetryPolicy
+
+        self._httpx_client_factory = client or EmptyClientFactory()
+
+        super().__init__(name, silent=silent)
+
+        self.__llm_config: dict[str, Any] = {}
+
+        self.replace_reply_func(
+            ConversableAgent.generate_oai_reply,
+            HTTPRemoteAgent.generate_remote_reply,
+        )
+        self.replace_reply_func(
+            ConversableAgent.a_generate_oai_reply,
+            HTTPRemoteAgent.a_generate_remote_reply,
+        )
+
+    def generate_remote_reply(
+        self,
+        messages: list[dict[str, Any]] | None = None,
+        sender: ConversableAgent | None = None,
+        config: OpenAIWrapper | None = None,
+    ) -> tuple[bool, dict[str, Any] | None]:
+        if messages is None:
+            messages = self._oai_messages[sender]
+
+        retry_policy = self.retry_policy()
+
+        task_id: Any = None
+        with self._httpx_client_factory.make_sync() as client:
+            while True:
+                with retry_policy:
+                    if task_id is None:
+                        # initiate remote procedure
+                        task_id = self._process_create_remote_task_response(
+                            client.post(
+                                f"{self.url}/{self.name}",
+                                content=RequestMessage(
+                                    messages=messages,
+                                    context=self.context_variables.data,
+                                    client_tools=self.__llm_config.get("tools", []),
+                                ).model_dump_json(),
+                            )
+                        )
+
+                    reply_response = client.get(f"{self.url}/{self.name}/{task_id}")
+
+                    if reply_response.status_code in (200, 204):  # valid answer codes
+                        break
+
+                    if reply_response.status_code == 425:  # task still in progress
+                        continue
+
+                    if reply_response.status_code == 404:
+                        task_id = None  # recreate task due remote agent lost it
+                        continue
+
+                    raise RemoteAgentError(f"Remote client error: {reply_response}, {reply_response.content!r}")
+
+        if reply := self._process_remote_reply(reply_response):
+            if sender:
+                context_variables = ContextVariables(reply.context)
+                sender.context_variables.update(context_variables.to_dict())
+            # TODO: support multiple messages response for remote chat history
+            return True, reply.messages[-1]
+
+        return True, None
+
+    async def a_generate_remote_reply(
+        self,
+        messages: list[dict[str, Any]] | None = None,
+        sender: ConversableAgent | None = None,
+        config: OpenAIWrapper | None = None,
+    ) -> tuple[bool, dict[str, Any] | None]:
+        if messages is None:
+            messages = self._oai_messages[sender]
+
+        retry_policy = self.retry_policy()
+
+        task_id: Any = None
+        async with self._httpx_client_factory() as client:
+            while True:
+                with retry_policy:
+                    if task_id is None:
+                        # initiate remote procedure
+                        task_id = self._process_create_remote_task_response(
+                            await client.post(
+                                f"{self.url}/{self.name}",
+                                content=RequestMessage(
+                                    messages=messages,
+                                    context=self.context_variables.data,
+                                    client_tools=self.__llm_config.get("tools", []),
+                                ).model_dump_json(),
+                            )
+                        )
+
+                    reply_response = await client.get(f"{self.url}/{self.name}/{task_id}")
+
+                    if reply_response.status_code in (200, 204):  # valid answer codes
+                        break
+
+                    if reply_response.status_code == 425:  # task still in progress
+                        continue
+
+                    if reply_response.status_code == 404:
+                        task_id = None  # recreate task due remote agent lost it
+                        continue
+
+                    raise RemoteAgentError(f"Remote client error: {reply_response}, {reply_response.content!r}")
+
+        if reply := self._process_remote_reply(reply_response):
+            if sender:
+                context_variables = ContextVariables(reply.context)
+                sender.context_variables.update(context_variables.to_dict())
+            # TODO: support multiple messages response for remote chat history
+            return True, reply.messages[-1]
+
+        return True, None
+
+    def _process_create_remote_task_response(self, response: httpx.Response) -> Any:
+        if response.status_code == 404:
+            raise RemoteAgentNotFoundError(self.name)
+
+        if response.status_code != 202:
+            raise RemoteAgentError(f"Remote client error: {response}, {response.content!r}")
+
+        return response.json()
+
+    def _process_remote_reply(self, reply_response: httpx.Response) -> ResponseMessage | None:
+        if reply_response.status_code == 204:
+            return None
+
+        try:
+            serialized_message = ResponseMessage.model_validate_json(reply_response.content)
+
+        except Exception as e:
+            raise RemoteAgentError(f"Remote client error: {reply_response}, {reply_response.content!r}") from e
+
+        return serialized_message
+
+    def update_tool_signature(
+        self,
+        tool_sig: str | dict[str, Any],
+        is_remove: bool,
+        silent_override: bool = False,
+    ) -> None:
+        self.__llm_config = self._update_tool_config(
+            self.__llm_config,
+            tool_sig=tool_sig,
+            is_remove=is_remove,
+            silent_override=silent_override,
+        )
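`generate_remote_reply` above implies a long-polling contract: POST `{url}/{name}` creates a task (202 plus a JSON task id; 404 means the agent is unknown), then GET `{url}/{name}/{task_id}` is polled until 200 (result) or 204 (no reply), with 425 meaning still in progress and 404 meaning the task was lost and should be recreated. A hedged sketch of a server honoring that contract; this is not the actual `HTTPAgentBus` from `autogen/remote/runtime.py`, and FastAPI plus the in-memory task store are assumptions for illustration:

```python
# Illustrative server for the status codes the client above checks.
import uuid

from fastapi import FastAPI, Request, Response

app = FastAPI()
tasks: dict[str, dict] = {}  # task_id -> {"done": bool, "result": str | None}


@app.post("/{agent_name}", status_code=202)
async def create_task(agent_name: str, request: Request) -> str:
    # request body carries the RequestMessage JSON (messages, context, client_tools)
    task_id = str(uuid.uuid4())
    tasks[task_id] = {"done": False, "result": None}
    # ...schedule the agent run in the background, filling tasks[task_id]...
    return task_id  # the client reads this via response.json()


@app.get("/{agent_name}/{task_id}")
async def poll_task(agent_name: str, task_id: str) -> Response:
    task = tasks.get(task_id)
    if task is None:
        return Response(status_code=404)  # client resets task_id and recreates
    if not task["done"]:
        return Response(status_code=425)  # client keeps polling
    if task["result"] is None:
        return Response(status_code=204)  # finished with no reply
    return Response(task["result"], media_type="application/json")  # 200 + ResponseMessage JSON
```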
autogen/remote/agent_service.py
ADDED

@@ -0,0 +1,142 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import warnings
+from typing import Any, Literal, cast
+
+from autogen.agentchat import ConversableAgent
+from autogen.agentchat.conversable_agent import normilize_message_to_oai
+from autogen.agentchat.group.context_variables import ContextVariables
+from autogen.agentchat.group.group_tool_executor import GroupToolExecutor
+from autogen.agentchat.group.reply_result import ReplyResult
+from autogen.agentchat.group.targets.transition_target import TransitionTarget
+
+from .protocol import RemoteService, RequestMessage, ResponseMessage, get_tool_names
+
+
+class AgentService(RemoteService):
+    def __init__(self, agent: ConversableAgent) -> None:
+        self.name = agent.name
+        self.agent = agent
+
+    async def __call__(self, state: RequestMessage) -> ResponseMessage | None:
+        out_message: dict[str, Any] | None
+        if guardrail_result := self.agent.run_input_guardrails(state.messages):
+            # input guardrail activated by initial messages
+            _, out_message = normilize_message_to_oai(guardrail_result.reply, self.agent.name, role="assistant")
+            return ResponseMessage(messages=[out_message], context=state.context)
+
+        context_variables = ContextVariables(state.context)
+        tool_executor = self._make_tool_executor(context_variables)
+
+        local_history: list[dict[str, Any]] = []
+        while True:
+            messages = state.messages + local_history
+
+            # TODO: catch ask user input event
+            is_final, _ = await self.agent.a_check_termination_and_human_reply(messages)
+            if is_final:
+                break
+
+            reply = await self.agent.a_generate_reply(
+                messages,
+                exclude=(
+                    ConversableAgent.check_termination_and_human_reply,
+                    ConversableAgent.a_check_termination_and_human_reply,
+                    ConversableAgent.generate_oai_reply,
+                    ConversableAgent.a_generate_oai_reply,
+                ),
+            )
+
+            if not reply:
+                _, reply = await self.agent.a_generate_oai_reply(
+                    messages,
+                    tools=state.client_tools,
+                )
+
+            should_continue, out_message = self._add_message_to_local_history(reply, role="assistant")
+            if out_message:
+                local_history.append(out_message)
+            if not should_continue:
+                break
+            out_message = cast(dict[str, Any], out_message)
+
+            called_tools = get_tool_names(out_message.get("tool_calls", []))
+            if state.client_tool_names.intersection(called_tools):
+                break  # return client tool execution command back to client
+
+            tool_result, updated_context_variables = self._try_execute_local_tool(tool_executor, out_message)
+
+            if updated_context_variables:
+                context_variables.update(updated_context_variables.to_dict())
+
+            should_continue, out_message = self._add_message_to_local_history(tool_result, role="tool")
+            if out_message:
+                local_history.append(out_message)
+            if not should_continue:
+                break
+
+        if not local_history:
+            return None
+
+        return ResponseMessage(messages=local_history, context=context_variables.data or None)
+
+    def _add_message_to_local_history(
+        self, message: str | dict[str, Any] | None, role: str
+    ) -> tuple[Literal[True], dict[str, Any]] | tuple[Literal[False], dict[str, Any] | None]:
+        if message is None:
+            return False, None  # output message is empty, interrupt the loop
+
+        if guardrail_result := self.agent.run_output_guardrails(message):
+            _, out_message = normilize_message_to_oai(guardrail_result.reply, self.agent.name, role=role)
+            return False, out_message  # output guardrail activated, interrupt the loop
+
+        valid, out_message = normilize_message_to_oai(message, self.agent.name, role=role)
+        if not valid:
+            return False, None  # tool result is not valid OAI message, interrupt the loop
+
+        return True, out_message
+
+    def _make_tool_executor(self, context_variables: ContextVariables) -> GroupToolExecutor:
+        tool_executor = GroupToolExecutor()
+        for tool in self.agent.tools:
+            # TODO: inject ChatContext to tool
+            new_tool = tool_executor.make_tool_copy_with_context_variables(tool, context_variables) or tool
+            tool_executor.register_for_execution(serialize=False, silent_override=True)(new_tool)
+        return tool_executor
+
+    def _try_execute_local_tool(
+        self,
+        tool_executor: GroupToolExecutor,
+        tool_message: dict[str, Any],
+    ) -> tuple[dict[str, Any] | None, ContextVariables | None]:
+        tool_result: dict[str, Any] | None = None
+        updated_context_variables: ContextVariables | None = None
+
+        if "tool_calls" in tool_message:
+            _, tool_result = tool_executor.generate_tool_calls_reply([tool_message])
+        if tool_result is None:
+            return tool_result, updated_context_variables
+
+        if "tool_responses" in tool_result:
+            # TODO: catch handoffs
+            for tool_response in tool_result["tool_responses"]:
+                content = tool_response["content"]
+
+                if isinstance(content, TransitionTarget):
+                    warnings.warn(
+                        f"Tool {self.agent.name} returned a target, which is not supported in remote mode"
+                    )
+
+                elif isinstance(content, ReplyResult):
+                    if content.target:
+                        warnings.warn(
+                            f"Tool {self.agent.name} returned a target, which is not supported in remote mode"
+                        )
+
+                    if content.context_variables:
+                        updated_context_variables = content.context_variables
+                    tool_response["content"] = content.message
+
+        return tool_result, updated_context_variables
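For orientation, a hedged sketch of calling `AgentService` directly; the `RequestMessage` fields mirror how `HTTPRemoteAgent` constructs it above, but the field defaults and the `llm_config=False` placeholder agent are assumptions:

```python
import asyncio

from autogen import ConversableAgent
from autogen.remote.agent_service import AgentService
from autogen.remote.protocol import RequestMessage

agent = ConversableAgent(name="helper", llm_config=False)  # placeholder agent, no LLM
service = AgentService(agent)


async def main() -> None:
    request = RequestMessage(
        messages=[{"role": "user", "content": "Hello"}],
        context={},
        client_tools=[],
    )
    # Runs the guardrail -> reply -> local-tool loop above; None means no reply.
    response = await service(request)
    if response is not None:
        print(response.messages[-1])


asyncio.run(main())
```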
autogen/remote/errors.py
ADDED

@@ -0,0 +1,17 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+class RemoteAgentError(Exception):
+    """Base class for remote agent errors"""
+
+    pass
+
+
+class RemoteAgentNotFoundError(RemoteAgentError):
+    """Raised when a remote agent is not found"""
+
+    def __init__(self, agent_name: str) -> None:
+        self.agent_name = agent_name
+        super().__init__(f"Remote agent `{agent_name}` not found")
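Since `RemoteAgentNotFoundError` subclasses `RemoteAgentError`, callers can catch the specific 404-on-create case separately from other remote failures. A small sketch, assuming a `remote_agent` built as shown earlier:

```python
from autogen.remote.errors import RemoteAgentError, RemoteAgentNotFoundError

try:
    ok, reply = remote_agent.generate_remote_reply(messages=[{"role": "user", "content": "hi"}])
except RemoteAgentNotFoundError as e:
    # raised when task creation returns 404: the named agent is not registered remotely
    print(f"Agent {e.agent_name!r} not found")
except RemoteAgentError as e:
    # any other non-retryable remote failure
    print(f"Remote call failed: {e}")
```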