genxai-framework 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cli/commands/__init__.py +3 -1
- cli/commands/connector.py +309 -0
- cli/commands/workflow.py +80 -0
- cli/main.py +3 -1
- genxai/__init__.py +33 -0
- genxai/agents/__init__.py +8 -0
- genxai/agents/presets.py +53 -0
- genxai/connectors/__init__.py +10 -0
- genxai/connectors/base.py +3 -3
- genxai/connectors/config_store.py +106 -0
- genxai/connectors/github.py +117 -0
- genxai/connectors/google_workspace.py +124 -0
- genxai/connectors/jira.py +108 -0
- genxai/connectors/notion.py +97 -0
- genxai/connectors/slack.py +121 -0
- genxai/core/agent/config_io.py +32 -1
- genxai/core/agent/runtime.py +41 -4
- genxai/core/graph/__init__.py +3 -0
- genxai/core/graph/engine.py +218 -11
- genxai/core/graph/executor.py +103 -10
- genxai/core/graph/nodes.py +28 -0
- genxai/core/graph/workflow_io.py +199 -0
- genxai/flows/__init__.py +33 -0
- genxai/flows/auction.py +66 -0
- genxai/flows/base.py +134 -0
- genxai/flows/conditional.py +45 -0
- genxai/flows/coordinator_worker.py +62 -0
- genxai/flows/critic_review.py +62 -0
- genxai/flows/ensemble_voting.py +49 -0
- genxai/flows/loop.py +42 -0
- genxai/flows/map_reduce.py +61 -0
- genxai/flows/p2p.py +146 -0
- genxai/flows/parallel.py +27 -0
- genxai/flows/round_robin.py +24 -0
- genxai/flows/router.py +45 -0
- genxai/flows/selector.py +63 -0
- genxai/flows/subworkflow.py +35 -0
- genxai/llm/factory.py +17 -10
- genxai/llm/providers/anthropic.py +116 -1
- genxai/observability/logging.py +2 -2
- genxai/security/auth.py +10 -6
- genxai/security/cost_control.py +6 -6
- genxai/security/jwt.py +2 -2
- genxai/security/pii.py +2 -2
- genxai/tools/builtin/__init__.py +3 -0
- genxai/tools/builtin/communication/human_input.py +32 -0
- genxai/tools/custom/test-2.py +19 -0
- genxai/tools/custom/test_tool_ui.py +9 -0
- genxai/tools/persistence/service.py +3 -3
- genxai/triggers/schedule.py +2 -2
- genxai/utils/tokens.py +6 -0
- {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.2.dist-info}/METADATA +63 -12
- {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.2.dist-info}/RECORD +57 -28
- {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.2.dist-info}/WHEEL +0 -0
- {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.2.dist-info}/entry_points.txt +0 -0
- {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.2.dist-info}/licenses/LICENSE +0 -0
- {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.2.dist-info}/top_level.txt +0 -0
genxai/flows/base.py
ADDED
@@ -0,0 +1,134 @@
+"""Base orchestrator for composable flow patterns."""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any, Dict, Iterable, List, Optional
+from abc import ABC, abstractmethod
+
+from genxai.core.agent.base import Agent
+from genxai.core.agent.registry import AgentRegistry
+from genxai.core.graph.engine import Graph
+from genxai.core.graph.nodes import AgentNode
+
+
+class FlowOrchestrator(ABC):
+    """Base class for flow orchestrators.
+
+    A flow orchestrator converts a list of agents into a Graph
+    and executes it using the existing graph engine.
+    """
+
+    def __init__(
+        self,
+        agents: Iterable[Agent],
+        name: str = "flow",
+        llm_provider: Any = None,
+        allow_empty_agents: bool = False,
+        timeout_seconds: float = 120.0,
+        retry_count: int = 3,
+        backoff_base: float = 1.0,
+        backoff_multiplier: float = 2.0,
+        cancel_on_failure: bool = True,
+    ) -> None:
+        self.agents = list(agents)
+        if not self.agents and not allow_empty_agents:
+            raise ValueError("FlowOrchestrator requires at least one agent")
+
+        self.name = name
+        self.llm_provider = llm_provider
+        self.timeout_seconds = timeout_seconds
+        self.retry_count = retry_count
+        self.backoff_base = backoff_base
+        self.backoff_multiplier = backoff_multiplier
+        self.cancel_on_failure = cancel_on_failure
+
+        for agent in self.agents:
+            AgentRegistry.register(agent)
+
+    @abstractmethod
+    def build_graph(self) -> Graph:
+        """Construct a Graph for this flow pattern."""
+
+    async def run(
+        self,
+        input_data: Any,
+        state: Optional[Dict[str, Any]] = None,
+        max_iterations: int = 100,
+    ) -> Dict[str, Any]:
+        """Execute the flow graph with the provided input."""
+        if state is None:
+            state = {}
+        state.setdefault(
+            "execution_config",
+            {
+                "timeout_seconds": self.timeout_seconds,
+                "retry_count": self.retry_count,
+                "backoff_base": self.backoff_base,
+                "backoff_multiplier": self.backoff_multiplier,
+                "cancel_on_failure": self.cancel_on_failure,
+            },
+        )
+        graph = self.build_graph()
+        return await graph.run(
+            input_data=input_data,
+            max_iterations=max_iterations,
+            state=state,
+            llm_provider=self.llm_provider,
+        )
+
+    def _agent_nodes(self) -> List[AgentNode]:
+        """Create AgentNode instances for each registered agent."""
+        return [AgentNode(id=agent.id, agent_id=agent.id) for agent in self.agents]
+
+    async def _execute_with_retry(
+        self,
+        runtime: Any,
+        task: str,
+        context: Dict[str, Any],
+    ) -> Any:
+        """Execute a runtime task with retries and timeout."""
+        delay = self.backoff_base
+        for attempt in range(self.retry_count + 1):
+            try:
+                coro = runtime.execute(task=task, context=context)
+                if self.timeout_seconds:
+                    return await asyncio.wait_for(coro, timeout=self.timeout_seconds)
+                return await coro
+            except asyncio.CancelledError:
+                raise
+            except Exception:
+                if attempt >= self.retry_count:
+                    raise
+                await asyncio.sleep(delay)
+                delay *= self.backoff_multiplier
+
+    async def _gather_tasks(self, coros: List[Any]) -> List[Any]:
+        """Run tasks concurrently, optionally canceling on first failure."""
+        tasks = [asyncio.create_task(coro) for coro in coros]
+        if not tasks:
+            return []
+
+        if not self.cancel_on_failure:
+            return await asyncio.gather(*tasks, return_exceptions=True)
+
+        results: List[Any] = [None] * len(tasks)
+        index_map = {task: idx for idx, task in enumerate(tasks)}
+        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
+
+        for task in done:
+            idx = index_map[task]
+            exc = task.exception()
+            if exc:
+                for pending_task in pending:
+                    pending_task.cancel()
+                await asyncio.gather(*pending, return_exceptions=True)
+                raise exc
+            results[idx] = task.result()
+
+        if pending:
+            pending_results = await asyncio.gather(*pending, return_exceptions=True)
+            for task, result in zip(pending, pending_results):
+                results[index_map[task]] = result
+
+        return results
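
The retry helper above builds a fresh coroutine per attempt, bounds each attempt with an optional timeout, and backs off exponentially between failures. A minimal standalone sketch of that pattern using only asyncio (the execute_with_retry name, the flaky() coroutine, and the parameter values are illustrative assumptions, not part of the package):

import asyncio

async def execute_with_retry(coro_factory, retry_count=3, timeout_seconds=5.0,
                             backoff_base=0.1, backoff_multiplier=2.0):
    # Mirrors the shape of FlowOrchestrator._execute_with_retry: retry transient
    # failures, cap each attempt with a timeout, grow the sleep between attempts.
    delay = backoff_base
    for attempt in range(retry_count + 1):
        try:
            # Create a fresh coroutine each attempt; a coroutine object cannot be awaited twice.
            return await asyncio.wait_for(coro_factory(), timeout=timeout_seconds)
        except asyncio.CancelledError:
            raise
        except Exception:
            if attempt >= retry_count:
                raise
            await asyncio.sleep(delay)
            delay *= backoff_multiplier

async def flaky(_attempts=[0]):
    # Hypothetical task that fails twice, then succeeds.
    _attempts[0] += 1
    if _attempts[0] <= 2:
        raise RuntimeError("transient failure")
    return "ok"

print(asyncio.run(execute_with_retry(flaky)))  # -> "ok" on the third attempt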
genxai/flows/conditional.py
ADDED
@@ -0,0 +1,45 @@
+"""Conditional flow orchestrator."""
+
+from typing import Callable, List
+
+from genxai.core.graph.engine import Graph
+from genxai.core.graph.edges import Edge
+from genxai.core.graph.nodes import AgentNode, InputNode, OutputNode
+from genxai.flows.base import FlowOrchestrator
+
+
+class ConditionalFlow(FlowOrchestrator):
+    """Route to a single agent based on a condition function."""
+
+    def __init__(
+        self,
+        agents: List,
+        condition: Callable[[dict], str],
+        name: str = "conditional_flow",
+        llm_provider=None,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+        self.condition = condition
+
+    def build_graph(self) -> Graph:
+        graph = Graph(name=self.name)
+        start = InputNode(id="input")
+        graph.add_node(start)
+
+        end = OutputNode(id="output")
+        graph.add_node(end)
+        nodes = self._agent_nodes()
+        for node in nodes:
+            graph.add_node(node)
+            graph.add_edge(
+                Edge(
+                    source=start.id,
+                    target=node.id,
+                    condition=lambda state, agent_id=node.id: self.condition(state) == agent_id,
+                )
+            )
+            graph.add_edge(Edge(source=node.id, target=end.id))
+
+        graph.add_edge(Edge(source=start.id, target=end.id))
+
+        return graph
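
The edge predicates above bind agent_id=node.id as a default argument so each lambda captures the node current at that loop iteration; a bare closure would late-bind and compare every edge against the last node. A standalone illustration of that detail (the route function and node ids are made up for the example):

route = lambda state: state["target"]        # stand-in for ConditionalFlow.condition
node_ids = ["planner", "coder", "reviewer"]  # hypothetical agent/node ids

# Default-argument binding: each predicate remembers its own node id.
predicates = [lambda state, agent_id=nid: route(state) == agent_id for nid in node_ids]
print([p({"target": "coder"}) for p in predicates])   # [False, True, False]

# Late-binding closure: every predicate sees only the final value of nid.
broken = [lambda state: route(state) == nid for nid in node_ids]
print([p({"target": "coder"}) for p in broken])       # [False, False, False]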
genxai/flows/coordinator_worker.py
ADDED
@@ -0,0 +1,62 @@
+"""Coordinator-worker flow orchestrator."""
+
+from typing import Any, Dict, List, Optional
+
+from genxai.core.agent.runtime import AgentRuntime
+from genxai.flows.base import FlowOrchestrator
+
+
+class CoordinatorWorkerFlow(FlowOrchestrator):
+    """Coordinator assigns tasks to worker agents."""
+
+    def __init__(
+        self,
+        agents: List[Any],
+        name: str = "coordinator_worker_flow",
+        llm_provider: Any = None,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+
+    async def run(
+        self,
+        input_data: Any,
+        state: Optional[Dict[str, Any]] = None,
+        max_iterations: int = 100,
+    ) -> Dict[str, Any]:
+        if state is None:
+            state = {}
+        state["input"] = input_data
+        state.setdefault("worker_results", [])
+
+        if len(self.agents) < 2:
+            raise ValueError("CoordinatorWorkerFlow requires at least two agents")
+
+        coordinator = self.agents[0]
+        workers = self.agents[1:]
+        coordinator_runtime = AgentRuntime(agent=coordinator, llm_provider=self.llm_provider)
+        worker_runtimes = {
+            agent.id: AgentRuntime(agent=agent, llm_provider=self.llm_provider)
+            for agent in workers
+        }
+
+        plan = await self._execute_with_retry(
+            coordinator_runtime,
+            task=state.get("task", "Break the task into worker assignments"),
+            context=state,
+        )
+        state["plan"] = plan
+
+        worker_task = state.get("worker_task", "Execute assigned task")
+        tasks = [
+            self._execute_with_retry(
+                worker_runtimes[worker.id],
+                task=worker_task,
+                context={**state, "worker_id": worker.id},
+            )
+            for worker in workers
+        ]
+        results = await self._gather_tasks(tasks)
+        for worker, result in zip(workers, results):
+            state["worker_results"].append({"worker_id": worker.id, "result": result})
+
+        return state
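
A hypothetical usage sketch based only on the signatures and state keys visible in this diff; the coordinator and worker agents, the provider, and the prompt strings are placeholders constructed elsewhere:

from genxai.flows.coordinator_worker import CoordinatorWorkerFlow

async def run_team(coordinator, workers, provider):
    # The first agent acts as the coordinator; the rest are workers.
    flow = CoordinatorWorkerFlow(agents=[coordinator, *workers], llm_provider=provider)
    state = await flow.run(
        input_data="Ship the 0.1.2 release notes",
        state={"task": "Split the work into assignments",
               "worker_task": "Complete your assignment"},
    )
    print(state["plan"])                   # coordinator output
    for entry in state["worker_results"]:  # one entry per worker
        print(entry["worker_id"], entry["result"])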
genxai/flows/critic_review.py
ADDED
@@ -0,0 +1,62 @@
+"""Critic review flow orchestrator."""
+
+from typing import Any, Dict, List, Optional
+
+from genxai.core.agent.runtime import AgentRuntime
+from genxai.flows.base import FlowOrchestrator
+
+
+class CriticReviewFlow(FlowOrchestrator):
+    """Generator -> critic -> revise loop with bounded iterations."""
+
+    def __init__(
+        self,
+        agents: List[Any],
+        name: str = "critic_review_flow",
+        llm_provider: Any = None,
+        max_iterations: int = 3,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+        self.max_iterations = max_iterations
+
+    async def run(
+        self,
+        input_data: Any,
+        state: Optional[Dict[str, Any]] = None,
+        max_iterations: int = 100,
+    ) -> Dict[str, Any]:
+        if state is None:
+            state = {}
+        state["input"] = input_data
+        state.setdefault("drafts", [])
+
+        if len(self.agents) < 2:
+            raise ValueError("CriticReviewFlow requires at least two agents")
+
+        generator = self.agents[0]
+        critic = self.agents[1]
+        gen_runtime = AgentRuntime(agent=generator, llm_provider=self.llm_provider)
+        critic_runtime = AgentRuntime(agent=critic, llm_provider=self.llm_provider)
+
+        draft = None
+        for _ in range(self.max_iterations):
+            gen_result = await self._execute_with_retry(
+                gen_runtime,
+                task=state.get("task", "Generate a draft"),
+                context={**state, "draft": draft},
+            )
+            draft = gen_result.get("output")
+            state["drafts"].append(draft)
+
+            critique = await self._execute_with_retry(
+                critic_runtime,
+                task=state.get("critic_task", "Critique the draft"),
+                context={**state, "draft": draft},
+            )
+            state["last_critique"] = critique
+
+            if state.get("accept", False):
+                break
+
+        state["final"] = draft
+        return state
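
A hypothetical usage sketch (the generator and critic agents and the provider are placeholders). Note that the loop only exits early when state["accept"] becomes truthy, which nothing in this file sets, so by default it runs for the configured max_iterations:

from genxai.flows.critic_review import CriticReviewFlow

async def draft_and_review(generator, critic, provider):
    flow = CriticReviewFlow(agents=[generator, critic],
                            llm_provider=provider, max_iterations=2)
    state = await flow.run(
        input_data="Changelog entry for 0.1.2",
        state={"task": "Write the entry", "critic_task": "Critique the entry"},
    )
    return state["final"], state["last_critique"], state["drafts"]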
genxai/flows/ensemble_voting.py
ADDED
@@ -0,0 +1,49 @@
+"""Ensemble voting flow orchestrator."""
+
+from typing import Any, Dict, List, Optional
+
+from genxai.core.agent.runtime import AgentRuntime
+from genxai.flows.base import FlowOrchestrator
+
+
+class EnsembleVotingFlow(FlowOrchestrator):
+    """Run all agents and aggregate outputs via simple voting."""
+
+    def __init__(
+        self,
+        agents: List[Any],
+        name: str = "ensemble_voting_flow",
+        llm_provider: Any = None,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+
+    async def run(
+        self,
+        input_data: Any,
+        state: Optional[Dict[str, Any]] = None,
+        max_iterations: int = 100,
+    ) -> Dict[str, Any]:
+        if state is None:
+            state = {}
+        state["input"] = input_data
+        state.setdefault("votes", {})
+
+        runtimes = {
+            agent.id: AgentRuntime(agent=agent, llm_provider=self.llm_provider)
+            for agent in self.agents
+        }
+
+        task = state.get("task", "Provide your answer")
+        tasks = [
+            self._execute_with_retry(runtimes[agent.id], task=task, context=state)
+            for agent in self.agents
+        ]
+        results = await self._gather_tasks(tasks)
+        for result in results:
+            output = str(getattr(result, "get", lambda *_: "")("output", "")).strip()
+            state["votes"].setdefault(output, 0)
+            state["votes"][output] += 1
+
+        if state["votes"]:
+            state["winner"] = max(state["votes"], key=state["votes"].get)
+        return state
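
The tally step above is written defensively: any non-dict result (such as an exception object) lacks .get(), so the getattr fallback collapses it to an empty-string vote. A standalone rerun of that logic on fabricated results:

results = [{"output": " Paris "}, {"output": "Paris"},
           {"output": "Lyon"}, RuntimeError("provider timeout")]  # fabricated outputs

votes = {}
for result in results:
    # Dicts yield their "output"; anything without .get() collapses to "".
    output = str(getattr(result, "get", lambda *_: "")("output", "")).strip()
    votes[output] = votes.get(output, 0) + 1

print(votes)                      # {'Paris': 2, 'Lyon': 1, '': 1}
print(max(votes, key=votes.get))  # 'Paris'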
genxai/flows/loop.py
ADDED
@@ -0,0 +1,42 @@
+"""Loop flow orchestrator."""
+
+from typing import List
+
+from genxai.core.graph.engine import Graph
+from genxai.core.graph.edges import Edge
+from genxai.core.graph.nodes import AgentNode, InputNode, LoopNode, OutputNode
+from genxai.flows.base import FlowOrchestrator
+
+
+class LoopFlow(FlowOrchestrator):
+    """Execute an agent inside a loop with a termination condition."""
+
+    def __init__(
+        self,
+        agents: List,
+        condition_key: str,
+        max_iterations: int = 5,
+        name: str = "loop_flow",
+        llm_provider=None,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+        self.condition_key = condition_key
+        self.loop_iterations = max_iterations
+
+    def build_graph(self) -> Graph:
+        graph = Graph(name=self.name)
+        start = InputNode(id="input")
+        loop = LoopNode(id="loop", condition=self.condition_key, max_iterations=self.loop_iterations)
+        agent = self._agent_nodes()[0]
+        end = OutputNode(id="output")
+
+        graph.add_node(start)
+        graph.add_node(loop)
+        graph.add_node(agent)
+        graph.add_node(end)
+
+        graph.add_edge(Edge(source=start.id, target=loop.id))
+        graph.add_edge(Edge(source=loop.id, target=agent.id))
+        graph.add_edge(Edge(source=agent.id, target=end.id))
+
+        return graph
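
A hypothetical usage sketch (the agent, provider, and condition key are placeholders). Only the first agent is wired into the graph; condition_key is handed to the LoopNode as its condition, and how that condition is evaluated is defined by the graph engine rather than by this file:

from genxai.flows.loop import LoopFlow

async def iterate(agent, provider):
    flow = LoopFlow(agents=[agent], condition_key="continue_looping",
                    max_iterations=5, llm_provider=provider)
    return await flow.run(input_data="Refine the answer until the condition clears")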
genxai/flows/map_reduce.py
ADDED
@@ -0,0 +1,61 @@
+"""MapReduce flow orchestrator."""
+
+import asyncio
+from typing import Any, Dict, List, Optional
+
+from genxai.core.agent.runtime import AgentRuntime
+from genxai.flows.base import FlowOrchestrator
+
+
+class MapReduceFlow(FlowOrchestrator):
+    """Run multiple agents in map phase, then summarize with reducer."""
+
+    def __init__(
+        self,
+        agents: List[Any],
+        name: str = "map_reduce_flow",
+        llm_provider: Any = None,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+
+    async def run(
+        self,
+        input_data: Any,
+        state: Optional[Dict[str, Any]] = None,
+        max_iterations: int = 100,
+    ) -> Dict[str, Any]:
+        if state is None:
+            state = {}
+        state["input"] = input_data
+        state.setdefault("map_results", [])
+
+        if len(self.agents) < 2:
+            raise ValueError("MapReduceFlow requires at least two agents")
+
+        *mappers, reducer = self.agents
+        mapper_runtimes = {
+            agent.id: AgentRuntime(agent=agent, llm_provider=self.llm_provider)
+            for agent in mappers
+        }
+        reducer_runtime = AgentRuntime(agent=reducer, llm_provider=self.llm_provider)
+
+        map_task = state.get("map_task", "Process shard")
+        tasks = [
+            self._execute_with_retry(
+                mapper_runtimes[mapper.id],
+                task=map_task,
+                context={**state, "mapper_id": mapper.id},
+            )
+            for mapper in mappers
+        ]
+        results = await self._gather_tasks(tasks)
+        for mapper, result in zip(mappers, results):
+            state["map_results"].append({"mapper_id": mapper.id, "result": result})
+
+        reduce_result = await self._execute_with_retry(
+            reducer_runtime,
+            task=state.get("reduce_task", "Summarize map results"),
+            context={**state, "map_results": state["map_results"]},
+        )
+        state["reduce_result"] = reduce_result
+        return state
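
A hypothetical usage sketch (the mapper and reducer agents, the provider, and the shard names are placeholders). The star-unpacking above means the last agent in the list is the reducer and every agent before it is a mapper:

from genxai.flows.map_reduce import MapReduceFlow

async def summarize_shards(mappers, reducer, provider):
    flow = MapReduceFlow(agents=[*mappers, reducer], llm_provider=provider)
    state = await flow.run(
        input_data=["shard-1", "shard-2", "shard-3"],
        state={"map_task": "Summarize your shard",
               "reduce_task": "Merge the shard summaries"},
    )
    return state["map_results"], state["reduce_result"]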
genxai/flows/p2p.py
ADDED
@@ -0,0 +1,146 @@
+"""Peer-to-peer flow orchestrator."""
+
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+from datetime import datetime
+
+from genxai.core.agent.runtime import AgentRuntime
+from genxai.flows.base import FlowOrchestrator
+
+
+class P2PFlow(FlowOrchestrator):
+    """Run a peer-to-peer agent loop with lightweight consensus stopping.
+
+    This flow doesn't use the graph engine for routing; it mirrors the
+    P2P pattern where agents communicate directly and decide when to stop.
+    """
+
+    def __init__(
+        self,
+        agents: List[Any],
+        name: str = "p2p_flow",
+        llm_provider: Any = None,
+        max_rounds: int = 5,
+        timeout_seconds: float = 300,
+        consensus_threshold: float = 0.6,
+        convergence_window: int = 3,
+        quality_threshold: float = 0.85,
+    ) -> None:
+        super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+        self.max_rounds = max_rounds
+        self.timeout_seconds = timeout_seconds
+        self.consensus_threshold = consensus_threshold
+        self.convergence_window = convergence_window
+        self.quality_threshold = quality_threshold
+        self._start_time = datetime.now()
+
+    async def run(
+        self,
+        input_data: Any,
+        state: Optional[Dict[str, Any]] = None,
+        max_iterations: int = 100,
+    ) -> Dict[str, Any]:
+        if state is None:
+            state = {}
+
+        state["input"] = input_data
+        state.setdefault("messages", [])
+        state.setdefault("solution_quality", 0.0)
+        state.setdefault("goal_achieved", False)
+
+        runtimes = {
+            agent.id: AgentRuntime(agent=agent, llm_provider=self.llm_provider)
+            for agent in self.agents
+        }
+
+        for round_idx in range(self.max_rounds):
+            for agent in self.agents:
+                result = await runtimes[agent.id].execute(
+                    task=state.get("task", "Collaborate with peers"),
+                    context=state,
+                )
+                state["messages"].append({
+                    "round": round_idx + 1,
+                    "agent_id": agent.id,
+                    "result": result,
+                })
+
+            state["solution_quality"] = self._estimate_quality(state)
+            if state["solution_quality"] >= self.quality_threshold:
+                state["goal_achieved"] = True
+
+            should_stop, reason = self._should_terminate(state, round_idx + 1)
+            if should_stop:
+                state["termination_reason"] = reason
+                break
+
+            if state.get("iterations", 0) >= max_iterations:
+                state["termination_reason"] = "Max iterations reached"
+                break
+
+        return state
+
+    def _should_terminate(self, state: Dict[str, Any], iteration: int) -> tuple[bool, Optional[str]]:
+        if iteration >= self.max_rounds:
+            return True, f"Max rounds reached ({self.max_rounds})"
+
+        elapsed = (datetime.now() - self._start_time).total_seconds()
+        if elapsed >= self.timeout_seconds:
+            return True, f"Timeout reached ({self.timeout_seconds}s)"
+
+        if state.get("goal_achieved"):
+            return True, "Goal achieved"
+
+        if self._consensus_reached(state):
+            return True, "Consensus to terminate"
+
+        if self._detect_convergence(state):
+            return True, "Conversation converged"
+
+        if self._detect_deadlock(state):
+            return True, "Deadlock detected"
+
+        return False, None
+
+    def _consensus_reached(self, state: Dict[str, Any]) -> bool:
+        messages = state.get("messages", [])
+        if not messages:
+            return False
+        votes = 0
+        for msg in messages[-len(self.agents):]:
+            result = msg.get("result", {})
+            if isinstance(result, dict) and str(result.get("status", "")).lower() == "completed":
+                votes += 1
+        return (votes / max(1, len(self.agents))) >= self.consensus_threshold
+
+    def _detect_convergence(self, state: Dict[str, Any]) -> bool:
+        messages = state.get("messages", [])
+        if len(messages) < self.convergence_window:
+            return False
+
+        recent = messages[-self.convergence_window:]
+        summaries = set(
+            str(msg.get("result", {}).get("output", ""))[:100] for msg in recent
+        )
+        return len(summaries) <= 2
+
+    def _detect_deadlock(self, state: Dict[str, Any]) -> bool:
+        messages = state.get("messages", [])
+        if len(messages) < 6:
+            return False
+        recent = messages[-6:]
+        senders = [msg.get("agent_id", "") for msg in recent]
+        return len(senders) >= 6 and senders[:3] == senders[3:6]
+
+    def _estimate_quality(self, state: Dict[str, Any]) -> float:
+        messages = state.get("messages", [])
+        if not messages:
+            return 0.0
+        recent = messages[-len(self.agents):]
+        scores = []
+        for msg in recent:
+            result = msg.get("result", {})
+            output = result.get("output") if isinstance(result, dict) else ""
+            scores.append(min(1.0, len(str(output)) / 500))
+        return sum(scores) / max(1, len(scores))
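
The convergence check is a pure function over the message log, so its behavior is easy to see in isolation. A standalone rerun with fabricated messages (the agent ids and outputs are made up): if the last convergence_window outputs collapse to at most two distinct 100-character prefixes, the conversation is treated as converged.

convergence_window = 3
messages = [
    {"round": 2, "agent_id": "a", "result": {"output": "Ship option B."}},
    {"round": 2, "agent_id": "b", "result": {"output": "Ship option B."}},
    {"round": 2, "agent_id": "c", "result": {"output": "Agreed: ship option B."}},
]

recent = messages[-convergence_window:]
summaries = set(str(m.get("result", {}).get("output", ""))[:100] for m in recent)
print(len(summaries) <= 2)  # True -> run() would stop with "Conversation converged"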
genxai/flows/parallel.py
ADDED
@@ -0,0 +1,27 @@
+"""Parallel flow orchestrator."""
+
+from typing import List
+
+from genxai.core.graph.engine import Graph
+from genxai.core.graph.edges import Edge, ParallelEdge
+from genxai.core.graph.nodes import AgentNode, InputNode, OutputNode
+from genxai.flows.base import FlowOrchestrator
+
+
+class ParallelFlow(FlowOrchestrator):
+    """Execute all agents in parallel from a shared input node."""
+
+    def build_graph(self) -> Graph:
+        graph = Graph(name=self.name)
+        start = InputNode(id="input")
+        end = OutputNode(id="output")
+        graph.add_node(start)
+        graph.add_node(end)
+
+        nodes: List[AgentNode] = self._agent_nodes()
+        for node in nodes:
+            graph.add_node(node)
+            graph.add_edge(ParallelEdge(source=start.id, target=node.id))
+            graph.add_edge(Edge(source=node.id, target=end.id))
+
+        return graph
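
A hypothetical usage sketch (the agents and provider are placeholders): every agent hangs off the shared input node via a ParallelEdge and feeds the single output node.

from genxai.flows.parallel import ParallelFlow

async def fan_out(agents, provider):
    flow = ParallelFlow(agents=agents, llm_provider=provider)
    return await flow.run(input_data="Summarize the incident report")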
genxai/flows/round_robin.py
ADDED
@@ -0,0 +1,24 @@
+"""Round-robin flow orchestrator."""
+
+from typing import List
+
+from genxai.core.graph.engine import Graph
+from genxai.core.graph.edges import Edge
+from genxai.core.graph.nodes import AgentNode
+from genxai.flows.base import FlowOrchestrator
+
+
+class RoundRobinFlow(FlowOrchestrator):
+    """Execute agents in a fixed, round-robin sequence."""
+
+    def build_graph(self) -> Graph:
+        graph = Graph(name=self.name)
+        nodes: List[AgentNode] = self._agent_nodes()
+
+        for node in nodes:
+            graph.add_node(node)
+
+        for idx in range(len(nodes) - 1):
+            graph.add_edge(Edge(source=nodes[idx].id, target=nodes[idx + 1].id))
+
+        return graph
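
A hypothetical usage sketch (the agents and provider are placeholders). The graph built here is a single pass: agents are chained in list order, with no explicit input/output nodes and no edge wrapping back from the last agent to the first.

from genxai.flows.round_robin import RoundRobinFlow

async def relay(agents, provider):
    flow = RoundRobinFlow(agents=agents, llm_provider=provider)
    return await flow.run(input_data="Pass the draft down the chain")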