genxai-framework 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cli/__init__.py +3 -0
- cli/commands/__init__.py +6 -0
- cli/commands/approval.py +85 -0
- cli/commands/audit.py +127 -0
- cli/commands/metrics.py +25 -0
- cli/commands/tool.py +389 -0
- cli/main.py +32 -0
- genxai/__init__.py +81 -0
- genxai/api/__init__.py +5 -0
- genxai/api/app.py +21 -0
- genxai/config/__init__.py +5 -0
- genxai/config/settings.py +37 -0
- genxai/connectors/__init__.py +19 -0
- genxai/connectors/base.py +122 -0
- genxai/connectors/kafka.py +92 -0
- genxai/connectors/postgres_cdc.py +95 -0
- genxai/connectors/registry.py +44 -0
- genxai/connectors/sqs.py +94 -0
- genxai/connectors/webhook.py +73 -0
- genxai/core/__init__.py +37 -0
- genxai/core/agent/__init__.py +32 -0
- genxai/core/agent/base.py +206 -0
- genxai/core/agent/config_io.py +59 -0
- genxai/core/agent/registry.py +98 -0
- genxai/core/agent/runtime.py +970 -0
- genxai/core/communication/__init__.py +6 -0
- genxai/core/communication/collaboration.py +44 -0
- genxai/core/communication/message_bus.py +192 -0
- genxai/core/communication/protocols.py +35 -0
- genxai/core/execution/__init__.py +22 -0
- genxai/core/execution/metadata.py +181 -0
- genxai/core/execution/queue.py +201 -0
- genxai/core/graph/__init__.py +30 -0
- genxai/core/graph/checkpoints.py +77 -0
- genxai/core/graph/edges.py +131 -0
- genxai/core/graph/engine.py +813 -0
- genxai/core/graph/executor.py +516 -0
- genxai/core/graph/nodes.py +161 -0
- genxai/core/graph/trigger_runner.py +40 -0
- genxai/core/memory/__init__.py +19 -0
- genxai/core/memory/base.py +72 -0
- genxai/core/memory/embedding.py +327 -0
- genxai/core/memory/episodic.py +448 -0
- genxai/core/memory/long_term.py +467 -0
- genxai/core/memory/manager.py +543 -0
- genxai/core/memory/persistence.py +297 -0
- genxai/core/memory/procedural.py +461 -0
- genxai/core/memory/semantic.py +526 -0
- genxai/core/memory/shared.py +62 -0
- genxai/core/memory/short_term.py +303 -0
- genxai/core/memory/vector_store.py +508 -0
- genxai/core/memory/working.py +211 -0
- genxai/core/state/__init__.py +6 -0
- genxai/core/state/manager.py +293 -0
- genxai/core/state/schema.py +115 -0
- genxai/llm/__init__.py +14 -0
- genxai/llm/base.py +150 -0
- genxai/llm/factory.py +329 -0
- genxai/llm/providers/__init__.py +1 -0
- genxai/llm/providers/anthropic.py +249 -0
- genxai/llm/providers/cohere.py +274 -0
- genxai/llm/providers/google.py +334 -0
- genxai/llm/providers/ollama.py +147 -0
- genxai/llm/providers/openai.py +257 -0
- genxai/llm/routing.py +83 -0
- genxai/observability/__init__.py +6 -0
- genxai/observability/logging.py +327 -0
- genxai/observability/metrics.py +494 -0
- genxai/observability/tracing.py +372 -0
- genxai/performance/__init__.py +39 -0
- genxai/performance/cache.py +256 -0
- genxai/performance/pooling.py +289 -0
- genxai/security/audit.py +304 -0
- genxai/security/auth.py +315 -0
- genxai/security/cost_control.py +528 -0
- genxai/security/default_policies.py +44 -0
- genxai/security/jwt.py +142 -0
- genxai/security/oauth.py +226 -0
- genxai/security/pii.py +366 -0
- genxai/security/policy_engine.py +82 -0
- genxai/security/rate_limit.py +341 -0
- genxai/security/rbac.py +247 -0
- genxai/security/validation.py +218 -0
- genxai/tools/__init__.py +21 -0
- genxai/tools/base.py +383 -0
- genxai/tools/builtin/__init__.py +131 -0
- genxai/tools/builtin/communication/__init__.py +15 -0
- genxai/tools/builtin/communication/email_sender.py +159 -0
- genxai/tools/builtin/communication/notification_manager.py +167 -0
- genxai/tools/builtin/communication/slack_notifier.py +118 -0
- genxai/tools/builtin/communication/sms_sender.py +118 -0
- genxai/tools/builtin/communication/webhook_caller.py +136 -0
- genxai/tools/builtin/computation/__init__.py +15 -0
- genxai/tools/builtin/computation/calculator.py +101 -0
- genxai/tools/builtin/computation/code_executor.py +183 -0
- genxai/tools/builtin/computation/data_validator.py +259 -0
- genxai/tools/builtin/computation/hash_generator.py +129 -0
- genxai/tools/builtin/computation/regex_matcher.py +201 -0
- genxai/tools/builtin/data/__init__.py +15 -0
- genxai/tools/builtin/data/csv_processor.py +213 -0
- genxai/tools/builtin/data/data_transformer.py +299 -0
- genxai/tools/builtin/data/json_processor.py +233 -0
- genxai/tools/builtin/data/text_analyzer.py +288 -0
- genxai/tools/builtin/data/xml_processor.py +175 -0
- genxai/tools/builtin/database/__init__.py +15 -0
- genxai/tools/builtin/database/database_inspector.py +157 -0
- genxai/tools/builtin/database/mongodb_query.py +196 -0
- genxai/tools/builtin/database/redis_cache.py +167 -0
- genxai/tools/builtin/database/sql_query.py +145 -0
- genxai/tools/builtin/database/vector_search.py +163 -0
- genxai/tools/builtin/file/__init__.py +17 -0
- genxai/tools/builtin/file/directory_scanner.py +214 -0
- genxai/tools/builtin/file/file_compressor.py +237 -0
- genxai/tools/builtin/file/file_reader.py +102 -0
- genxai/tools/builtin/file/file_writer.py +122 -0
- genxai/tools/builtin/file/image_processor.py +186 -0
- genxai/tools/builtin/file/pdf_parser.py +144 -0
- genxai/tools/builtin/test/__init__.py +15 -0
- genxai/tools/builtin/test/async_simulator.py +62 -0
- genxai/tools/builtin/test/data_transformer.py +99 -0
- genxai/tools/builtin/test/error_generator.py +82 -0
- genxai/tools/builtin/test/simple_math.py +94 -0
- genxai/tools/builtin/test/string_processor.py +72 -0
- genxai/tools/builtin/web/__init__.py +15 -0
- genxai/tools/builtin/web/api_caller.py +161 -0
- genxai/tools/builtin/web/html_parser.py +330 -0
- genxai/tools/builtin/web/http_client.py +187 -0
- genxai/tools/builtin/web/url_validator.py +162 -0
- genxai/tools/builtin/web/web_scraper.py +170 -0
- genxai/tools/custom/my_test_tool_2.py +9 -0
- genxai/tools/dynamic.py +105 -0
- genxai/tools/mcp_server.py +167 -0
- genxai/tools/persistence/__init__.py +6 -0
- genxai/tools/persistence/models.py +55 -0
- genxai/tools/persistence/service.py +322 -0
- genxai/tools/registry.py +227 -0
- genxai/tools/security/__init__.py +11 -0
- genxai/tools/security/limits.py +214 -0
- genxai/tools/security/policy.py +20 -0
- genxai/tools/security/sandbox.py +248 -0
- genxai/tools/templates.py +435 -0
- genxai/triggers/__init__.py +19 -0
- genxai/triggers/base.py +104 -0
- genxai/triggers/file_watcher.py +75 -0
- genxai/triggers/queue.py +68 -0
- genxai/triggers/registry.py +82 -0
- genxai/triggers/schedule.py +66 -0
- genxai/triggers/webhook.py +68 -0
- genxai/utils/__init__.py +1 -0
- genxai/utils/tokens.py +295 -0
- genxai_framework-0.1.0.dist-info/METADATA +495 -0
- genxai_framework-0.1.0.dist-info/RECORD +156 -0
- genxai_framework-0.1.0.dist-info/WHEEL +5 -0
- genxai_framework-0.1.0.dist-info/entry_points.txt +2 -0
- genxai_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
- genxai_framework-0.1.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,516 @@
|
|
|
1
|
+
"""Workflow execution engine for GenXAI."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import copy
|
|
5
|
+
from typing import Any, Dict, List, Optional
|
|
6
|
+
import logging
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from genxai.core.graph.engine import Graph
|
|
10
|
+
from genxai.core.graph.nodes import InputNode, OutputNode, AgentNode, NodeType
|
|
11
|
+
from genxai.core.graph.edges import Edge, ConditionalEdge
|
|
12
|
+
from genxai.core.agent.base import Agent, AgentFactory
|
|
13
|
+
from genxai.core.agent.registry import AgentRegistry
|
|
14
|
+
from genxai.tools.registry import ToolRegistry
|
|
15
|
+
from genxai.core.execution import WorkerQueueEngine, ExecutionStore
|
|
16
|
+
from genxai.tools.builtin.computation.calculator import CalculatorTool
|
|
17
|
+
from genxai.tools.builtin.file.file_reader import FileReaderTool
|
|
18
|
+
from genxai.security.rbac import get_current_user, Permission
|
|
19
|
+
from genxai.security.policy_engine import get_policy_engine
|
|
20
|
+
from genxai.security.audit import get_audit_log, AuditEvent
|
|
21
|
+
|
|
22
|
+
logger = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class EnhancedGraph(Graph):
    """Enhanced graph with agent execution support.

    This extends the base Graph class to provide real agent execution
    with tool integration. It's the recommended way to execute workflows
    in GenXAI.
    """

    async def _execute_node_logic(self, node: Any, state: Dict[str, Any]) -> Any:
        """Execute node logic with actual agent execution.

        Args:
            node: Node to execute
            state: Current state

        Returns:
            Result of node execution

        Raises:
            ValueError: If an agent node has no ``agent_id`` in its config
                data, or the referenced agent is not in the registry.
        """
        if node.type == NodeType.INPUT:
            # IMPORTANT: Avoid returning the exact same `dict` object stored under
            # `state["input"]`. If we do, the engine will store that same object
            # under the input node id (e.g. "start"), creating shared references
            # which Python's `json.dumps` treats as circular.
            return copy.deepcopy(state.get("input"))

        elif node.type == NodeType.OUTPUT:
            # IMPORTANT: never return the live `state` dict. The engine stores the
            # node result back into `state[node_id]`, so returning `state` would
            # create a self-referential structure (circular reference) that can't
            # be JSON-serialized for persistence.
            # Also deep-copy to avoid shared references (json can't encode those).
            return copy.deepcopy(state)

        elif node.type == NodeType.AGENT:
            # Get agent from registry
            agent_id = node.config.data.get("agent_id")
            if not agent_id:
                raise ValueError(f"Agent node '{node.id}' missing agent_id in config.data")

            agent = AgentRegistry.get(agent_id)
            if agent is None:
                raise ValueError(f"Agent '{agent_id}' not found in registry")

            # FIX: prefer a task configured on the node itself. AgentNode stores
            # it under config.data["task"] (see nodes.py), but it was previously
            # ignored here. Fall back to the workflow-level task in state to stay
            # backward compatible.
            task = node.config.data.get("task") or state.get("task", "Process the input data")

            # Execute agent with tools if available
            result = await self._execute_agent_with_tools(agent, task, state)

            return result

        else:
            # Default behavior for node types this graph does not specialize.
            return {"node_id": node.id, "type": node.type.value}

    async def _execute_agent_with_tools(
        self, agent: Agent, task: str, state: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Execute agent with tool support using AgentRuntime.

        Args:
            agent: Agent to execute
            task: Task description
            state: Current state (passed to the runtime as execution context)

        Returns:
            Execution result
        """
        logger.debug(f"Executing agent '{agent.id}' ({agent.config.role})")

        # Use AgentRuntime for full integration.
        # Lazy import: runtime.py imports from this package, so a top-level
        # import would create a cycle.
        from genxai.core.agent.runtime import AgentRuntime

        runtime = AgentRuntime(agent=agent, enable_memory=True)

        # Load tools from registry; unknown tool names are silently skipped.
        if agent.config.tools:
            tools = {}
            for tool_name in agent.config.tools:
                tool = ToolRegistry.get(tool_name)
                if tool:
                    tools[tool_name] = tool
            runtime.set_tools(tools)
            logger.debug(f"Loaded {len(tools)} tools for agent")

        # Execute agent with full runtime support
        result = await runtime.execute(task, context=state)

        return result

    async def _execute_tool_for_task(
        self, tool: Any, tool_name: str, task: str, state: Dict[str, Any]
    ) -> Any:
        """Execute a tool based on the task.

        Heuristic dispatcher: keyword-matches the task text against known
        built-in tools. Returns None when nothing matches or the tool fails.

        Args:
            tool: Tool instance
            tool_name: Tool name
            task: Task description
            state: Current state

        Returns:
            Tool execution result or None
        """
        try:
            # Calculator tool
            if tool_name == "calculator":
                # Check if task involves calculation
                if any(op in task.lower() for op in ["calculate", "compute", "add", "multiply", "+"]):
                    # Extract expression from state or use default
                    expression = state.get("expression", "10 * 5 + 3")
                    logger.debug(f"Executing calculator: {expression}")
                    result = await tool.execute(expression=expression)
                    if result.success:
                        logger.debug(f"Calculator result: {result.data['result']}")
                        return result.data

            # File reader tool
            elif tool_name == "file_reader":
                # Check if task involves file reading
                if any(word in task.lower() for word in ["read", "file", "load"]):
                    # Get file path from state
                    file_path = state.get("file_path")

                    if file_path:
                        logger.debug(f"Reading file: {file_path}")
                        result = await tool.execute(path=file_path)
                        if result.success:
                            logger.debug(f"Read {result.data['lines']} lines")
                            return result.data

        except Exception as e:
            # Best-effort helper: tool failures are logged, not propagated.
            logger.error(f"Tool execution failed: {e}")

        return None
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class WorkflowExecutor:
    """Executes workflows using GenXAI engine.

    This is the main class for executing workflows programmatically.
    It handles agent creation, tool registration, graph building,
    and execution.

    Example:
        ```python
        executor = WorkflowExecutor(openai_api_key="sk-...")
        result = await executor.execute(nodes, edges, input_data)
        ```
    """

    def __init__(
        self,
        openai_api_key: Optional[str] = None,
        anthropic_api_key: Optional[str] = None,
        register_builtin_tools: bool = True,
        queue_engine: Optional[WorkerQueueEngine] = None,
        execution_store: Optional[ExecutionStore] = None,
    ):
        """Initialize workflow executor.

        Args:
            openai_api_key: OpenAI API key
            anthropic_api_key: Anthropic API key
            register_builtin_tools: Whether to register built-in tools
            queue_engine: Optional worker queue engine for ``execute_queued``;
                created lazily when omitted.
            execution_store: Optional store for run records; a fresh
                ``ExecutionStore`` is created when omitted.
        """
        self.openai_api_key = openai_api_key
        self.anthropic_api_key = anthropic_api_key

        self.queue_engine = queue_engine
        self.execution_store = execution_store or ExecutionStore()

        if register_builtin_tools:
            self._setup_tools()

    def _setup_tools(self) -> None:
        """Register built-in tools.

        Idempotent: each tool is only registered if no tool of that name is
        already in the (process-global) ToolRegistry.
        """
        # Register calculator tool
        if not ToolRegistry.get("calculator"):
            calculator = CalculatorTool()
            ToolRegistry.register(calculator)
            logger.info("Registered calculator tool")

        # Register file reader tool
        if not ToolRegistry.get("file_reader"):
            file_reader = FileReaderTool()
            ToolRegistry.register(file_reader)
            logger.info("Registered file_reader tool")

    def _create_agents_from_nodes(self, nodes: List[Dict[str, Any]]) -> None:
        """Create and register agents from workflow nodes.

        Only nodes whose ``type`` is ``"agent"`` are processed; each one's
        ``config`` dict supplies the agent parameters (with defaults for any
        missing key). Created agents are registered in the process-global
        AgentRegistry so the graph can resolve them by id during execution.

        Args:
            nodes: List of workflow nodes
        """
        for node in nodes:
            if node.get("type") == "agent":
                agent_id = node.get("id")
                config = node.get("config", {})

                # Extract agent configuration (with sensible defaults)
                role = config.get("role", "Agent")
                goal = config.get("goal", "Process tasks")
                backstory = config.get("backstory", "")
                tools = config.get("tools", [])
                llm_model = config.get("llm_model", "gpt-4")
                temperature = config.get("temperature", 0.7)

                # Create agent
                agent = AgentFactory.create_agent(
                    id=agent_id,
                    role=role,
                    goal=goal,
                    backstory=backstory,
                    tools=tools,
                    llm_model=llm_model,
                    temperature=temperature,
                )

                # Register agent
                AgentRegistry.register(agent)
                logger.info(f"Created and registered agent: {agent_id}")

    def _build_graph(self, nodes: List[Dict[str, Any]], edges: List[Dict[str, Any]]) -> EnhancedGraph:
        """Build GenXAI graph from workflow definition.

        Args:
            nodes: List of workflow nodes
            edges: List of workflow edges

        Returns:
            Constructed graph
        """
        graph = EnhancedGraph(name="workflow")

        # Add nodes
        for node in nodes:
            node_id = node.get("id")
            node_type = node.get("type")

            # Support some common aliases used by the Studio UI
            # - "start" behaves like an input node
            # - "end" behaves like an output node
            if node_type in {"input", "start"}:
                graph.add_node(InputNode(id=node_id))
            elif node_type in {"output", "end"}:
                graph.add_node(OutputNode(id=node_id))
            elif node_type == "agent":
                # Agent node id doubles as the registry agent_id (see
                # _create_agents_from_nodes, which registers under node id).
                graph.add_node(AgentNode(id=node_id, agent_id=node_id))
            else:
                # Unknown types are skipped (logged), not fatal.
                logger.warning(f"Unknown node type: {node_type}")

        # Add edges
        for edge in edges:
            source = edge.get("source")
            target = edge.get("target")
            condition = edge.get("condition")

            if condition:
                # Conditional edge. NOTE: `cond=condition` default-binds the
                # value at definition time, avoiding the late-binding-closure
                # pitfall across loop iterations.
                graph.add_edge(
                    ConditionalEdge(
                        source=source,
                        target=target,
                        condition=lambda state, cond=condition: self._evaluate_condition(state, cond),
                    )
                )
            else:
                # Regular edge
                graph.add_edge(Edge(source=source, target=target))

        return graph

    def _evaluate_condition(self, state: Dict[str, Any], condition: str) -> bool:
        """Evaluate a condition string.

        Args:
            state: Current workflow state
            condition: Condition expression

        Returns:
            Boolean result
        """
        # Simple condition evaluation (can be enhanced)
        try:
            # For now, just check if condition key exists in state
            return condition in state
        except Exception as e:
            logger.error(f"Error evaluating condition: {e}")
            return False

    async def execute(
        self,
        nodes: List[Dict[str, Any]],
        edges: List[Dict[str, Any]],
        input_data: Dict[str, Any],
        run_id: Optional[str] = None,
        checkpoint_dir: Optional[str] = None,
        resume_from: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Execute a workflow.

        Args:
            nodes: Workflow nodes
            edges: Workflow edges
            input_data: Input data for execution
            run_id: Pre-assigned run id; generated from the store when omitted
            checkpoint_dir: Directory containing checkpoints (used together
                with ``resume_from``)
            resume_from: Checkpoint identifier to resume execution from

        Returns:
            Execution result with status, result, and metadata. Failures are
            reported in the return value (``status == "error"``), not raised.
        """
        run_id = run_id or self.execution_store.generate_run_id()
        # NOTE(review): `record` is unused locally; the call is kept for its
        # side effect of persisting the initial "running" status.
        record = self.execution_store.create(run_id, workflow="workflow", status="running")

        try:
            logger.info("Starting workflow execution")

            # Create agents from nodes
            self._create_agents_from_nodes(nodes)

            # Build graph
            graph = self._build_graph(nodes, edges)

            # Validate graph
            graph.validate()
            logger.info(f"Graph validated: {len(graph.nodes)} nodes, {len(graph.edges)} edges")

            checkpoint = None
            if resume_from and checkpoint_dir:
                checkpoint = graph.load_checkpoint(resume_from, Path(checkpoint_dir))

            # Security: when a user context is present, enforce the execute
            # permission and record an audit event before running the graph.
            user = get_current_user()
            if user is not None:
                get_policy_engine().check(user, "workflow:workflow", Permission.WORKFLOW_EXECUTE)
                get_audit_log().record(
                    AuditEvent(
                        action="workflow.execute",
                        actor_id=user.user_id,
                        resource_id="workflow:workflow",
                        status="allowed",
                    )
                )
            result = await graph.run(input_data=input_data, resume_from=checkpoint)

            logger.info("Workflow execution completed successfully")

            self.execution_store.update(
                run_id,
                status="success",
                result=result,
                completed=True,
            )
            return {
                "status": "success",
                "run_id": run_id,
                "result": result,
                "nodes_executed": len(graph.nodes),
                "message": "Workflow executed successfully"
            }

        except Exception as e:
            logger.error(f"Workflow execution failed: {e}", exc_info=True)
            self.execution_store.update(
                run_id,
                status="error",
                error=str(e),
                completed=True,
            )
            return {
                "status": "error",
                "run_id": run_id,
                "error": str(e),
                "message": f"Workflow execution failed: {str(e)}"
            }

        finally:
            # Cleanup: Clear registries for next execution.
            # NOTE(review): AgentRegistry is process-global, so this also wipes
            # agents registered by any concurrent execution — confirm if
            # parallel runs are expected.
            AgentRegistry.clear()
            logger.info("Cleared agent registry")

    async def execute_queued(
        self,
        nodes: List[Dict[str, Any]],
        edges: List[Dict[str, Any]],
        input_data: Dict[str, Any],
        run_id: Optional[str] = None,
        checkpoint_dir: Optional[str] = None,
        resume_from: Optional[str] = None,
    ) -> str:
        """Enqueue workflow execution using a worker queue engine.

        Creates the queue engine lazily on first use. Idempotent per run id:
        if the run is already running or succeeded, the existing id is
        returned without enqueueing again.

        Args:
            nodes: Workflow nodes
            edges: Workflow edges
            input_data: Input data for execution
            run_id: Pre-assigned run id; generated from the store when omitted
            checkpoint_dir: Directory containing checkpoints
            resume_from: Checkpoint identifier to resume from

        Returns:
            The run id of the enqueued (or already-known) execution.
        """
        if not self.queue_engine:
            self.queue_engine = WorkerQueueEngine()

        run_id = run_id or self.execution_store.generate_run_id()
        existing = self.execution_store.get(run_id)
        if existing and existing.status in {"running", "success"}:
            # Already in flight or finished — do not double-enqueue.
            return run_id

        self.execution_store.create(run_id, workflow="workflow", status="queued")

        async def _handler(payload: Dict[str, Any]) -> None:
            # Worker callback: unpack the payload and run the workflow.
            await self.execute(
                nodes=payload["nodes"],
                edges=payload["edges"],
                input_data=payload["input_data"],
                run_id=payload["run_id"],
                checkpoint_dir=payload.get("checkpoint_dir"),
                resume_from=payload.get("resume_from"),
            )

        await self.queue_engine.start()
        return await self.queue_engine.enqueue(
            {
                "nodes": nodes,
                "edges": edges,
                "input_data": input_data,
                "run_id": run_id,
                "checkpoint_dir": checkpoint_dir,
                "resume_from": resume_from,
            },
            _handler,
            metadata={"workflow": "queued"},
            run_id=run_id,
        )
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
def execute_workflow_sync(
    nodes: List[Dict[str, Any]],
    edges: List[Dict[str, Any]],
    input_data: Dict[str, Any],
    openai_api_key: Optional[str] = None,
    anthropic_api_key: Optional[str] = None,
) -> Dict[str, Any]:
    """Synchronous wrapper for workflow execution.

    This is a convenience function for executing workflows in
    synchronous contexts. It must NOT be called from inside a running
    event loop — use ``execute_workflow_async`` there instead.

    Args:
        nodes: Workflow nodes
        edges: Workflow edges
        input_data: Input data
        openai_api_key: OpenAI API key
        anthropic_api_key: Anthropic API key

    Returns:
        Execution result
    """
    executor = WorkflowExecutor(
        openai_api_key=openai_api_key,
        anthropic_api_key=anthropic_api_key,
    )

    # FIX: use asyncio.run() instead of the previous manual
    # new_event_loop()/set_event_loop()/run_until_complete()/close() sequence.
    # asyncio.run() additionally cancels leftover tasks and shuts down async
    # generators, and it does not leave a closed loop installed as the
    # thread's current event loop (the old code never reset the policy's
    # loop after closing it).
    return asyncio.run(executor.execute(nodes, edges, input_data))
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
async def execute_workflow_async(
    nodes: List[Dict[str, Any]],
    edges: List[Dict[str, Any]],
    input_data: Dict[str, Any],
    openai_api_key: Optional[str] = None,
    anthropic_api_key: Optional[str] = None,
) -> Dict[str, Any]:
    """Async convenience function for workflow execution.

    This is the correct entry point when you're *already* inside an asyncio
    event loop (e.g. FastAPI/Uvicorn request handlers).

    Args:
        nodes: Workflow nodes
        edges: Workflow edges
        input_data: Input data
        openai_api_key: OpenAI API key
        anthropic_api_key: Anthropic API key

    Returns:
        Execution result
    """
    runner = WorkflowExecutor(
        openai_api_key=openai_api_key,
        anthropic_api_key=anthropic_api_key,
    )
    return await runner.execute(nodes, edges, input_data)
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
"""Node types and implementations for graph-based orchestration."""
|
|
2
|
+
|
|
3
|
+
from enum import Enum
|
|
4
|
+
from typing import Any, Dict, Optional, Protocol
|
|
5
|
+
from pydantic import BaseModel, Field, ConfigDict
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class NodeType(str, Enum):
    """Types of nodes in the graph.

    Inherits from ``str`` so members compare equal to their string values
    and serialize naturally (e.g. to JSON).
    """

    AGENT = "agent"          # executes a registered agent
    TOOL = "tool"            # executes a tool by name
    CONDITION = "condition"  # evaluates a branching condition
    SUBGRAPH = "subgraph"    # nested graph execution
    HUMAN = "human"          # human-in-the-loop step
    INPUT = "input"          # workflow entry point
    OUTPUT = "output"        # workflow exit point
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class NodeConfig(BaseModel):
    """Configuration for a node.

    ``data`` carries type-specific payload (e.g. ``agent_id``/``task`` for
    agent nodes, ``tool_name`` for tool nodes); ``metadata`` is free-form.
    """

    # Allow non-pydantic values inside the data/metadata dicts.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    type: NodeType
    data: Dict[str, Any] = Field(default_factory=dict)
    metadata: Dict[str, Any] = Field(default_factory=dict)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class NodeStatus(str, Enum):
    """Execution status of a node."""

    PENDING = "pending"      # not yet started
    RUNNING = "running"      # currently executing
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # finished with an error
    SKIPPED = "skipped"      # intentionally not executed
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class Node(BaseModel):
    """Base node in the execution graph.

    Carries identity, static configuration, and mutable execution outcome
    (``status``/``result``/``error``) updated by the engine as it runs.
    """

    # `result` may hold arbitrary objects returned by node execution.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    id: str
    type: NodeType
    config: NodeConfig
    status: NodeStatus = NodeStatus.PENDING
    result: Optional[Any] = None
    error: Optional[str] = None


    def __repr__(self) -> str:
        """String representation of the node."""
        return f"Node(id={self.id}, type={self.type}, status={self.status})"

    def __hash__(self) -> int:
        """Hash function for node.

        NOTE(review): hashes by id only, so nodes are interchangeable in
        sets/dicts whenever their ids match — confirm this matches the
        intended equality semantics for the model.
        """
        return hash(self.id)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class NodeExecutor(Protocol):
    """Protocol for node execution.

    Structural interface: any object with a matching async ``execute``
    method satisfies it — no inheritance required.
    """

    async def execute(self, node: Node, context: Dict[str, Any]) -> Any:
        """Execute the node with given context."""
        ...
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class AgentNode(Node):
    """Node that executes an agent."""

    def __init__(
        self,
        id: str,
        agent_id: Optional[str] = None,
        agent: Optional[Any] = None,
        task: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize agent node.

        Two construction styles are accepted:

        1) Internal graph API: ``AgentNode(id="x", agent_id="agent_x")``
        2) Integration-test/user API: ``AgentNode(id="x", agent=<Agent>, task="...")``

        A directly supplied ``agent`` is registered into AgentRegistry so the
        execution layer can later resolve it by id.
        """
        # Lazy import to avoid circular imports
        from genxai.core.agent.registry import AgentRegistry

        if agent is not None:
            effective_id = agent.id
            # Make the agent discoverable by EnhancedGraph; a duplicate
            # registration is harmless and simply ignored.
            try:
                AgentRegistry.register(agent)
            except Exception:
                pass
        else:
            effective_id = agent_id

        if not effective_id:
            raise TypeError("AgentNode requires either agent_id or agent")

        payload: Dict[str, Any] = {"agent_id": effective_id}
        if task is not None:
            payload["task"] = task

        super().__init__(
            id=id,
            type=NodeType.AGENT,
            config=NodeConfig(type=NodeType.AGENT, data=payload),
            **kwargs,
        )
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class ToolNode(Node):
    """Node that executes a tool."""

    def __init__(self, id: str, tool_name: str, **kwargs: Any) -> None:
        """Initialize tool node, recording the target tool's name in config."""
        cfg = NodeConfig(type=NodeType.TOOL, data={"tool_name": tool_name})
        super().__init__(id=id, type=NodeType.TOOL, config=cfg, **kwargs)
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class ConditionNode(Node):
    """Node that evaluates a condition."""

    def __init__(self, id: str, condition: str, **kwargs: Any) -> None:
        """Initialize condition node, storing the condition expression in config."""
        cfg = NodeConfig(type=NodeType.CONDITION, data={"condition": condition})
        super().__init__(id=id, type=NodeType.CONDITION, config=cfg, **kwargs)
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
class InputNode(Node):
    """Node that receives input."""

    def __init__(self, id: str = "input", **kwargs: Any) -> None:
        """Initialize input node with an empty INPUT-typed configuration."""
        cfg = NodeConfig(type=NodeType.INPUT)
        super().__init__(id=id, type=NodeType.INPUT, config=cfg, **kwargs)
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
class OutputNode(Node):
    """Node that produces output."""

    def __init__(self, id: str = "output", **kwargs: Any) -> None:
        """Initialize output node with an empty OUTPUT-typed configuration."""
        cfg = NodeConfig(type=NodeType.OUTPUT)
        super().__init__(id=id, type=NodeType.OUTPUT, config=cfg, **kwargs)
|