memra 0.0.1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
memra/__init__.py ADDED
@@ -0,0 +1,24 @@
+ """
+ Memra SDK - Declarative framework for enterprise workflows with MCP integration
+
+ A powerful orchestration framework that allows you to build AI-powered business workflows
+ with hybrid cloud/local execution capabilities.
+ """
+
+ __version__ = "0.2.0"
+ __author__ = "Memra"
+ __email__ = "info@memra.co"
+
+ # Core imports
+ from .models import Agent, Department, LLM, Tool
+ from .execution import ExecutionEngine
+
+ # Make key classes available at package level
+ __all__ = [
+     "Agent",
+     "Department",
+     "LLM",
+     "Tool",
+     "ExecutionEngine",
+     "__version__"
+ ]
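
With these re-exports in place, downstream code can pull the core classes straight from the top-level package. A minimal sketch, assuming memra 0.2.0 is installed:

```python
# Minimal import sketch; uses only the names exported in __all__ above.
from memra import Agent, Department, LLM, Tool, ExecutionEngine, __version__

print(__version__)  # -> "0.2.0"
```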
memra/discovery.py ADDED
@@ -0,0 +1,15 @@
+ from typing import List, Dict, Any, Optional
+ from .tool_registry import ToolRegistry
+
+ def discover_tools(hosted_by: Optional[str] = None) -> List[Dict[str, Any]]:
+     """
+     Discover available tools in the Memra platform.
+
+     Args:
+         hosted_by: Filter tools by host ("memra" or "mcp"). If None, returns all tools.
+
+     Returns:
+         List of available tools with their metadata
+     """
+     registry = ToolRegistry()
+     return registry.discover_tools(hosted_by)
@@ -0,0 +1,49 @@
+ """
+ Client-side tool discovery for Memra SDK
+ Queries the Memra API to discover available tools
+ """
+
+ from typing import List, Dict, Any, Optional
+ from .tool_registry_client import ToolRegistryClient
+
+ def discover_tools(hosted_by: Optional[str] = None) -> List[Dict[str, Any]]:
+     """
+     Discover available tools from the Memra API
+
+     Args:
+         hosted_by: Filter tools by hosting provider ("memra" or "mcp")
+
+     Returns:
+         List of available tools with their descriptions
+     """
+     registry = ToolRegistryClient()
+     return registry.discover_tools(hosted_by)
+
+ def check_api_health() -> bool:
+     """
+     Check if the Memra API is available
+
+     Returns:
+         True if API is healthy, False otherwise
+     """
+     registry = ToolRegistryClient()
+     return registry.health_check()
+
+ def get_api_status() -> Dict[str, Any]:
+     """
+     Get detailed API status information
+
+     Returns:
+         Dictionary with API status details
+     """
+     registry = ToolRegistryClient()
+
+     is_healthy = registry.health_check()
+     tools = registry.discover_tools() if is_healthy else []
+
+     return {
+         "api_healthy": is_healthy,
+         "api_url": registry.api_base,
+         "tools_available": len(tools),
+         "tools": tools
+     }
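
Taken together, the two hunks give the SDK both a local-registry and an API-backed discovery path. A usage sketch for the client-side helpers, assuming they are importable from `memra.discovery` (the second hunk's module path is not shown in this diff):

```python
# Sketch only: the module path of the API-backed helpers is assumed here.
from memra.discovery import discover_tools, check_api_health, get_api_status

if check_api_health():
    mcp_tools = discover_tools(hosted_by="mcp")  # only MCP-hosted tools
    all_tools = discover_tools()                 # no filter: every tool
    status = get_api_status()
    print(status["api_url"], status["tools_available"])
```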
memra/execution.py ADDED
@@ -0,0 +1,434 @@
+ import time
+ import logging
+ import os
+ from typing import Dict, Any, List, Optional
+ from .models import Department, Agent, DepartmentResult, ExecutionTrace, DepartmentAudit
+
+ logger = logging.getLogger(__name__)
+
+ class ExecutionEngine:
+     """Engine that executes department workflows by coordinating agents and tools"""
+
+     def __init__(self):
+         # Use API client if MEMRA_API_KEY is set, otherwise use local tools
+         if os.getenv('MEMRA_API_KEY'):
+             from .tool_registry_client import ToolRegistryClient
+             self.tool_registry = ToolRegistryClient()
+             logger.info("Using API client for tool execution")
+         else:
+             from .tool_registry import ToolRegistry
+             self.tool_registry = ToolRegistry()
+             logger.info("Using local tool registry")
+
+         self.last_execution_audit: Optional[DepartmentAudit] = None
+
+     def execute_department(self, department: Department, input_data: Dict[str, Any]) -> DepartmentResult:
+         """Execute a department workflow"""
+         start_time = time.time()
+         trace = ExecutionTrace()
+
+         try:
+             print(f"\n🏢 Starting {department.name} Department")
+             print(f"📋 Mission: {department.mission}")
+             print(f"👥 Team: {', '.join([agent.role for agent in department.agents])}")
+             if department.manager_agent:
+                 print(f"👔 Manager: {department.manager_agent.role}")
+             print(f"🔄 Workflow: {' → '.join(department.workflow_order)}")
+             print("=" * 60)
+
+             logger.info(f"Starting execution of department: {department.name}")
+
+             # Initialize execution context
+             context = {
+                 "input": input_data,
+                 "department_context": department.context or {},
+                 "results": {}
+             }
+
+             # Execute agents in workflow order
+             for i, agent_role in enumerate(department.workflow_order, 1):
+                 print(f"\n🔄 Step {i}/{len(department.workflow_order)}: {agent_role}")
+
+                 agent = self._find_agent_by_role(department, agent_role)
+                 if not agent:
+                     error_msg = f"Agent with role '{agent_role}' not found in department"
+                     print(f"❌ Error: {error_msg}")
+                     trace.errors.append(error_msg)
+                     return DepartmentResult(
+                         success=False,
+                         error=error_msg,
+                         trace=trace
+                     )
+
+                 # Execute agent
+                 agent_start = time.time()
+                 result = self._execute_agent(agent, context, trace)
+                 agent_duration = time.time() - agent_start
+
+                 trace.agents_executed.append(agent.role)
+                 trace.execution_times[agent.role] = agent_duration
+
+                 if not result.get("success", False):
+                     # Try fallback if available
+                     if department.manager_agent and agent.role in (department.manager_agent.fallback_agents or {}):
+                         fallback_role = department.manager_agent.fallback_agents[agent.role]
+                         print(f"🔄 {department.manager_agent.role}: Let me try {fallback_role} as backup for {agent.role}")
+                         fallback_agent = self._find_agent_by_role(department, fallback_role)
+                         if fallback_agent:
+                             logger.info(f"Trying fallback agent: {fallback_role}")
+                             result = self._execute_agent(fallback_agent, context, trace)
+                             trace.agents_executed.append(fallback_agent.role)
+
+                     if not result.get("success", False):
+                         error_msg = f"Agent {agent.role} failed: {result.get('error', 'Unknown error')}"
+                         print(f"❌ Workflow stopped: {error_msg}")
+                         trace.errors.append(error_msg)
+                         return DepartmentResult(
+                             success=False,
+                             error=error_msg,
+                             trace=trace
+                         )
+
+                 # Store result for next agent
+                 context["results"][agent.output_key] = result.get("data")
+                 print(f"✅ Step {i} completed in {agent_duration:.1f}s")
+
+             # Execute manager agent for final validation if present
+             if department.manager_agent:
+                 print(f"\n🔍 Final Review Phase")
+                 manager_start = time.time()
+
+                 # Prepare manager input with all workflow results
+                 manager_input = {
+                     "workflow_results": context["results"],
+                     "department_context": context["department_context"]
+                 }
+
+                 # Add connection if available
+                 if "connection" in context["input"]:
+                     manager_input["connection"] = context["input"]["connection"]
+
+                 # Execute manager validation
+                 manager_result = self._execute_manager_validation(department.manager_agent, manager_input, trace)
+                 manager_duration = time.time() - manager_start
+
+                 trace.agents_executed.append(department.manager_agent.role)
+                 trace.execution_times[department.manager_agent.role] = manager_duration
+
+                 # Store manager validation results
+                 context["results"][department.manager_agent.output_key] = manager_result.get("data")
+
+                 # Check if manager validation failed
+                 if not manager_result.get("success", False):
+                     error_msg = f"Manager validation failed: {manager_result.get('error', 'Unknown error')}"
+                     print(f"❌ {error_msg}")
+                     trace.errors.append(error_msg)
+                     return DepartmentResult(
+                         success=False,
+                         error=error_msg,
+                         trace=trace
+                     )
+
+                 print(f"✅ Manager review completed in {manager_duration:.1f}s")
+
+             # Create audit record
+             total_duration = time.time() - start_time
+             self.last_execution_audit = DepartmentAudit(
+                 agents_run=trace.agents_executed,
+                 tools_invoked=trace.tools_invoked,
+                 duration_seconds=total_duration
+             )
+
+             print(f"\n🎉 {department.name} Department workflow completed!")
+             print(f"⏱️ Total time: {total_duration:.1f}s")
+             print("=" * 60)
+
+             return DepartmentResult(
+                 success=True,
+                 data=context["results"],
+                 trace=trace
+             )
+
+         except Exception as e:
+             print(f"💥 Unexpected error in {department.name} Department: {str(e)}")
+             logger.error(f"Execution failed: {str(e)}")
+             trace.errors.append(str(e))
+             return DepartmentResult(
+                 success=False,
+                 error=str(e),
+                 trace=trace
+             )
+
+     def _find_agent_by_role(self, department: Department, role: str) -> Optional[Agent]:
+         """Find an agent by role in the department"""
+         for agent in department.agents:
+             if agent.role == role:
+                 return agent
+         return None
+
+     def _execute_agent(self, agent: Agent, context: Dict[str, Any], trace: ExecutionTrace) -> Dict[str, Any]:
+         """Execute a single agent"""
+         print(f"\n👤 {agent.role}: Hi! I'm starting my work now...")
+         logger.info(f"Executing agent: {agent.role}")
+
+         try:
+             # Show what the agent is thinking about
+             print(f"💭 {agent.role}: My job is to {agent.job.lower()}")
+
+             # Prepare input data for agent
+             agent_input = {}
+             for key in agent.input_keys:
+                 if key in context["input"]:
+                     agent_input[key] = context["input"][key]
+                     print(f"📥 {agent.role}: I received '{key}' as input")
+                 elif key in context["results"]:
+                     agent_input[key] = context["results"][key]
+                     print(f"📥 {agent.role}: I got '{key}' from a previous agent")
+                 else:
+                     print(f"🤔 {agent.role}: Hmm, I'm missing input '{key}' but I'll try to work without it")
+                     logger.warning(f"Missing input key '{key}' for agent {agent.role}")
+
+             # Always include connection string if available (for database tools)
+             if "connection" in context["input"]:
+                 agent_input["connection"] = context["input"]["connection"]
+
+             # Execute agent's tools
+             result_data = {}
+             tools_with_real_work = []
+             tools_with_mock_work = []
+
+             print(f"🔧 {agent.role}: I need to use {len(agent.tools)} tool(s) to complete my work...")
+
+             for i, tool_spec in enumerate(agent.tools, 1):
+                 tool_name = tool_spec["name"] if isinstance(tool_spec, dict) else tool_spec.name
+                 hosted_by = tool_spec.get("hosted_by", "memra") if isinstance(tool_spec, dict) else tool_spec.hosted_by
+
+                 # Extract tool-level config if available, otherwise use agent config
+                 tool_config = None
+                 if isinstance(tool_spec, dict) and "config" in tool_spec:
+                     tool_config = tool_spec["config"]
+                 elif agent.config:
+                     tool_config = agent.config
+
+                 print(f"⚡ {agent.role}: Using tool {i}/{len(agent.tools)}: {tool_name}")
+
+                 trace.tools_invoked.append(tool_name)
+
+                 # Get tool from registry and execute
+                 tool_result = self.tool_registry.execute_tool(
+                     tool_name,
+                     hosted_by,
+                     agent_input,
+                     tool_config
+                 )
+
+                 if not tool_result.get("success", False):
+                     print(f"😟 {agent.role}: Oh no! Tool {tool_name} failed: {tool_result.get('error', 'Unknown error')}")
+                     return {
+                         "success": False,
+                         "error": f"Tool {tool_name} failed: {tool_result.get('error', 'Unknown error')}"
+                     }
+
+                 # Check if this tool did real work or mock work
+                 tool_data = tool_result.get("data", {})
+                 if self._is_real_work(tool_name, tool_data):
+                     tools_with_real_work.append(tool_name)
+                     print(f"✅ {agent.role}: Great! {tool_name} did real work and gave me useful results")
+                 else:
+                     tools_with_mock_work.append(tool_name)
+                     print(f"🔄 {agent.role}: {tool_name} gave me simulated results (that's okay for testing)")
+
+                 result_data.update(tool_data)
+
+             # Add metadata about real vs mock work
+             result_data["_memra_metadata"] = {
+                 "agent_role": agent.role,
+                 "tools_real_work": tools_with_real_work,
+                 "tools_mock_work": tools_with_mock_work,
+                 "work_quality": "real" if tools_with_real_work else "mock"
+             }
+
+             # Agent reports completion
+             if tools_with_real_work:
+                 print(f"🎉 {agent.role}: Perfect! I completed my work with real data processing")
+             else:
+                 print(f"📝 {agent.role}: I finished my work, but used simulated data (still learning!)")
+
+             print(f"📤 {agent.role}: Passing my results to the next agent via '{agent.output_key}'")
+
+             return {
+                 "success": True,
+                 "data": result_data
+             }
+
+         except Exception as e:
+             print(f"😰 {agent.role}: I encountered an error and couldn't complete my work: {str(e)}")
+             logger.error(f"Agent {agent.role} execution failed: {str(e)}")
+             return {
+                 "success": False,
+                 "error": str(e)
+             }
+
+     def _is_real_work(self, tool_name: str, tool_data: Dict[str, Any]) -> bool:
+         """Determine if a tool did real work or returned mock data"""
+
+         # Check for specific indicators of real work
+         if tool_name == "PDFProcessor":
+             # Real work if it has actual image paths and file size
+             return (
+                 "metadata" in tool_data and
+                 "file_size" in tool_data["metadata"] and
+                 tool_data["metadata"]["file_size"] > 1000 and  # Real file size
+                 "pages" in tool_data and
+                 len(tool_data["pages"]) > 0 and
+                 "image_path" in tool_data["pages"][0]
+             )
+
+         elif tool_name == "InvoiceExtractionWorkflow":
+             # Real work if it has actual extracted data with specific vendor info
+             return (
+                 "headerSection" in tool_data and
+                 "vendorName" in tool_data["headerSection"] and
+                 tool_data["headerSection"]["vendorName"] not in ["", "UNKNOWN", "Sample Vendor"] and
+                 "chargesSummary" in tool_data and
+                 "memra_checksum" in tool_data["chargesSummary"]
+             )
+
+         elif tool_name == "DatabaseQueryTool":
+             # Real work if it loaded the actual schema file (more than 3 columns)
+             return (
+                 "columns" in tool_data and
+                 len(tool_data["columns"]) > 3
+             )
+
+         elif tool_name == "DataValidator":
+             # Real work if it actually validated real data with meaningful validation
+             return (
+                 "validation_errors" in tool_data and
+                 isinstance(tool_data["validation_errors"], list) and
+                 "is_valid" in tool_data and
+                 # Check if it's validating real extracted data (not just mock data)
+                 len(str(tool_data)) > 100  # Real validation results are more substantial
+             )
+
+         elif tool_name == "PostgresInsert":
+             # Real work if it successfully inserted into a real database
+             return (
+                 "success" in tool_data and
+                 tool_data["success"] == True and
+                 "record_id" in tool_data and
+                 isinstance(tool_data["record_id"], int) and  # Real DB returns integer IDs
+                 "database_table" in tool_data  # Real implementation includes table name
+             )
+
+         # Default to mock work
+         return False
+
+     def get_last_audit(self) -> Optional[DepartmentAudit]:
+         """Get audit information from the last execution"""
+         return self.last_execution_audit
+
+     def _execute_manager_validation(self, manager_agent: Agent, manager_input: Dict[str, Any], trace: ExecutionTrace) -> Dict[str, Any]:
+         """Execute manager agent to validate workflow results"""
+         print(f"\n👔 {manager_agent.role}: Time for me to review everyone's work...")
+         logger.info(f"Manager {manager_agent.role} validating workflow results")
+
+         try:
+             # Analyze workflow results for real vs mock work
+             workflow_analysis = self._analyze_workflow_quality(manager_input["workflow_results"])
+
+             print(f"🔍 {manager_agent.role}: Let me analyze what each agent accomplished...")
+
+             # Prepare validation report
+             validation_report = {
+                 "workflow_analysis": workflow_analysis,
+                 "validation_status": "pass" if workflow_analysis["overall_quality"] == "real" else "fail",
+                 "recommendations": [],
+                 "agent_performance": {}
+             }
+
+             # Analyze each agent's performance
+             for result_key, result_data in manager_input["workflow_results"].items():
+                 if isinstance(result_data, dict) and "_memra_metadata" in result_data:
+                     metadata = result_data["_memra_metadata"]
+                     agent_role = metadata["agent_role"]
+
+                     if metadata["work_quality"] == "real":
+                         print(f"👍 {manager_agent.role}: {agent_role} did excellent real work!")
+                     else:
+                         print(f"📋 {manager_agent.role}: {agent_role} completed their tasks but with simulated data")
+
+                     validation_report["agent_performance"][agent_role] = {
+                         "work_quality": metadata["work_quality"],
+                         "tools_real_work": metadata["tools_real_work"],
+                         "tools_mock_work": metadata["tools_mock_work"],
+                         "status": "completed_real_work" if metadata["work_quality"] == "real" else "completed_mock_work"
+                     }
+
+                     # Add recommendations for mock work
+                     if metadata["work_quality"] == "mock":
+                         recommendation = f"Agent {agent_role} performed mock work - implement real {', '.join(metadata['tools_mock_work'])} functionality"
+                         validation_report["recommendations"].append(recommendation)
+                         print(f"💡 {manager_agent.role}: I recommend upgrading {agent_role}'s tools for production")
+
+             # Overall workflow validation
+             if workflow_analysis["overall_quality"] == "real":
+                 validation_report["summary"] = "Workflow completed successfully with real data processing"
+                 print(f"🎯 {manager_agent.role}: Excellent! This workflow is production-ready")
+             elif workflow_analysis["overall_quality"].startswith("mixed"):
+                 validation_report["summary"] = "Workflow completed with mixed real and simulated data"
+                 print(f"⚖️ {manager_agent.role}: Good progress! Some agents are production-ready, others need work")
+             else:
+                 validation_report["summary"] = "Workflow completed but with mock/simulated data - production readiness requires real implementations"
+                 print(f"🚧 {manager_agent.role}: This workflow needs more development before production use")
+
+             real_percentage = workflow_analysis["real_work_percentage"]
+             print(f"📊 {manager_agent.role}: Overall assessment: {real_percentage:.0f}% of agents did real work")
+
+             return {
+                 "success": True,
+                 "data": validation_report
+             }
+
+         except Exception as e:
+             print(f"😰 {manager_agent.role}: I had trouble analyzing the workflow: {str(e)}")
+             logger.error(f"Manager validation failed: {str(e)}")
+             return {
+                 "success": False,
+                 "error": str(e)
+             }
+
+     def _analyze_workflow_quality(self, workflow_results: Dict[str, Any]) -> Dict[str, Any]:
+         """Analyze the overall quality of workflow execution"""
+
+         total_agents = 0
+         real_work_agents = 0
+         mock_work_agents = 0
+
+         for result_key, result_data in workflow_results.items():
+             if isinstance(result_data, dict) and "_memra_metadata" in result_data:
+                 metadata = result_data["_memra_metadata"]
+                 total_agents += 1
+
+                 if metadata["work_quality"] == "real":
+                     real_work_agents += 1
+                 else:
+                     mock_work_agents += 1
+
+         # Determine overall quality
+         if real_work_agents > 0 and mock_work_agents == 0:
+             overall_quality = "real"
+         elif real_work_agents > mock_work_agents:
+             overall_quality = "mixed_mostly_real"
+         elif real_work_agents > 0:
+             overall_quality = "mixed_mostly_mock"
+         else:
+             overall_quality = "mock"
+
+         return {
+             "total_agents": total_agents,
+             "real_work_agents": real_work_agents,
+             "mock_work_agents": mock_work_agents,
+             "overall_quality": overall_quality,
+             "real_work_percentage": (real_work_agents / total_agents * 100) if total_agents > 0 else 0
+         }
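
The constructor above routes every tool call through either the hosted API or the local registry based on a single environment variable. A sketch of both modes (the key value is a placeholder, not a real credential):

```python
# Registry selection sketch: ExecutionEngine checks MEMRA_API_KEY at construction.
import os
from memra.execution import ExecutionEngine

os.environ.pop("MEMRA_API_KEY", None)
local_engine = ExecutionEngine()      # no key: local ToolRegistry

os.environ["MEMRA_API_KEY"] = "..."   # placeholder key
api_engine = ExecutionEngine()        # key set: API-backed ToolRegistryClient
```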
memra/models.py ADDED
@@ -0,0 +1,98 @@
+ from typing import List, Dict, Optional, Any, Union
+ from pydantic import BaseModel, Field
+
+ class LLM(BaseModel):
+     model: str
+     temperature: float = 0.0
+     max_tokens: Optional[int] = None
+     stop: Optional[List[str]] = None
+
+ class Tool(BaseModel):
+     name: str
+     hosted_by: str = "memra"  # or "mcp" for customer's Model Context Protocol
+     description: Optional[str] = None
+     parameters: Optional[Dict[str, Any]] = None
+
+ class Agent(BaseModel):
+     role: str
+     job: str
+     llm: Optional[Union[LLM, Dict[str, Any]]] = None
+     sops: List[str] = Field(default_factory=list)
+     tools: List[Union[Tool, Dict[str, Any]]] = Field(default_factory=list)
+     systems: List[str] = Field(default_factory=list)
+     input_keys: List[str] = Field(default_factory=list)
+     output_key: str
+     allow_delegation: bool = False
+     fallback_agents: Optional[Dict[str, str]] = None
+     config: Optional[Dict[str, Any]] = None
+
+ class ExecutionPolicy(BaseModel):
+     retry_on_fail: bool = True
+     max_retries: int = 2
+     halt_on_validation_error: bool = True
+     timeout_seconds: int = 300
+
+ class ExecutionTrace(BaseModel):
+     agents_executed: List[str] = Field(default_factory=list)
+     tools_invoked: List[str] = Field(default_factory=list)
+     execution_times: Dict[str, float] = Field(default_factory=dict)
+     errors: List[str] = Field(default_factory=list)
+
+     def show(self):
+         """Display execution trace information"""
+         print("=== Execution Trace ===")
+         print(f"Agents executed: {', '.join(self.agents_executed)}")
+         print(f"Tools invoked: {', '.join(self.tools_invoked)}")
+         if self.errors:
+             print(f"Errors: {', '.join(self.errors)}")
+
+ class DepartmentResult(BaseModel):
+     success: bool
+     data: Optional[Dict[str, Any]] = None
+     error: Optional[str] = None
+     trace: ExecutionTrace = Field(default_factory=ExecutionTrace)
+
+ class DepartmentAudit(BaseModel):
+     agents_run: List[str]
+     tools_invoked: List[str]
+     duration_seconds: float
+     total_cost: Optional[float] = None
+
+ class Department(BaseModel):
+     name: str
+     mission: str
+     agents: List[Agent]
+     manager_agent: Optional[Agent] = None
+     default_llm: Optional[LLM] = None
+     workflow_order: List[str] = Field(default_factory=list)
+     dependencies: List[str] = Field(default_factory=list)
+     execution_policy: Optional[ExecutionPolicy] = None
+     context: Optional[Dict[str, Any]] = None
+
+     def run(self, input: Dict[str, Any]) -> DepartmentResult:
+         """
+         Execute the department workflow with the given input data.
+         """
+         # Import here to avoid circular imports
+         from .execution import ExecutionEngine
+
+         engine = ExecutionEngine()
+         return engine.execute_department(self, input)
+
+     def audit(self) -> DepartmentAudit:
+         """
+         Return audit information about the last execution.
+         """
+         # Import here to avoid circular imports
+         from .execution import ExecutionEngine
+
+         engine = ExecutionEngine()
+         audit = engine.get_last_audit()
+         if audit:
+             return audit
+         else:
+             return DepartmentAudit(
+                 agents_run=[],
+                 tools_invoked=[],
+                 duration_seconds=0.0
+             )
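
These models are the SDK's declarative surface: agents are described as data, then handed to the engine via `Department.run()`. A hypothetical end-to-end sketch (the role, keys, and file path are illustrative; `PDFProcessor` is one of the tool names recognized by `_is_real_work` above). Note that `Department.audit()` constructs a fresh `ExecutionEngine`, so as written it returns an empty audit rather than stats from a prior `run()`.

```python
# Hypothetical department definition; role, keys, and file are illustrative only.
from memra import Agent, Department

extractor = Agent(
    role="Invoice Extractor",
    job="Extract structured data from invoices",
    tools=[{"name": "PDFProcessor", "hosted_by": "memra"}],
    input_keys=["file"],
    output_key="invoice_data",
)

dept = Department(
    name="Accounts Payable",
    mission="Process vendor invoices end to end",
    agents=[extractor],
    workflow_order=["Invoice Extractor"],
)

result = dept.run({"file": "invoice.pdf"})
if result.success:
    result.trace.show()
else:
    print(result.error)
```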