memra 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memra/__init__.py +12 -9
- memra/execution.py +3 -19
- memra/tool_registry.py +34 -154
- {memra-0.2.0.dist-info → memra-0.2.1.dist-info}/METADATA +6 -37
- memra-0.2.1.dist-info/RECORD +13 -0
- {memra-0.2.0.dist-info → memra-0.2.1.dist-info}/WHEEL +1 -1
- memra-0.2.1.dist-info/top_level.txt +1 -0
- memra-0.2.0.dist-info/RECORD +0 -19
- memra-0.2.0.dist-info/top_level.txt +0 -2
- memra-sdk-package/examples/accounts_payable_client.py +0 -207
- memra-sdk-package/memra/__init__.py +0 -28
- memra-sdk-package/memra/discovery_client.py +0 -49
- memra-sdk-package/memra/execution.py +0 -418
- memra-sdk-package/memra/models.py +0 -98
- memra-sdk-package/memra/tool_registry_client.py +0 -105
- {memra-0.2.0.dist-info/licenses → memra-0.2.1.dist-info}/LICENSE +0 -0
- {memra-0.2.0.dist-info → memra-0.2.1.dist-info}/entry_points.txt +0 -0
memra/__init__.py
CHANGED
@@ -1,24 +1,27 @@
 """
-Memra SDK - Declarative
+Memra SDK - Declarative AI Workflows
 
-A
-
+A framework for building AI-powered business workflows using a declarative approach.
+Think of it as "Kubernetes for business logic" where agents are the pods and
+departments are the deployments.
 """
 
-__version__ = "0.2.0"
-__author__ = "Memra"
-__email__ = "info@memra.co"
+__version__ = "0.2.1"
 
 # Core imports
-from .models import Agent, Department,
+from .models import Agent, Department, Tool
 from .execution import ExecutionEngine
 
 # Make key classes available at package level
 __all__ = [
     "Agent",
     "Department",
-    "LLM",
     "Tool",
     "ExecutionEngine",
     "__version__"
-]
+]
+
+# Optional: Add version check for compatibility
+import sys
+if sys.version_info < (3, 8):
+    raise RuntimeError("Memra requires Python 3.8 or higher")
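For reference, a minimal usage sketch (not taken from the package docs) of the 0.2.1 top-level API after this change; it assumes `memra.models` still defines `LLM`, which the unchanged `models.py` hash in the RECORD suggests:

```python
# Hypothetical sketch based on the __init__.py diff above.
from memra import Agent, Department, Tool, ExecutionEngine, __version__

# "LLM" is no longer re-exported from the package root in 0.2.1;
# models.py is unchanged in this release, so it should still live there.
from memra.models import LLM

print(__version__)  # "0.2.1"
```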
memra/execution.py
CHANGED
@@ -1,8 +1,8 @@
 import time
 import logging
-import os
 from typing import Dict, Any, List, Optional
 from .models import Department, Agent, DepartmentResult, ExecutionTrace, DepartmentAudit
+from .tool_registry import ToolRegistry
 
 logger = logging.getLogger(__name__)
 
@@ -10,16 +10,7 @@ class ExecutionEngine:
     """Engine that executes department workflows by coordinating agents and tools"""
     
     def __init__(self):
-        
-        if os.getenv('MEMRA_API_KEY'):
-            from .tool_registry_client import ToolRegistryClient
-            self.tool_registry = ToolRegistryClient()
-            logger.info("Using API client for tool execution")
-        else:
-            from .tool_registry import ToolRegistry
-            self.tool_registry = ToolRegistry()
-            logger.info("Using local tool registry")
-        
+        self.tool_registry = ToolRegistry()
         self.last_execution_audit: Optional[DepartmentAudit] = None
     
     def execute_department(self, department: Department, input_data: Dict[str, Any]) -> DepartmentResult:
@@ -203,13 +194,6 @@ class ExecutionEngine:
                 tool_name = tool_spec["name"] if isinstance(tool_spec, dict) else tool_spec.name
                 hosted_by = tool_spec.get("hosted_by", "memra") if isinstance(tool_spec, dict) else tool_spec.hosted_by
                 
-                # Extract tool-level config if available, otherwise use agent config
-                tool_config = None
-                if isinstance(tool_spec, dict) and "config" in tool_spec:
-                    tool_config = tool_spec["config"]
-                elif agent.config:
-                    tool_config = agent.config
-                
                 print(f"⚡ {agent.role}: Using tool {i}/{len(agent.tools)}: {tool_name}")
                 
                 trace.tools_invoked.append(tool_name)
@@ -219,7 +203,7 @@
                     tool_name,
                     hosted_by,
                     agent_input,
-                    tool_config
+                    agent.config
                 )
                 
                 if not tool_result.get("success", False):
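A short sketch of the behavioral difference, assuming the hunks above are the only changes to the constructor and the tool-execution call:

```python
# Hypothetical sketch based on the execution.py diff above.
from memra.execution import ExecutionEngine

engine = ExecutionEngine()
# 0.2.0: the registry was chosen at runtime - ToolRegistryClient when
#        MEMRA_API_KEY was set, otherwise a local ToolRegistry with real tools.
# 0.2.1: the metadata-only ToolRegistry is always used, and execute_tool()
#        now receives agent.config directly; the per-tool {"config": ...}
#        override from 0.2.0 is no longer consulted.
print(type(engine.tool_registry).__name__)  # "ToolRegistry"
```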
memra/tool_registry.py
CHANGED
@@ -8,59 +8,41 @@ from pathlib import Path
 logger = logging.getLogger(__name__)
 
 class ToolRegistry:
-    """Registry for managing and executing tools"""
+    """Registry for managing and executing tools via API calls only"""
     
     def __init__(self):
         self.tools: Dict[str, Dict[str, Any]] = {}
-        self.
-        self._load_builtin_tools()
+        self._register_known_tools()
     
-    def
-        """
-        #
-
-
-
+    def _register_known_tools(self):
+        """Register known tools with their metadata (no actual implementations)"""
+        # Server-hosted tools (executed via Memra API)
+        server_tools = [
+            ("DatabaseQueryTool", "Query database schemas and data"),
+            ("PDFProcessor", "Process PDF files and extract content"),
+            ("OCRTool", "Perform OCR on images and documents"),
+            ("InvoiceExtractionWorkflow", "Extract structured data from invoices"),
+            ("FileReader", "Read files from the filesystem"),
+        ]
 
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-                "Query database schemas and data")
-            self.register_tool("PDFProcessor", PDFProcessor, "memra",
-                "Process PDF files and extract content")
-            self.register_tool("OCRTool", OCRTool, "memra",
-                "Perform OCR on images and documents")
-            self.register_tool("InvoiceExtractionWorkflow", InvoiceExtractionWorkflow, "memra",
-                "Extract structured data from invoices")
-            self.register_tool("DataValidator", DataValidator, "memra",
-                "Validate data against schemas")
-            self.register_tool("PostgresInsert", PostgresInsert, "memra",
-                "Insert data into PostgreSQL database")
-
-            # Load file tools
-            from logic.file_tools import FileReader
-            self.register_tool("FileReader", FileReader, "memra",
-                "Read files from the filesystem")
-
-            logger.info(f"Loaded {len(self.tools)} builtin tools")
-
-        except ImportError as e:
-            logger.warning(f"Could not load some tools: {e}")
+        for tool_name, description in server_tools:
+            self.register_tool(tool_name, None, "memra", description)
+
+        # MCP-hosted tools (executed via MCP bridge)
+        mcp_tools = [
+            ("DataValidator", "Validate data against schemas"),
+            ("PostgresInsert", "Insert data into PostgreSQL database"),
+        ]
+
+        for tool_name, description in mcp_tools:
+            self.register_tool(tool_name, None, "mcp", description)
+
+        logger.info(f"Registered {len(self.tools)} tool definitions")
 
-    def register_tool(self, name: str, tool_class: type, hosted_by: str, description: str):
-        """Register a tool in the registry"""
+    def register_tool(self, name: str, tool_class: Optional[type], hosted_by: str, description: str):
+        """Register a tool in the registry (metadata only)"""
         self.tools[name] = {
-            "class": tool_class,
+            "class": tool_class,  # Will be None for API-based tools
             "hosted_by": hosted_by,
             "description": description
         }
@@ -80,111 +62,9 @@ class ToolRegistry:
 
     def execute_tool(self, tool_name: str, hosted_by: str, input_data: Dict[str, Any],
                      config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
-        """Execute a tool
-
-
-
-
-
-
-        tool_info = self.tools[tool_name]
-        if tool_info["hosted_by"] != hosted_by:
-            return {
-                "success": False,
-                "error": f"Tool '{tool_name}' is hosted by '{tool_info['hosted_by']}', not '{hosted_by}'"
-            }
-
-        try:
-            # Instantiate tool
-            tool_class = tool_info["class"]
-
-            # Some tools need credentials/config for initialization
-            if tool_name in ["DatabaseQueryTool", "PostgresInsert"]:
-                if "connection" in input_data:
-                    # Parse connection string or use credentials
-                    credentials = self._parse_connection(input_data["connection"])
-                    tool_instance = tool_class(credentials)
-                else:
-                    return {
-                        "success": False,
-                        "error": f"Tool '{tool_name}' requires database credentials"
-                    }
-            elif tool_name == "InvoiceExtractionWorkflow":
-                # This tool needs to be instantiated to initialize the LLM client
-                tool_instance = tool_class()
-            else:
-                tool_instance = tool_class()
-
-            # Execute tool based on its type
-            result = self._execute_tool_method(tool_instance, tool_name, input_data, config)
-
-            return {
-                "success": True,
-                "data": result
-            }
-
-        except Exception as e:
-            logger.error(f"Tool execution failed for {tool_name}: {str(e)}")
-            return {
-                "success": False,
-                "error": str(e)
-            }
-
-    def _execute_tool_method(self, tool_instance: Any, tool_name: str,
-                             input_data: Dict[str, Any], config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
-        """Execute the appropriate method on the tool instance"""
-
-        if tool_name == "DatabaseQueryTool":
-            return tool_instance.get_schema("invoices")  # Default to invoices table
-
-        elif tool_name == "PDFProcessor":
-            file_path = input_data.get("file", "")
-            return tool_instance.process_pdf(file_path)
-
-        elif tool_name == "OCRTool":
-            # Assume PDF processor output is passed as input
-            return {"extracted_text": tool_instance.extract_text(input_data)}
-
-        elif tool_name == "InvoiceExtractionWorkflow":
-            text = input_data.get("extracted_text", "")
-            schema = input_data.get("invoice_schema", {})
-            return tool_instance.extract_data(text, schema)
-
-        elif tool_name == "DataValidator":
-            data = input_data.get("invoice_data", {})
-            schema = input_data.get("invoice_schema", {})
-            return tool_instance.validate(data, schema)
-
-        elif tool_name == "PostgresInsert":
-            data = input_data.get("invoice_data", {})
-            return tool_instance.insert_record("invoices", data)
-
-        elif tool_name == "FileReader":
-            file_path = config.get("path") if config else input_data.get("file_path")
-            if not file_path:
-                raise ValueError("FileReader requires a file path")
-            return tool_instance.read_file(file_path)
-
-        else:
-            raise ValueError(f"Unknown tool execution method for {tool_name}")
-
-    def _parse_connection(self, connection_string: str) -> Dict[str, Any]:
-        """Parse a connection string into credentials"""
-        # Simple parser for postgres://user:pass@host:port/database
-        if connection_string.startswith("postgres://"):
-            # This is a simplified parser - in production you'd use a proper URL parser
-            parts = connection_string.replace("postgres://", "").split("/")
-            db_part = parts[1] if len(parts) > 1 else "finance"
-            auth_host = parts[0].split("@")
-            host_port = auth_host[1].split(":") if len(auth_host) > 1 else ["localhost", "5432"]
-            user_pass = auth_host[0].split(":") if len(auth_host) > 1 else ["user", "pass"]
-
-            return {
-                "host": host_port[0],
-                "port": int(host_port[1]) if len(host_port) > 1 else 5432,
-                "database": db_part,
-                "user": user_pass[0],
-                "password": user_pass[1] if len(user_pass) > 1 else ""
-            }
-
-        return {"connection_string": connection_string}
+        """Execute a tool - this should not be called directly in API-based mode"""
+        logger.warning(f"Direct tool execution attempted for {tool_name}. Use API client instead.")
+        return {
+            "success": False,
+            "error": "Direct tool execution not supported. Use API client for tool execution."
+        }
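Based only on what the diff shows, the 0.2.1 registry is a metadata catalogue: every entry stores `None` for the class, and direct execution is refused. A minimal sketch:

```python
# Hypothetical sketch based on the tool_registry.py diff above.
from memra.tool_registry import ToolRegistry

registry = ToolRegistry()
print(registry.tools["PostgresInsert"]["hosted_by"])  # "mcp"
print(registry.tools["PDFProcessor"]["class"])        # None (metadata only)

result = registry.execute_tool("PDFProcessor", "memra", {"file": "invoice.pdf"})
assert result["success"] is False  # execution is delegated to the API client
```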
{memra-0.2.0.dist-info → memra-0.2.1.dist-info}/METADATA
CHANGED
@@ -1,10 +1,10 @@
-Metadata-Version: 2.
+Metadata-Version: 2.1
 Name: memra
-Version: 0.2.0
-Summary: Declarative framework for enterprise workflows with MCP integration
+Version: 0.2.1
+Summary: Declarative framework for enterprise workflows with MCP integration - Client SDK
 Home-page: https://github.com/memra/memra-sdk
 Author: Memra
-Author-email: Memra <
+Author-email: Memra <support@memra.com>
 License: MIT
 Project-URL: Homepage, https://memra.co
 Project-URL: Repository, https://github.com/memra-platform/memra-sdk
@@ -25,16 +25,13 @@ Requires-Dist: httpx>=0.24.0
 Requires-Dist: typing-extensions>=4.0.0
 Requires-Dist: aiohttp>=3.8.0
 Requires-Dist: aiohttp-cors>=0.7.0
-Requires-Dist: psycopg2-binary>=2.9.0
 Provides-Extra: dev
 Requires-Dist: pytest>=6.0; extra == "dev"
 Requires-Dist: pytest-asyncio; extra == "dev"
 Requires-Dist: black; extra == "dev"
 Requires-Dist: flake8; extra == "dev"
-
-
-Dynamic: license-file
-Dynamic: requires-python
+Provides-Extra: mcp
+Requires-Dist: psycopg2-binary>=2.9.0; extra == "mcp"
 
 # Memra SDK
 
@@ -131,31 +128,3 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 ├── local/dependencies/    # Database setup & schemas
 └── docker-compose.yml     # Database setup
 ```
-
-## ✨ New: MCP Integration
-
-Memra now supports **Model Context Protocol (MCP)** integration, allowing you to execute operations on your local infrastructure while leveraging Memra's cloud-based AI processing.
-
-**Key Benefits:**
-- 🔒 **Keep sensitive data local** - Your databases stay on your infrastructure
-- ⚡ **Hybrid processing** - AI processing in the cloud, data operations locally
-- 🔐 **Secure communication** - HMAC-authenticated requests between cloud and local
-- 🛠️ **Easy setup** - Simple bridge server connects your local resources
-
-**Quick Example:**
-```python
-# Agent that uses local database via MCP
-agent = Agent(
-    role="Data Writer",
-    tools=[{
-        "name": "PostgresInsert",
-        "hosted_by": "mcp",  # Routes to your local infrastructure
-        "config": {
-            "bridge_url": "http://localhost:8081",
-            "bridge_secret": "your-secret"
-        }
-    }]
-)
-```
-
-📖 **[Complete MCP Integration Guide →](docs/mcp_integration.md)**
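The packaging change above moves `psycopg2-binary` behind a new `mcp` extra, so a plain install of the client SDK presumably no longer pulls in the PostgreSQL driver, while installing the extra should. A small sketch of how that could be checked, assuming the extra is named exactly as declared:

```python
# Hypothetical check based on the METADATA diff above.
import importlib.util

# Available after a base "pip install memra":
print(importlib.util.find_spec("memra") is not None)     # True
# psycopg2 is only expected after: pip install "memra[mcp]"
print(importlib.util.find_spec("psycopg2") is not None)  # may be False on a base install
```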
memra-0.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,13 @@
+memra/__init__.py,sha256=K3jA34FSI9LuDHAPyMFpG3cbX0pFVsTP2N5xzbUffiI,662
+memra/discovery.py,sha256=yJIQnrDQu1nyzKykCIuzG_5SW5dIXHCEBLLKRWacIoY,480
+memra/discovery_client.py,sha256=AbnKn6qhyrf7vmOvknEeDzH4tiGHsqPHtDaein_qaW0,1271
+memra/execution.py,sha256=3UIP69x2Ba89vv7OQ3yAzlnl1lphGagFPgKUrqcqElk,20172
+memra/models.py,sha256=nTaYLAp0tRzQ0CQaBLNBURfhBQ5_gyty0ams4mghyIc,3289
+memra/tool_registry.py,sha256=vnsuH5q20AMXADNl3-7HCD26x1zHc67waxxqv_Ta6Ak,2951
+memra/tool_registry_client.py,sha256=uzMQ4COvRams9vuPLcqcdljUpDlAYU_tyFxrRhrA0Lc,4009
+memra-0.2.1.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+memra-0.2.1.dist-info/METADATA,sha256=LI5-Dte9XuNjsnM1KVs8Xr998nViC6jmI2S1nY37lkQ,3794
+memra-0.2.1.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
+memra-0.2.1.dist-info/entry_points.txt,sha256=LBVjwWoxWJRzNLgeByPn6xUvWFIRnqnemvAZgIoSt08,41
+memra-0.2.1.dist-info/top_level.txt,sha256=pXWcTRS1zctdiSUivW4iyKpJ4tcfIu-1BW_fpbal3OY,6
+memra-0.2.1.dist-info/RECORD,,
memra-0.2.1.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+memra
memra-0.2.0.dist-info/RECORD
DELETED
@@ -1,19 +0,0 @@
-memra/__init__.py,sha256=XLSWpo42Ffp_pi5mvk4xvdYBZ8eNLAJF4_3Oi102i90,560
-memra/discovery.py,sha256=yJIQnrDQu1nyzKykCIuzG_5SW5dIXHCEBLLKRWacIoY,480
-memra/discovery_client.py,sha256=AbnKn6qhyrf7vmOvknEeDzH4tiGHsqPHtDaein_qaW0,1271
-memra/execution.py,sha256=5NIyFVtQEeatYQ-fxexT0eWMtCh28k1hRC2Y6cfQaac,20917
-memra/models.py,sha256=nTaYLAp0tRzQ0CQaBLNBURfhBQ5_gyty0ams4mghyIc,3289
-memra/tool_registry.py,sha256=zdyKRShcmKtG7BVfmAHflW9FDl7rooPPAgbdVV4gJ8o,8268
-memra/tool_registry_client.py,sha256=uzMQ4COvRams9vuPLcqcdljUpDlAYU_tyFxrRhrA0Lc,4009
-memra-0.2.0.dist-info/licenses/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-memra-sdk-package/examples/accounts_payable_client.py,sha256=Vu_h5C-qc6_80uz5dXJH4G3zfIbgUNAhQ2y8mWauao0,7401
-memra-sdk-package/memra/__init__.py,sha256=QRk72YETLgL15GVt26tN_rBraCQkhZO7UB9T6d4u_uU,543
-memra-sdk-package/memra/discovery_client.py,sha256=AbnKn6qhyrf7vmOvknEeDzH4tiGHsqPHtDaein_qaW0,1271
-memra-sdk-package/memra/execution.py,sha256=UJ_MJ4getuSk4HJW1sCi7lc26avX-G6-GxnvE-DiSwk,20191
-memra-sdk-package/memra/models.py,sha256=nTaYLAp0tRzQ0CQaBLNBURfhBQ5_gyty0ams4mghyIc,3289
-memra-sdk-package/memra/tool_registry_client.py,sha256=KyNNxj84248E-8MoWNj6pJmlllUG8s0lmeXXmbu0U7o,3996
-memra-0.2.0.dist-info/METADATA,sha256=eOuvH39VFUh-QxTdE5RwT6isgRIJkptEC2lsqlF2AA4,4816
-memra-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-memra-0.2.0.dist-info/entry_points.txt,sha256=LBVjwWoxWJRzNLgeByPn6xUvWFIRnqnemvAZgIoSt08,41
-memra-0.2.0.dist-info/top_level.txt,sha256=5dqePB77aj_pPFavlwxtBvdkUM-kP-WiQD3LRbQswwc,24
-memra-0.2.0.dist-info/RECORD,,
memra-sdk-package/examples/accounts_payable_client.py
DELETED
@@ -1,207 +0,0 @@
-"""
-Client-side Accounts Payable Example
-This version calls the Memra API hosted on Fly.io instead of running tools locally
-"""
-
-import os
-from memra import Agent, Department, LLM, check_api_health, get_api_status
-from memra.execution import ExecutionEngine
-
-# Set API configuration
-os.environ["MEMRA_API_URL"] = "http://localhost:8080"  # Use local API for testing
-os.environ["MEMRA_API_KEY"] = "dev-key"
-
-# Check API health before starting
-print("🔍 Checking Memra API status...")
-api_status = get_api_status()
-print(f"API Health: {'✅ Healthy' if api_status['api_healthy'] else '❌ Unavailable'}")
-print(f"API URL: {api_status['api_url']}")
-print(f"Tools Available: {api_status['tools_available']}")
-
-if not api_status['api_healthy']:
-    print("❌ Cannot proceed - Memra API is not available")
-    print("Make sure the API server is running on localhost:8080")
-    exit(1)
-
-# Define LLMs (these are just metadata - actual LLM calls happen on the server)
-default_llm = LLM(
-    model="llama-3.2-11b-vision-preview",
-    temperature=0.1,
-    max_tokens=2000
-)
-
-parsing_llm = LLM(
-    model="llama-3.2-11b-vision-preview",
-    temperature=0.0,
-    max_tokens=4000
-)
-
-manager_llm = LLM(
-    model="llama-3.2-11b-vision-preview",
-    temperature=0.2,
-    max_tokens=1000
-)
-
-# Define agents (same declarative interface as before)
-etl_agent = Agent(
-    role="Data Engineer",
-    job="Extract invoice schema from database",
-    llm=default_llm,
-    sops=[
-        "Connect to database using credentials",
-        "Query information_schema for invoices table",
-        "Extract column names, types, and constraints",
-        "Return schema as structured JSON"
-    ],
-    systems=["Database"],
-    tools=[
-        {"name": "DatabaseQueryTool", "hosted_by": "memra"}
-    ],
-    output_key="invoice_schema"
-)
-
-parser_agent = Agent(
-    role="Invoice Parser",
-    job="Extract structured data from invoice PDF using schema",
-    llm=parsing_llm,
-    sops=[
-        "Load invoice PDF file",
-        "Convert to high-contrast images if needed",
-        "Run OCR to extract text",
-        "Use schema to identify and extract fields",
-        "Validate extracted data against schema types",
-        "Return structured invoice data"
-    ],
-    systems=["InvoiceStore"],
-    tools=[
-        {"name": "PDFProcessor", "hosted_by": "memra"},
-        {"name": "OCRTool", "hosted_by": "memra"},
-        {"name": "InvoiceExtractionWorkflow", "hosted_by": "memra"}
-    ],
-    input_keys=["file", "invoice_schema"],
-    output_key="invoice_data"
-)
-
-writer_agent = Agent(
-    role="Data Entry Specialist",
-    job="Write validated invoice data to database",
-    llm=default_llm,
-    sops=[
-        "Validate invoice data completeness",
-        "Map fields to database columns using schema",
-        "Connect to database",
-        "Insert record into invoices table",
-        "Return confirmation with record ID"
-    ],
-    systems=["Database"],
-    tools=[
-        {"name": "DataValidator", "hosted_by": "memra"},
-        {"name": "PostgresInsert", "hosted_by": "memra"}
-    ],
-    input_keys=["invoice_data", "invoice_schema"],
-    output_key="write_confirmation"
-)
-
-manager_agent = Agent(
-    role="Accounts Payable Manager",
-    job="Coordinate invoice processing pipeline and handle exceptions",
-    llm=manager_llm,
-    sops=[
-        "Check if schema extraction succeeded",
-        "Validate parsed invoice has required fields",
-        "Ensure invoice total matches line items before DB write",
-        "Handle and log any errors with appropriate escalation"
-    ],
-    allow_delegation=True,
-    output_key="workflow_status"
-)
-
-# Create department
-ap_department = Department(
-    name="Accounts Payable",
-    mission="Process invoices accurately into financial system per company data standards",
-    agents=[etl_agent, parser_agent, writer_agent],
-    manager_agent=manager_agent,
-    workflow_order=["Data Engineer", "Invoice Parser", "Data Entry Specialist"],
-    dependencies=["Database", "InvoiceStore"],
-    execution_policy={
-        "retry_on_fail": True,
-        "max_retries": 2,
-        "halt_on_validation_error": True,
-        "timeout_seconds": 300
-    },
-    context={
-        "company_id": "acme_corp",
-        "fiscal_year": "2024"
-    }
-)
-
-# Execute the department (tools will run on Fly.io)
-print("\n🚀 Starting invoice processing workflow...")
-print("📡 Tools will execute on Memra API server")
-
-engine = ExecutionEngine()
-input_data = {
-    "file": "invoices/10352259310.PDF",
-    "connection": "postgresql://tarpus@localhost:5432/memra_invoice_db"
-}
-
-result = engine.execute_department(ap_department, input_data)
-
-# Display results (same as before)
-if result.success:
-    print("✅ Invoice processing completed successfully!")
-
-    # Show manager validation results
-    if 'workflow_status' in result.data:
-        manager_report = result.data['workflow_status']
-        print(f"\n🔍 Manager Validation Report:")
-        print(f"Status: {manager_report.get('validation_status', 'unknown')}")
-        print(f"Summary: {manager_report.get('summary', 'No summary available')}")
-
-        # Show agent performance analysis
-        if 'agent_performance' in manager_report:
-            print(f"\n📊 Agent Performance Analysis:")
-            for agent_role, performance in manager_report['agent_performance'].items():
-                work_quality = performance['work_quality']
-                status_emoji = "✅" if work_quality == "real" else "🔄"
-                print(f"{status_emoji} {agent_role}: {performance['status']}")
-                if performance['tools_real_work']:
-                    print(f"   Real work: {', '.join(performance['tools_real_work'])}")
-                if performance['tools_mock_work']:
-                    print(f"   Mock work: {', '.join(performance['tools_mock_work'])}")
-
-        # Show workflow analysis
-        if 'workflow_analysis' in manager_report:
-            analysis = manager_report['workflow_analysis']
-            print(f"\n📈 Workflow Analysis:")
-            print(f"Overall Quality: {analysis['overall_quality']}")
-            print(f"Real Work: {analysis['real_work_agents']}/{analysis['total_agents']} agents ({analysis['real_work_percentage']:.1f}%)")
-
-        # Show recommendations
-        if 'recommendations' in manager_report and manager_report['recommendations']:
-            print(f"\n💡 Recommendations:")
-            for rec in manager_report['recommendations']:
-                print(f"   • {rec}")
-
-    # Try to get record_id if it exists
-    if result.data and 'write_confirmation' in result.data:
-        confirmation = result.data['write_confirmation']
-        if isinstance(confirmation, dict) and 'record_id' in confirmation:
-            print(f"\n💾 Invoice processed successfully: Record ID {confirmation['record_id']}")
-        else:
-            print(f"\n💾 Write confirmation: {confirmation}")
-
-    print(f"\n📡 All tools executed remotely on Memra API server")
-
-else:
-    print(f"❌ Processing failed: {result.error}")
-
-# Show execution trace
-print("\n=== Execution Trace ===")
-print(f"Agents executed: {', '.join(result.trace.agents_executed)}")
-print(f"Tools invoked: {', '.join(result.trace.tools_invoked)}")
-if result.trace.errors:
-    print(f"Errors: {', '.join(result.trace.errors)}")
-
-print(f"\n🌐 API Calls made to: {api_status['api_url']}")
memra-sdk-package/memra/__init__.py
DELETED
@@ -1,28 +0,0 @@
-"""
-Memra SDK - A declarative orchestration framework for AI-powered business workflows
-"""
-
-from .models import (
-    Agent,
-    Department,
-    LLM,
-    Tool,
-    ExecutionPolicy,
-    ExecutionTrace,
-    DepartmentResult,
-    DepartmentAudit
-)
-from .discovery_client import discover_tools, check_api_health, get_api_status
-
-__version__ = "0.1.2"
-__all__ = [
-    "Agent",
-    "Department",
-    "LLM",
-    "Tool",
-    "ExecutionPolicy",
-    "ExecutionTrace",
-    "DepartmentResult",
-    "DepartmentAudit",
-    "discover_tools"
-]
memra-sdk-package/memra/discovery_client.py
DELETED
@@ -1,49 +0,0 @@
-"""
-Client-side tool discovery for Memra SDK
-Queries the Memra API to discover available tools
-"""
-
-from typing import List, Dict, Any, Optional
-from .tool_registry_client import ToolRegistryClient
-
-def discover_tools(hosted_by: Optional[str] = None) -> List[Dict[str, Any]]:
-    """
-    Discover available tools from the Memra API
-
-    Args:
-        hosted_by: Filter tools by hosting provider ("memra" or "mcp")
-
-    Returns:
-        List of available tools with their descriptions
-    """
-    registry = ToolRegistryClient()
-    return registry.discover_tools(hosted_by)
-
-def check_api_health() -> bool:
-    """
-    Check if the Memra API is available
-
-    Returns:
-        True if API is healthy, False otherwise
-    """
-    registry = ToolRegistryClient()
-    return registry.health_check()
-
-def get_api_status() -> Dict[str, Any]:
-    """
-    Get detailed API status information
-
-    Returns:
-        Dictionary with API status details
-    """
-    registry = ToolRegistryClient()
-
-    is_healthy = registry.health_check()
-    tools = registry.discover_tools() if is_healthy else []
-
-    return {
-        "api_healthy": is_healthy,
-        "api_url": registry.api_base,
-        "tools_available": len(tools),
-        "tools": tools
-    }
memra-sdk-package/memra/execution.py
DELETED
@@ -1,418 +0,0 @@
-import time
-import logging
-from typing import Dict, Any, List, Optional
-from .models import Department, Agent, DepartmentResult, ExecutionTrace, DepartmentAudit
-from .tool_registry_client import ToolRegistryClient
-
-logger = logging.getLogger(__name__)
-
-class ExecutionEngine:
-    """Engine that executes department workflows by coordinating agents and tools"""
-
-    def __init__(self):
-        self.tool_registry = ToolRegistryClient()
-        self.last_execution_audit: Optional[DepartmentAudit] = None
-
-    def execute_department(self, department: Department, input_data: Dict[str, Any]) -> DepartmentResult:
-        """Execute a department workflow"""
-        start_time = time.time()
-        trace = ExecutionTrace()
-
-        try:
-            print(f"\n🏢 Starting {department.name} Department")
-            print(f"📋 Mission: {department.mission}")
-            print(f"👥 Team: {', '.join([agent.role for agent in department.agents])}")
-            if department.manager_agent:
-                print(f"👔 Manager: {department.manager_agent.role}")
-            print(f"🔄 Workflow: {' → '.join(department.workflow_order)}")
-            print("=" * 60)
-
-            logger.info(f"Starting execution of department: {department.name}")
-
-            # Initialize execution context
-            context = {
-                "input": input_data,
-                "department_context": department.context or {},
-                "results": {}
-            }
-
-            # Execute agents in workflow order
-            for i, agent_role in enumerate(department.workflow_order, 1):
-                print(f"\n🔄 Step {i}/{len(department.workflow_order)}: {agent_role}")
-
-                agent = self._find_agent_by_role(department, agent_role)
-                if not agent:
-                    error_msg = f"Agent with role '{agent_role}' not found in department"
-                    print(f"❌ Error: {error_msg}")
-                    trace.errors.append(error_msg)
-                    return DepartmentResult(
-                        success=False,
-                        error=error_msg,
-                        trace=trace
-                    )
-
-                # Execute agent
-                agent_start = time.time()
-                result = self._execute_agent(agent, context, trace)
-                agent_duration = time.time() - agent_start
-
-                trace.agents_executed.append(agent.role)
-                trace.execution_times[agent.role] = agent_duration
-
-                if not result.get("success", False):
-                    # Try fallback if available
-                    if department.manager_agent and agent.role in (department.manager_agent.fallback_agents or {}):
-                        fallback_role = department.manager_agent.fallback_agents[agent.role]
-                        print(f"🔄 {department.manager_agent.role}: Let me try {fallback_role} as backup for {agent.role}")
-                        fallback_agent = self._find_agent_by_role(department, fallback_role)
-                        if fallback_agent:
-                            logger.info(f"Trying fallback agent: {fallback_role}")
-                            result = self._execute_agent(fallback_agent, context, trace)
-                            trace.agents_executed.append(fallback_agent.role)
-
-                    if not result.get("success", False):
-                        error_msg = f"Agent {agent.role} failed: {result.get('error', 'Unknown error')}"
-                        print(f"❌ Workflow stopped: {error_msg}")
-                        trace.errors.append(error_msg)
-                        return DepartmentResult(
-                            success=False,
-                            error=error_msg,
-                            trace=trace
-                        )
-
-                # Store result for next agent
-                context["results"][agent.output_key] = result.get("data")
-                print(f"✅ Step {i} completed in {agent_duration:.1f}s")
-
-            # Execute manager agent for final validation if present
-            if department.manager_agent:
-                print(f"\n🔍 Final Review Phase")
-                manager_start = time.time()
-
-                # Prepare manager input with all workflow results
-                manager_input = {
-                    "workflow_results": context["results"],
-                    "department_context": context["department_context"]
-                }
-
-                # Add connection if available
-                if "connection" in context["input"]:
-                    manager_input["connection"] = context["input"]["connection"]
-
-                # Execute manager validation
-                manager_result = self._execute_manager_validation(department.manager_agent, manager_input, trace)
-                manager_duration = time.time() - manager_start
-
-                trace.agents_executed.append(department.manager_agent.role)
-                trace.execution_times[department.manager_agent.role] = manager_duration
-
-                # Store manager validation results
-                context["results"][department.manager_agent.output_key] = manager_result.get("data")
-
-                # Check if manager validation failed
-                if not manager_result.get("success", False):
-                    error_msg = f"Manager validation failed: {manager_result.get('error', 'Unknown error')}"
-                    print(f"❌ {error_msg}")
-                    trace.errors.append(error_msg)
-                    return DepartmentResult(
-                        success=False,
-                        error=error_msg,
-                        trace=trace
-                    )
-
-                print(f"✅ Manager review completed in {manager_duration:.1f}s")
-
-            # Create audit record
-            total_duration = time.time() - start_time
-            self.last_execution_audit = DepartmentAudit(
-                agents_run=trace.agents_executed,
-                tools_invoked=trace.tools_invoked,
-                duration_seconds=total_duration
-            )
-
-            print(f"\n🎉 {department.name} Department workflow completed!")
-            print(f"⏱️ Total time: {total_duration:.1f}s")
-            print("=" * 60)
-
-            return DepartmentResult(
-                success=True,
-                data=context["results"],
-                trace=trace
-            )
-
-        except Exception as e:
-            print(f"💥 Unexpected error in {department.name} Department: {str(e)}")
-            logger.error(f"Execution failed: {str(e)}")
-            trace.errors.append(str(e))
-            return DepartmentResult(
-                success=False,
-                error=str(e),
-                trace=trace
-            )
-
-    def _find_agent_by_role(self, department: Department, role: str) -> Optional[Agent]:
-        """Find an agent by role in the department"""
-        for agent in department.agents:
-            if agent.role == role:
-                return agent
-        return None
-
-    def _execute_agent(self, agent: Agent, context: Dict[str, Any], trace: ExecutionTrace) -> Dict[str, Any]:
-        """Execute a single agent"""
-        print(f"\n👤 {agent.role}: Hi! I'm starting my work now...")
-        logger.info(f"Executing agent: {agent.role}")
-
-        try:
-            # Show what the agent is thinking about
-            print(f"💭 {agent.role}: My job is to {agent.job.lower()}")
-
-            # Prepare input data for agent
-            agent_input = {}
-            for key in agent.input_keys:
-                if key in context["input"]:
-                    agent_input[key] = context["input"][key]
-                    print(f"📥 {agent.role}: I received '{key}' as input")
-                elif key in context["results"]:
-                    agent_input[key] = context["results"][key]
-                    print(f"📥 {agent.role}: I got '{key}' from a previous agent")
-                else:
-                    print(f"🤔 {agent.role}: Hmm, I'm missing input '{key}' but I'll try to work without it")
-                    logger.warning(f"Missing input key '{key}' for agent {agent.role}")
-
-            # Always include connection string if available (for database tools)
-            if "connection" in context["input"]:
-                agent_input["connection"] = context["input"]["connection"]
-
-            # Execute agent's tools
-            result_data = {}
-            tools_with_real_work = []
-            tools_with_mock_work = []
-
-            print(f"🔧 {agent.role}: I need to use {len(agent.tools)} tool(s) to complete my work...")
-
-            for i, tool_spec in enumerate(agent.tools, 1):
-                tool_name = tool_spec["name"] if isinstance(tool_spec, dict) else tool_spec.name
-                hosted_by = tool_spec.get("hosted_by", "memra") if isinstance(tool_spec, dict) else tool_spec.hosted_by
-
-                print(f"⚡ {agent.role}: Using tool {i}/{len(agent.tools)}: {tool_name}")
-
-                trace.tools_invoked.append(tool_name)
-
-                # Get tool from registry and execute
-                tool_result = self.tool_registry.execute_tool(
-                    tool_name,
-                    hosted_by,
-                    agent_input,
-                    agent.config
-                )
-
-                if not tool_result.get("success", False):
-                    print(f"😟 {agent.role}: Oh no! Tool {tool_name} failed: {tool_result.get('error', 'Unknown error')}")
-                    return {
-                        "success": False,
-                        "error": f"Tool {tool_name} failed: {tool_result.get('error', 'Unknown error')}"
-                    }
-
-                # Check if this tool did real work or mock work
-                tool_data = tool_result.get("data", {})
-                if self._is_real_work(tool_name, tool_data):
-                    tools_with_real_work.append(tool_name)
-                    print(f"✅ {agent.role}: Great! {tool_name} did real work and gave me useful results")
-                else:
-                    tools_with_mock_work.append(tool_name)
-                    print(f"🔄 {agent.role}: {tool_name} gave me simulated results (that's okay for testing)")
-
-                result_data.update(tool_data)
-
-            # Add metadata about real vs mock work
-            result_data["_memra_metadata"] = {
-                "agent_role": agent.role,
-                "tools_real_work": tools_with_real_work,
-                "tools_mock_work": tools_with_mock_work,
-                "work_quality": "real" if tools_with_real_work else "mock"
-            }
-
-            # Agent reports completion
-            if tools_with_real_work:
-                print(f"🎉 {agent.role}: Perfect! I completed my work with real data processing")
-            else:
-                print(f"📝 {agent.role}: I finished my work, but used simulated data (still learning!)")
-
-            print(f"📤 {agent.role}: Passing my results to the next agent via '{agent.output_key}'")
-
-            return {
-                "success": True,
-                "data": result_data
-            }
-
-        except Exception as e:
-            print(f"😰 {agent.role}: I encountered an error and couldn't complete my work: {str(e)}")
-            logger.error(f"Agent {agent.role} execution failed: {str(e)}")
-            return {
-                "success": False,
-                "error": str(e)
-            }
-
-    def _is_real_work(self, tool_name: str, tool_data: Dict[str, Any]) -> bool:
-        """Determine if a tool did real work or returned mock data"""
-
-        # Check for specific indicators of real work
-        if tool_name == "PDFProcessor":
-            # Real work if it has actual image paths and file size
-            return (
-                "metadata" in tool_data and
-                "file_size" in tool_data["metadata"] and
-                tool_data["metadata"]["file_size"] > 1000 and  # Real file size
-                "pages" in tool_data and
-                len(tool_data["pages"]) > 0 and
-                "image_path" in tool_data["pages"][0]
-            )
-
-        elif tool_name == "InvoiceExtractionWorkflow":
-            # Real work if it has actual extracted data with specific vendor info
-            return (
-                "headerSection" in tool_data and
-                "vendorName" in tool_data["headerSection"] and
-                tool_data["headerSection"]["vendorName"] not in ["", "UNKNOWN", "Sample Vendor"] and
-                "chargesSummary" in tool_data and
-                "memra_checksum" in tool_data["chargesSummary"]
-            )
-
-        elif tool_name == "DatabaseQueryTool":
-            # Real work if it loaded the actual schema file (more than 3 columns)
-            return (
-                "columns" in tool_data and
-                len(tool_data["columns"]) > 3
-            )
-
-        elif tool_name == "DataValidator":
-            # Real work if it actually validated real data with meaningful validation
-            return (
-                "validation_errors" in tool_data and
-                isinstance(tool_data["validation_errors"], list) and
-                "is_valid" in tool_data and
-                # Check if it's validating real extracted data (not just mock data)
-                len(str(tool_data)) > 100  # Real validation results are more substantial
-            )
-
-        elif tool_name == "PostgresInsert":
-            # Real work if it successfully inserted into a real database
-            return (
-                "success" in tool_data and
-                tool_data["success"] == True and
-                "record_id" in tool_data and
-                isinstance(tool_data["record_id"], int) and  # Real DB returns integer IDs
-                "database_table" in tool_data  # Real implementation includes table name
-            )
-
-        # Default to mock work
-        return False
-
-    def get_last_audit(self) -> Optional[DepartmentAudit]:
-        """Get audit information from the last execution"""
-        return self.last_execution_audit
-
-    def _execute_manager_validation(self, manager_agent: Agent, manager_input: Dict[str, Any], trace: ExecutionTrace) -> Dict[str, Any]:
-        """Execute manager agent to validate workflow results"""
-        print(f"\n👔 {manager_agent.role}: Time for me to review everyone's work...")
-        logger.info(f"Manager {manager_agent.role} validating workflow results")
-
-        try:
-            # Analyze workflow results for real vs mock work
-            workflow_analysis = self._analyze_workflow_quality(manager_input["workflow_results"])
-
-            print(f"🔍 {manager_agent.role}: Let me analyze what each agent accomplished...")
-
-            # Prepare validation report
-            validation_report = {
-                "workflow_analysis": workflow_analysis,
-                "validation_status": "pass" if workflow_analysis["overall_quality"] == "real" else "fail",
-                "recommendations": [],
-                "agent_performance": {}
-            }
-
-            # Analyze each agent's performance
-            for result_key, result_data in manager_input["workflow_results"].items():
-                if isinstance(result_data, dict) and "_memra_metadata" in result_data:
-                    metadata = result_data["_memra_metadata"]
-                    agent_role = metadata["agent_role"]
-
-                    if metadata["work_quality"] == "real":
-                        print(f"👍 {manager_agent.role}: {agent_role} did excellent real work!")
-                    else:
-                        print(f"📋 {manager_agent.role}: {agent_role} completed their tasks but with simulated data")
-
-                    validation_report["agent_performance"][agent_role] = {
-                        "work_quality": metadata["work_quality"],
-                        "tools_real_work": metadata["tools_real_work"],
-                        "tools_mock_work": metadata["tools_mock_work"],
-                        "status": "completed_real_work" if metadata["work_quality"] == "real" else "completed_mock_work"
-                    }
-
-                    # Add recommendations for mock work
-                    if metadata["work_quality"] == "mock":
-                        recommendation = f"Agent {agent_role} performed mock work - implement real {', '.join(metadata['tools_mock_work'])} functionality"
-                        validation_report["recommendations"].append(recommendation)
-                        print(f"💡 {manager_agent.role}: I recommend upgrading {agent_role}'s tools for production")
-
-            # Overall workflow validation
-            if workflow_analysis["overall_quality"] == "real":
-                validation_report["summary"] = "Workflow completed successfully with real data processing"
-                print(f"🎯 {manager_agent.role}: Excellent! This workflow is production-ready")
-            elif workflow_analysis["overall_quality"].startswith("mixed"):
-                validation_report["summary"] = "Workflow completed with mixed real and simulated data"
-                print(f"⚖️ {manager_agent.role}: Good progress! Some agents are production-ready, others need work")
-            else:
-                validation_report["summary"] = "Workflow completed but with mock/simulated data - production readiness requires real implementations"
-                print(f"🚧 {manager_agent.role}: This workflow needs more development before production use")
-
-            real_percentage = workflow_analysis["real_work_percentage"]
-            print(f"📊 {manager_agent.role}: Overall assessment: {real_percentage:.0f}% of agents did real work")
-
-            return {
-                "success": True,
-                "data": validation_report
-            }
-
-        except Exception as e:
-            print(f"😰 {manager_agent.role}: I had trouble analyzing the workflow: {str(e)}")
-            logger.error(f"Manager validation failed: {str(e)}")
-            return {
-                "success": False,
-                "error": str(e)
-            }
-
-    def _analyze_workflow_quality(self, workflow_results: Dict[str, Any]) -> Dict[str, Any]:
-        """Analyze the overall quality of workflow execution"""
-
-        total_agents = 0
-        real_work_agents = 0
-        mock_work_agents = 0
-
-        for result_key, result_data in workflow_results.items():
-            if isinstance(result_data, dict) and "_memra_metadata" in result_data:
-                metadata = result_data["_memra_metadata"]
-                total_agents += 1
-
-                if metadata["work_quality"] == "real":
-                    real_work_agents += 1
-                else:
-                    mock_work_agents += 1
-
-        # Determine overall quality
-        if real_work_agents > 0 and mock_work_agents == 0:
-            overall_quality = "real"
-        elif real_work_agents > mock_work_agents:
-            overall_quality = "mixed_mostly_real"
-        elif real_work_agents > 0:
-            overall_quality = "mixed_mostly_mock"
-        else:
-            overall_quality = "mock"
-
-        return {
-            "total_agents": total_agents,
-            "real_work_agents": real_work_agents,
-            "mock_work_agents": mock_work_agents,
-            "overall_quality": overall_quality,
-            "real_work_percentage": (real_work_agents / total_agents * 100) if total_agents > 0 else 0
-        }
memra-sdk-package/memra/models.py
DELETED
@@ -1,98 +0,0 @@
-from typing import List, Dict, Optional, Any, Union
-from pydantic import BaseModel, Field
-
-class LLM(BaseModel):
-    model: str
-    temperature: float = 0.0
-    max_tokens: Optional[int] = None
-    stop: Optional[List[str]] = None
-
-class Tool(BaseModel):
-    name: str
-    hosted_by: str = "memra"  # or "mcp" for customer's Model Context Protocol
-    description: Optional[str] = None
-    parameters: Optional[Dict[str, Any]] = None
-
-class Agent(BaseModel):
-    role: str
-    job: str
-    llm: Optional[Union[LLM, Dict[str, Any]]] = None
-    sops: List[str] = Field(default_factory=list)
-    tools: List[Union[Tool, Dict[str, Any]]] = Field(default_factory=list)
-    systems: List[str] = Field(default_factory=list)
-    input_keys: List[str] = Field(default_factory=list)
-    output_key: str
-    allow_delegation: bool = False
-    fallback_agents: Optional[Dict[str, str]] = None
-    config: Optional[Dict[str, Any]] = None
-
-class ExecutionPolicy(BaseModel):
-    retry_on_fail: bool = True
-    max_retries: int = 2
-    halt_on_validation_error: bool = True
-    timeout_seconds: int = 300
-
-class ExecutionTrace(BaseModel):
-    agents_executed: List[str] = Field(default_factory=list)
-    tools_invoked: List[str] = Field(default_factory=list)
-    execution_times: Dict[str, float] = Field(default_factory=dict)
-    errors: List[str] = Field(default_factory=list)
-
-    def show(self):
-        """Display execution trace information"""
-        print("=== Execution Trace ===")
-        print(f"Agents executed: {', '.join(self.agents_executed)}")
-        print(f"Tools invoked: {', '.join(self.tools_invoked)}")
-        if self.errors:
-            print(f"Errors: {', '.join(self.errors)}")
-
-class DepartmentResult(BaseModel):
-    success: bool
-    data: Optional[Dict[str, Any]] = None
-    error: Optional[str] = None
-    trace: ExecutionTrace = Field(default_factory=ExecutionTrace)
-
-class DepartmentAudit(BaseModel):
-    agents_run: List[str]
-    tools_invoked: List[str]
-    duration_seconds: float
-    total_cost: Optional[float] = None
-
-class Department(BaseModel):
-    name: str
-    mission: str
-    agents: List[Agent]
-    manager_agent: Optional[Agent] = None
-    default_llm: Optional[LLM] = None
-    workflow_order: List[str] = Field(default_factory=list)
-    dependencies: List[str] = Field(default_factory=list)
-    execution_policy: Optional[ExecutionPolicy] = None
-    context: Optional[Dict[str, Any]] = None
-
-    def run(self, input: Dict[str, Any]) -> DepartmentResult:
-        """
-        Execute the department workflow with the given input data.
-        """
-        # Import here to avoid circular imports
-        from .execution import ExecutionEngine
-
-        engine = ExecutionEngine()
-        return engine.execute_department(self, input)
-
-    def audit(self) -> DepartmentAudit:
-        """
-        Return audit information about the last execution.
-        """
-        # Import here to avoid circular imports
-        from .execution import ExecutionEngine
-
-        engine = ExecutionEngine()
-        audit = engine.get_last_audit()
-        if audit:
-            return audit
-        else:
-            return DepartmentAudit(
-                agents_run=[],
-                tools_invoked=[],
-                duration_seconds=0.0
-            )
memra-sdk-package/memra/tool_registry_client.py
DELETED
@@ -1,105 +0,0 @@
-import httpx
-import logging
-import os
-from typing import Dict, Any, List, Optional
-import asyncio
-
-logger = logging.getLogger(__name__)
-
-class ToolRegistryClient:
-    """Client-side registry that calls Memra API for tool execution"""
-
-    def __init__(self):
-        self.api_base = os.getenv("MEMRA_API_URL", "https://api.memra.co")
-        self.api_key = os.getenv("MEMRA_API_KEY")
-        if not self.api_key:
-            raise ValueError(
-                "MEMRA_API_KEY environment variable is required. "
-                "Contact info@memra.co to request access."
-            )
-        self.tools_cache = None
-
-    def discover_tools(self, hosted_by: Optional[str] = None) -> List[Dict[str, Any]]:
-        """Discover available tools from the API"""
-        try:
-            # Use sync httpx for compatibility with existing sync code
-            with httpx.Client(timeout=30.0) as client:
-                response = client.get(
-                    f"{self.api_base}/tools/discover",
-                    headers={"X-API-Key": self.api_key}
-                )
-                response.raise_for_status()
-
-                data = response.json()
-                tools = data.get("tools", [])
-
-                # Filter by hosted_by if specified
-                if hosted_by:
-                    tools = [t for t in tools if t.get("hosted_by") == hosted_by]
-
-                self.tools_cache = tools
-                logger.info(f"Discovered {len(tools)} tools from API")
-                return tools
-
-        except Exception as e:
-            logger.error(f"Failed to discover tools from API: {e}")
-            # Return empty list if API is unavailable
-            return []
-
-    def execute_tool(self, tool_name: str, hosted_by: str, input_data: Dict[str, Any],
-                     config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
-        """Execute a tool via the API"""
-        try:
-            logger.info(f"Executing tool {tool_name} via API")
-
-            # Prepare request payload
-            payload = {
-                "tool_name": tool_name,
-                "hosted_by": hosted_by,
-                "input_data": input_data,
-                "config": config
-            }
-
-            # Make API call
-            with httpx.Client(timeout=120.0) as client:  # Longer timeout for tool execution
-                response = client.post(
-                    f"{self.api_base}/tools/execute",
-                    headers={
-                        "X-API-Key": self.api_key,
-                        "Content-Type": "application/json"
-                    },
-                    json=payload
-                )
-                response.raise_for_status()
-
-                result = response.json()
-                logger.info(f"Tool {tool_name} executed successfully via API")
-                return result
-
-        except httpx.TimeoutException:
-            logger.error(f"Tool {tool_name} execution timed out")
-            return {
-                "success": False,
-                "error": f"Tool execution timed out after 120 seconds"
-            }
-        except httpx.HTTPStatusError as e:
-            logger.error(f"API error for tool {tool_name}: {e.response.status_code}")
-            return {
-                "success": False,
-                "error": f"API error: {e.response.status_code} - {e.response.text}"
-            }
-        except Exception as e:
-            logger.error(f"Tool execution failed for {tool_name}: {str(e)}")
-            return {
-                "success": False,
-                "error": str(e)
-            }
-
-    def health_check(self) -> bool:
-        """Check if the API is available"""
-        try:
-            with httpx.Client(timeout=10.0) as client:
-                response = client.get(f"{self.api_base}/health")
-                return response.status_code == 200
-        except:
-            return False
{memra-0.2.0.dist-info/licenses → memra-0.2.1.dist-info}/LICENSE
File without changes

{memra-0.2.0.dist-info → memra-0.2.1.dist-info}/entry_points.txt
File without changes