memra 0.2.2__tar.gz → 0.2.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memra-0.2.3/PKG-INFO +101 -0
- memra-0.2.3/README.md +64 -0
- {memra-0.2.2 → memra-0.2.3}/memra/__init__.py +6 -2
- {memra-0.2.2 → memra-0.2.3}/memra/execution.py +43 -0
- {memra-0.2.2 → memra-0.2.3}/memra/tool_registry.py +154 -0
- {memra-0.2.2 → memra-0.2.3}/memra.egg-info/SOURCES.txt +0 -3
- {memra-0.2.2 → memra-0.2.3}/pyproject.toml +1 -1
- {memra-0.2.2 → memra-0.2.3}/setup.py +1 -1
- memra-0.2.2/CHANGELOG.md +0 -63
- memra-0.2.2/LICENSE +0 -0
- memra-0.2.2/PKG-INFO +0 -148
- memra-0.2.2/README.md +0 -113
- memra-0.2.2/mcp_bridge_server.py +0 -230
- {memra-0.2.2 → memra-0.2.3}/MANIFEST.in +0 -0
- {memra-0.2.2 → memra-0.2.3}/memra/discovery.py +0 -0
- {memra-0.2.2 → memra-0.2.3}/memra/discovery_client.py +0 -0
- {memra-0.2.2 → memra-0.2.3}/memra/models.py +0 -0
- {memra-0.2.2 → memra-0.2.3}/memra/tool_registry_client.py +0 -0
- {memra-0.2.2 → memra-0.2.3}/setup.cfg +0 -0
memra-0.2.3/PKG-INFO
ADDED
@@ -0,0 +1,101 @@
Metadata-Version: 2.4
Name: memra
Version: 0.2.3
Summary: Declarative framework for enterprise workflows with MCP integration - Client SDK
Home-page: https://github.com/memra/memra-sdk
Author: Memra
Author-email: Memra <support@memra.com>
License: MIT
Project-URL: Homepage, https://memra.co
Project-URL: Repository, https://github.com/memra-platform/memra-sdk
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: pydantic>=1.8.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: typing-extensions>=4.0.0
Requires-Dist: aiohttp>=3.8.0
Requires-Dist: aiohttp-cors>=0.7.0
Provides-Extra: dev
Requires-Dist: pytest>=6.0; extra == "dev"
Requires-Dist: pytest-asyncio; extra == "dev"
Requires-Dist: black; extra == "dev"
Requires-Dist: flake8; extra == "dev"
Provides-Extra: mcp
Requires-Dist: psycopg2-binary>=2.9.0; extra == "mcp"
Dynamic: author
Dynamic: home-page
Dynamic: requires-python

# Memra SDK

The core Memra framework for building AI-powered business workflows.

## Installation

```bash
pip install memra
```

## Quick Start

```python
from memra import Agent, Department, LLM, ExecutionEngine

# Define an agent
agent = Agent(
    role="Data Analyst",
    job="Analyze customer data",
    llm=LLM(model="llama-3.2-11b-vision-preview"),
    sops=["Load data", "Perform analysis", "Generate report"],
    output_key="analysis_result"
)

# Create a department
department = Department(
    name="Analytics",
    mission="Provide data insights",
    agents=[agent],
    workflow_order=["Data Analyst"]
)

# Execute the workflow
engine = ExecutionEngine()
result = engine.execute_department(department, {"data": "customer_data.csv"})
```

## Core Components

### Agent
An AI worker that performs specific tasks using LLMs and tools.

### Department
A team of agents working together to accomplish a mission.

### ExecutionEngine
Orchestrates the execution of departments and their workflows.

### LLM
Configuration for language models used by agents.

## Examples

See the `examples/` directory for basic usage examples:
- `simple_text_to_sql.py` - Basic text-to-SQL conversion
- `ask_questions.py` - Simple question answering

## Documentation

For detailed documentation, visit [docs.memra.co](https://docs.memra.co)

## License

MIT License - see LICENSE file for details.
memra-0.2.3/README.md
ADDED
@@ -0,0 +1,64 @@
# Memra SDK

The core Memra framework for building AI-powered business workflows.

## Installation

```bash
pip install memra
```

## Quick Start

```python
from memra import Agent, Department, LLM, ExecutionEngine

# Define an agent
agent = Agent(
    role="Data Analyst",
    job="Analyze customer data",
    llm=LLM(model="llama-3.2-11b-vision-preview"),
    sops=["Load data", "Perform analysis", "Generate report"],
    output_key="analysis_result"
)

# Create a department
department = Department(
    name="Analytics",
    mission="Provide data insights",
    agents=[agent],
    workflow_order=["Data Analyst"]
)

# Execute the workflow
engine = ExecutionEngine()
result = engine.execute_department(department, {"data": "customer_data.csv"})
```

## Core Components

### Agent
An AI worker that performs specific tasks using LLMs and tools.

### Department
A team of agents working together to accomplish a mission.

### ExecutionEngine
Orchestrates the execution of departments and their workflows.

### LLM
Configuration for language models used by agents.

## Examples

See the `examples/` directory for basic usage examples:
- `simple_text_to_sql.py` - Basic text-to-SQL conversion
- `ask_questions.py` - Simple question answering

## Documentation

For detailed documentation, visit [docs.memra.co](https://docs.memra.co)

## License

MIT License - see LICENSE file for details.
{memra-0.2.2 → memra-0.2.3}/memra/__init__.py CHANGED
@@ -6,18 +6,22 @@ Think of it as "Kubernetes for business logic" where agents are the pods and
 departments are the deployments.
 """

-__version__ = "0.2.2"
+__version__ = "0.2.3"

 # Core imports
-from .models import Agent, Department, Tool
+from .models import Agent, Department, Tool, LLM
 from .execution import ExecutionEngine
+from .discovery_client import check_api_health, get_api_status

 # Make key classes available at package level
 __all__ = [
     "Agent",
     "Department",
     "Tool",
+    "LLM",
     "ExecutionEngine",
+    "check_api_health",
+    "get_api_status",
     "__version__"
 ]
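For reference, a minimal sketch of the new package-level exports in 0.2.3. The diff only shows the names; how `check_api_health` and `get_api_status` are meant to be called is an assumption here, not something the diff confirms.

```python
# Hedged sketch of the 0.2.3 top-level API; return values of the two
# discovery helpers are assumptions, not documented in this diff.
from memra import LLM, __version__, check_api_health, get_api_status

print(__version__)  # "0.2.3"

# LLM is now importable from the package root, matching the README example
llm = LLM(model="llama-3.2-11b-vision-preview")

if check_api_health():       # assumed: truthy when the hosted API is reachable
    print(get_api_status())  # assumed: returns a status payload
```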
{memra-0.2.2 → memra-0.2.3}/memra/execution.py CHANGED
@@ -216,6 +216,21 @@ class ExecutionEngine:
 # Use local registry for MCP and other local tools
 print(f"🏠 {agent.role}: Using local registry for {tool_name}")
 config_to_pass = tool_spec.get("config") if isinstance(tool_spec, dict) else tool_spec.config
+
+# For MCP tools, merge department context MCP configuration
+if hosted_by == "mcp":
+    mcp_config = {}
+    dept_context = context.get("department_context", {})
+    if "mcp_bridge_url" in dept_context:
+        mcp_config["bridge_url"] = dept_context["mcp_bridge_url"]
+    if "mcp_bridge_secret" in dept_context:
+        mcp_config["bridge_secret"] = dept_context["mcp_bridge_secret"]
+
+    # Merge with tool-specific config if it exists
+    if config_to_pass:
+        mcp_config.update(config_to_pass)
+    config_to_pass = mcp_config
+
 print(f"🔧 {agent.role}: Config for {tool_name}: {config_to_pass}")
 tool_result = self.tool_registry.execute_tool(
     tool_name,
@@ -325,6 +340,34 @@ class ExecutionEngine:
         not tool_data.get("_mock", False)  # Not mock data
     )

+elif tool_name == "FileDiscovery":
+    # Real work if it actually discovered files in a real directory
+    return (
+        "files" in tool_data and
+        isinstance(tool_data["files"], list) and
+        "directory" in tool_data and
+        tool_data.get("success", False) == True
+    )
+
+elif tool_name == "FileCopy":
+    # Real work if it actually copied a file
+    return (
+        "destination_path" in tool_data and
+        "source_path" in tool_data and
+        tool_data.get("success", False) == True and
+        tool_data.get("operation") == "copy_completed"
+    )
+
+elif tool_name == "TextToSQL":
+    # Real work if it actually executed SQL and returned real results
+    return (
+        "generated_sql" in tool_data and
+        "results" in tool_data and
+        isinstance(tool_data["results"], list) and
+        tool_data.get("success", False) == True and
+        not tool_data.get("_mock", False)  # Not mock data
+    )
+
 # Default to mock work
 return False
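The department-context merge in the first hunk is easy to misread in diff form, so here is a standalone sketch of the same logic. This is not the SDK's actual method, and the example values are hypothetical.

```python
from typing import Optional

def merge_mcp_config(dept_context: dict, tool_config: Optional[dict]) -> dict:
    """Sketch of the merge added above: department-level MCP bridge settings
    form the base config, and any tool-specific config overrides them."""
    mcp_config = {}
    if "mcp_bridge_url" in dept_context:
        mcp_config["bridge_url"] = dept_context["mcp_bridge_url"]
    if "mcp_bridge_secret" in dept_context:
        mcp_config["bridge_secret"] = dept_context["mcp_bridge_secret"]
    if tool_config:
        mcp_config.update(tool_config)  # tool-level keys win on collision
    return mcp_config

# Hypothetical values for illustration only
print(merge_mcp_config(
    {"mcp_bridge_url": "http://localhost:8081", "mcp_bridge_secret": "test-secret"},
    {"table_name": "invoices"},
))
# -> {'bridge_url': 'http://localhost:8081', 'bridge_secret': 'test-secret', 'table_name': 'invoices'}
```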
{memra-0.2.2 → memra-0.2.3}/memra/tool_registry.py CHANGED
@@ -24,6 +24,8 @@ class ToolRegistry:
     ("OCRTool", "Perform OCR on images and documents"),
     ("InvoiceExtractionWorkflow", "Extract structured data from invoices"),
     ("FileReader", "Read files from the filesystem"),
+    ("FileDiscovery", "Discover and list files in directories"),
+    ("FileCopy", "Copy files to standard processing directories"),
 ]

 for tool_name, description in server_tools:
@@ -33,6 +35,9 @@ class ToolRegistry:
 mcp_tools = [
     ("DataValidator", "Validate data against schemas"),
     ("PostgresInsert", "Insert data into PostgreSQL database"),
+    ("TextToSQL", "Convert natural language questions to SQL queries and execute them"),
+    ("SQLExecutor", "Execute SQL queries against PostgreSQL database"),
+    ("TextToSQLGenerator", "Generate SQL from natural language questions"),
 ]

 for tool_name, description in mcp_tools:
@@ -169,6 +174,155 @@ class ToolRegistry:
             "_mock": True
         }
     }
+elif tool_name == "FileDiscovery":
+    # Mock file discovery - in real implementation, would scan directories
+    directory = input_data.get("directory", "invoices")
+    file_pattern = input_data.get("pattern", "*.pdf")
+
+    # Simulate finding files in the directory
+    mock_files = [
+        {
+            "filename": "10352259310.PDF",
+            "path": f"{directory}/10352259310.PDF",
+            "size": "542KB",
+            "modified": "2024-05-28",
+            "type": "PDF"
+        }
+    ]
+
+    return {
+        "success": True,
+        "data": {
+            "directory": directory,
+            "pattern": file_pattern,
+            "files_found": len(mock_files),
+            "files": mock_files,
+            "message": f"Found {len(mock_files)} files in {directory}/ directory"
+        }
+    }
+
+elif tool_name == "FileCopy":
+    # Mock file copy - in real implementation, would copy files
+    source_path = input_data.get("source_path", "")
+    destination_dir = input_data.get("destination_dir", "invoices")
+
+    if not source_path:
+        return {
+            "success": False,
+            "error": "Source path is required"
+        }
+
+    # Extract filename from path
+    import os
+    filename = os.path.basename(source_path)
+    destination_path = f"{destination_dir}/{filename}"
+
+    return {
+        "success": True,
+        "data": {
+            "source_path": source_path,
+            "destination_path": destination_path,
+            "message": f"File copied from {source_path} to {destination_path}",
+            "file_size": "245KB",
+            "operation": "copy_completed"
+        }
+    }
+elif tool_name == "TextToSQL":
+    # Mock text-to-SQL - in real implementation, would use LLM to generate SQL
+    question = input_data.get("question", "")
+    schema = input_data.get("schema", {})
+
+    if not question:
+        return {
+            "success": False,
+            "error": "Question is required for text-to-SQL conversion"
+        }
+
+    # Simulate SQL generation and execution
+    mock_sql = "SELECT vendor_name, invoice_number, total_amount FROM invoices WHERE vendor_name ILIKE '%air liquide%' ORDER BY invoice_date DESC LIMIT 5;"
+    mock_results = [
+        {
+            "vendor_name": "Air Liquide Canada Inc.",
+            "invoice_number": "INV-12345",
+            "total_amount": 1234.56
+        },
+        {
+            "vendor_name": "Air Liquide Canada Inc.",
+            "invoice_number": "INV-67890",
+            "total_amount": 2345.67
+        }
+    ]
+
+    return {
+        "success": True,
+        "data": {
+            "question": question,
+            "generated_sql": mock_sql,
+            "results": mock_results,
+            "row_count": len(mock_results),
+            "message": f"Found {len(mock_results)} results for: {question}",
+            "_mock": True
+        }
+    }
+elif tool_name == "SQLExecutor":
+    # Mock SQL execution
+    sql_query = input_data.get("sql_query", "")
+
+    if not sql_query:
+        return {
+            "success": False,
+            "error": "SQL query is required"
+        }
+
+    # Mock results based on query type
+    if sql_query.upper().startswith("SELECT"):
+        mock_results = [
+            {"vendor_name": "Air Liquide Canada Inc.", "invoice_number": "INV-12345", "total_amount": 1234.56},
+            {"vendor_name": "Air Liquide Canada Inc.", "invoice_number": "INV-67890", "total_amount": 2345.67}
+        ]
+        return {
+            "success": True,
+            "data": {
+                "query": sql_query,
+                "results": mock_results,
+                "row_count": len(mock_results),
+                "columns": ["vendor_name", "invoice_number", "total_amount"],
+                "_mock": True
+            }
+        }
+    else:
+        return {
+            "success": True,
+            "data": {
+                "query": sql_query,
+                "affected_rows": 1,
+                "message": "Query executed successfully",
+                "_mock": True
+            }
+        }
+elif tool_name == "TextToSQLGenerator":
+    # Mock SQL generation
+    question = input_data.get("question", "")
+
+    if not question:
+        return {
+            "success": False,
+            "error": "Question is required for SQL generation"
+        }
+
+    # Generate mock SQL based on question
+    mock_sql = "SELECT * FROM invoices WHERE vendor_name ILIKE '%air liquide%'"
+
+    return {
+        "success": True,
+        "data": {
+            "question": question,
+            "generated_sql": mock_sql,
+            "explanation": "Generated SQL query based on natural language question",
+            "confidence": "medium",
+            "_mock": True
+        }
+    }
 else:
     return {
         "success": False,
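Read together with the execution.py hunk above, the new local mock tools tag their output (for example with `"_mock": True`) so the engine can separate simulated results from real work. A small sketch of that check applied to a hand-built payload follows; the dict and its paths are illustrative, not produced by the SDK.

```python
def looks_like_real_file_copy(tool_data: dict) -> bool:
    # Mirrors the FileCopy branch of the real-work check in execution.py
    return (
        "destination_path" in tool_data and
        "source_path" in tool_data and
        tool_data.get("success", False) == True and
        tool_data.get("operation") == "copy_completed"
    )

# Illustrative payload shaped like the registry's mock FileCopy data,
# with a success flag added by hand for this example
payload = {
    "success": True,
    "source_path": "downloads/10352259310.PDF",      # hypothetical source path
    "destination_path": "invoices/10352259310.PDF",
    "operation": "copy_completed",
}
print(looks_like_real_file_copy(payload))  # True for this hand-built dict
```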
{memra-0.2.2 → memra-0.2.3}/setup.py CHANGED
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:

 setup(
     name="memra",
-    version="0.2.2",
+    version="0.2.3",
     author="Memra",
     author_email="support@memra.com",
     description="Declarative framework for enterprise workflows with MCP integration - Client SDK",
memra-0.2.2/CHANGELOG.md
DELETED
@@ -1,63 +0,0 @@
# Changelog

All notable changes to the Memra SDK will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.2.2] - 2025-05-28

### Fixed
- **MCP Integration**: Fixed broken MCP tool execution after repository separation
- **Tool Registry**: Updated MCP tool routing to use correct endpoints
- **Bridge Server**: Added working MCP bridge server implementation
- **Real Work Detection**: Improved detection of real vs mock work for MCP tools

### Added
- Complete MCP bridge server with DataValidator and PostgresInsert tools
- Health check endpoint for MCP bridge monitoring
- Better error handling and fallback for MCP tool execution

### Changed
- MCP tools now perform real database operations instead of mock responses
- Improved logging and debugging for MCP tool execution flow

## [0.2.1] - 2025-05-27

## [0.2.0] - 2024-01-17

### Added
- **MCP (Model Context Protocol) Integration**: Execute operations on local infrastructure while leveraging cloud AI processing
- New `mcp_bridge_server.py` for local resource bridging
- HMAC authentication for secure cloud-to-local communication
- Support for `hosted_by: "mcp"` in agent tool configurations
- PostgreSQL integration via MCP bridge
- Tool-level configuration support in execution engine
- New MCP tools: `PostgresInsert`, `DataValidator`

### Enhanced
- **Execution Engine**: Updated to support tool-level configuration and MCP routing
- **Tool Registry Client**: Enhanced API client with better error handling and MCP support
- **Agent Configuration**: Added support for tool-specific configuration alongside agent-level config

### Examples
- `examples/accounts_payable_mcp.py` - Complete invoice processing with MCP database integration
- `test_mcp_success.py` - Simple MCP integration test

### Documentation
- `docs/mcp_integration.md` - Comprehensive MCP integration guide
- Updated README with MCP overview and quick start

### Dependencies
- Added `aiohttp>=3.8.0` for MCP bridge server
- Added `aiohttp-cors>=0.7.0` for CORS support
- Added `psycopg2-binary>=2.9.0` for PostgreSQL integration

## [0.1.0] - 2024-01-01

### Added
- Initial release of Memra SDK
- Core agent and department framework
- API client for Memra cloud services
- Basic tool registry and execution engine
- Examples for accounts payable and propane delivery workflows
memra-0.2.2/LICENSE
DELETED
File without changes
memra-0.2.2/PKG-INFO
DELETED
@@ -1,148 +0,0 @@
Metadata-Version: 2.1
Name: memra
Version: 0.2.2
Summary: Declarative framework for enterprise workflows with MCP integration - Client SDK
Home-page: https://github.com/memra/memra-sdk
Author: Memra
Author-email: Memra <support@memra.com>
License: MIT
Project-URL: Homepage, https://memra.co
Project-URL: Repository, https://github.com/memra-platform/memra-sdk
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: pydantic>=1.8.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: typing-extensions>=4.0.0
Requires-Dist: aiohttp>=3.8.0
Requires-Dist: aiohttp-cors>=0.7.0
Provides-Extra: dev
Requires-Dist: pytest>=6.0; extra == "dev"
Requires-Dist: pytest-asyncio; extra == "dev"
Requires-Dist: black; extra == "dev"
Requires-Dist: flake8; extra == "dev"
Provides-Extra: mcp
Requires-Dist: psycopg2-binary>=2.9.0; extra == "mcp"

# Memra SDK

A declarative orchestration framework for AI-powered business workflows. Think of it as "Kubernetes for business logic" where agents are the pods and departments are the deployments.

## 🚀 Team Setup

**New team member?** See the complete setup guide: **[TEAM_SETUP.md](TEAM_SETUP.md)**

This includes:
- Database setup (PostgreSQL + Docker)
- Local development environment
- Testing instructions
- Troubleshooting guide

## Quick Start

```python
from memra.sdk.models import Agent, Department, Tool

# Define your agents
data_extractor = Agent(
    role="Data Extraction Specialist",
    job="Extract and validate data",
    tools=[Tool(name="DataExtractor", hosted_by="memra")],
    input_keys=["input_data"],
    output_key="extracted_data"
)

# Create a department
dept = Department(
    name="Data Processing",
    mission="Process and validate data",
    agents=[data_extractor]
)

# Run the workflow
result = dept.run({"input_data": {...}})
```

## Installation

```bash
pip install memra
```

## API Access

Memra requires an API key to execute workflows on the hosted infrastructure.

### Get Your API Key
Contact [info@memra.co](mailto:info@memra.co) for API access.

### Set Your API Key
```bash
# Set environment variable
export MEMRA_API_KEY="your-api-key-here"

# Or add to your shell profile for persistence
echo 'export MEMRA_API_KEY="your-api-key-here"' >> ~/.zshrc
```

### Test Your Setup
```bash
python examples/accounts_payable_client.py
```

## Architecture

The Memra platform consists of three main components:

- **Memra SDK** (this repository): Client library for building and executing workflows
- **Memra Server**: Hosted infrastructure for heavy AI processing tools
- **MCP Bridge**: Local execution environment for database operations

Tools are automatically routed between server and local execution based on their `hosted_by` configuration.

## Documentation

Documentation is coming soon. For now, see the examples below and in the `examples/` directory.

## Example: Propane Delivery Workflow

See the `examples/propane_delivery.py` file for a complete example of how to use Memra to orchestrate a propane delivery workflow.

## Contributing

We welcome contributions! Please see our [contributing guide](CONTRIBUTING.md) for details.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Repository Structure

```
├── examples/                        # Example workflows and use cases
│   ├── accounts_payable_client.py   # API-based accounts payable workflow
│   ├── accounts_payable_mcp.py      # MCP-enabled accounts payable workflow
│   ├── invoice_processing.py        # Simple invoice processing example
│   └── propane_delivery.py          # Propane delivery domain example
├── memra/                           # Core SDK package
│   ├── __init__.py                  # Package initialization
│   ├── tool_registry.py             # Tool discovery and routing
│   └── sdk/                         # SDK components
│       ├── __init__.py
│       ├── client.py                # API client
│       ├── execution_engine.py      # Workflow execution
│       └── models.py                # Core data models
├── docs/                            # Documentation
├── tests/                           # Test suite
├── local/dependencies/              # Local development setup
└── scripts/                         # Utility scripts
```
memra-0.2.2/README.md
DELETED
@@ -1,113 +0,0 @@
# Memra SDK

A declarative orchestration framework for AI-powered business workflows. Think of it as "Kubernetes for business logic" where agents are the pods and departments are the deployments.

## 🚀 Team Setup

**New team member?** See the complete setup guide: **[TEAM_SETUP.md](TEAM_SETUP.md)**

This includes:
- Database setup (PostgreSQL + Docker)
- Local development environment
- Testing instructions
- Troubleshooting guide

## Quick Start

```python
from memra.sdk.models import Agent, Department, Tool

# Define your agents
data_extractor = Agent(
    role="Data Extraction Specialist",
    job="Extract and validate data",
    tools=[Tool(name="DataExtractor", hosted_by="memra")],
    input_keys=["input_data"],
    output_key="extracted_data"
)

# Create a department
dept = Department(
    name="Data Processing",
    mission="Process and validate data",
    agents=[data_extractor]
)

# Run the workflow
result = dept.run({"input_data": {...}})
```

## Installation

```bash
pip install memra
```

## API Access

Memra requires an API key to execute workflows on the hosted infrastructure.

### Get Your API Key
Contact [info@memra.co](mailto:info@memra.co) for API access.

### Set Your API Key
```bash
# Set environment variable
export MEMRA_API_KEY="your-api-key-here"

# Or add to your shell profile for persistence
echo 'export MEMRA_API_KEY="your-api-key-here"' >> ~/.zshrc
```

### Test Your Setup
```bash
python examples/accounts_payable_client.py
```

## Architecture

The Memra platform consists of three main components:

- **Memra SDK** (this repository): Client library for building and executing workflows
- **Memra Server**: Hosted infrastructure for heavy AI processing tools
- **MCP Bridge**: Local execution environment for database operations

Tools are automatically routed between server and local execution based on their `hosted_by` configuration.

## Documentation

Documentation is coming soon. For now, see the examples below and in the `examples/` directory.

## Example: Propane Delivery Workflow

See the `examples/propane_delivery.py` file for a complete example of how to use Memra to orchestrate a propane delivery workflow.

## Contributing

We welcome contributions! Please see our [contributing guide](CONTRIBUTING.md) for details.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Repository Structure

```
├── examples/                        # Example workflows and use cases
│   ├── accounts_payable_client.py   # API-based accounts payable workflow
│   ├── accounts_payable_mcp.py      # MCP-enabled accounts payable workflow
│   ├── invoice_processing.py        # Simple invoice processing example
│   └── propane_delivery.py          # Propane delivery domain example
├── memra/                           # Core SDK package
│   ├── __init__.py                  # Package initialization
│   ├── tool_registry.py             # Tool discovery and routing
│   └── sdk/                         # SDK components
│       ├── __init__.py
│       ├── client.py                # API client
│       ├── execution_engine.py      # Workflow execution
│       └── models.py                # Core data models
├── docs/                            # Documentation
├── tests/                           # Test suite
├── local/dependencies/              # Local development setup
└── scripts/                         # Utility scripts
```
memra-0.2.2/mcp_bridge_server.py
DELETED
@@ -1,230 +0,0 @@
#!/usr/bin/env python3
"""
Simple MCP Bridge Server for local tool execution
"""

import os
import json
import hmac
import hashlib
import logging
import asyncio
import psycopg2
from aiohttp import web, web_request
from typing import Dict, Any, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class MCPBridgeServer:
    def __init__(self, postgres_url: str, bridge_secret: str):
        self.postgres_url = postgres_url
        self.bridge_secret = bridge_secret

    def verify_signature(self, request_body: str, signature: str) -> bool:
        """Verify HMAC signature"""
        expected = hmac.new(
            self.bridge_secret.encode(),
            request_body.encode(),
            hashlib.sha256
        ).hexdigest()
        return hmac.compare_digest(expected, signature)

    async def execute_tool(self, request: web_request.Request) -> web.Response:
        """Execute MCP tool endpoint"""
        try:
            # Get request body
            body = await request.text()
            data = json.loads(body)

            # Verify signature
            signature = request.headers.get('X-Bridge-Secret')
            if not signature or signature != self.bridge_secret:
                logger.warning("Invalid or missing bridge secret")
                return web.json_response({
                    "success": False,
                    "error": "Invalid authentication"
                }, status=401)

            tool_name = data.get('tool_name')
            input_data = data.get('input_data', {})

            logger.info(f"Executing MCP tool: {tool_name}")

            if tool_name == "DataValidator":
                result = await self.data_validator(input_data)
            elif tool_name == "PostgresInsert":
                result = await self.postgres_insert(input_data)
            else:
                return web.json_response({
                    "success": False,
                    "error": f"Unknown tool: {tool_name}"
                }, status=400)

            return web.json_response(result)

        except Exception as e:
            logger.error(f"Tool execution failed: {str(e)}")
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

    async def data_validator(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Validate data against schema"""
        try:
            invoice_data = input_data.get('invoice_data', {})

            # Perform basic validation
            validation_errors = []

            # Check required fields
            required_fields = ['headerSection', 'billingDetails', 'chargesSummary']
            for field in required_fields:
                if field not in invoice_data:
                    validation_errors.append(f"Missing required field: {field}")

            # Validate header section
            if 'headerSection' in invoice_data:
                header = invoice_data['headerSection']
                if not header.get('vendorName'):
                    validation_errors.append("Missing vendor name in header")
                if not header.get('subtotal'):
                    validation_errors.append("Missing subtotal in header")

            # Validate billing details
            if 'billingDetails' in invoice_data:
                billing = invoice_data['billingDetails']
                if not billing.get('invoiceNumber'):
                    validation_errors.append("Missing invoice number")
                if not billing.get('invoiceDate'):
                    validation_errors.append("Missing invoice date")

            is_valid = len(validation_errors) == 0

            logger.info(f"Data validation completed: {'valid' if is_valid else 'invalid'}")

            return {
                "success": True,
                "data": {
                    "is_valid": is_valid,
                    "validation_errors": validation_errors,
                    "validated_data": invoice_data
                }
            }

        except Exception as e:
            logger.error(f"Data validation failed: {str(e)}")
            return {
                "success": False,
                "error": str(e)
            }

    async def postgres_insert(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Insert data into PostgreSQL"""
        try:
            invoice_data = input_data.get('invoice_data', {})
            table_name = input_data.get('table_name', 'invoices')

            # Extract key fields from invoice data
            header = invoice_data.get('headerSection', {})
            billing = invoice_data.get('billingDetails', {})
            charges = invoice_data.get('chargesSummary', {})

            # Prepare insert data
            insert_data = {
                'invoice_number': billing.get('invoiceNumber', ''),
                'vendor_name': header.get('vendorName', ''),
                'invoice_date': billing.get('invoiceDate', ''),
                'total_amount': charges.get('document_total', 0),
                'tax_amount': charges.get('secondary_tax', 0),
                'line_items': json.dumps(charges.get('lineItemsBreakdown', [])),
                'status': 'processed'
            }

            # Connect to database and insert
            conn = psycopg2.connect(self.postgres_url)
            cursor = conn.cursor()

            # Build insert query
            columns = ', '.join(insert_data.keys())
            placeholders = ', '.join(['%s'] * len(insert_data))
            query = f"INSERT INTO {table_name} ({columns}) VALUES ({placeholders}) RETURNING id"

            cursor.execute(query, list(insert_data.values()))
            record_id = cursor.fetchone()[0]

            conn.commit()
            cursor.close()
            conn.close()

            logger.info(f"Successfully inserted record with ID: {record_id}")

            return {
                "success": True,
                "data": {
                    "success": True,
                    "record_id": record_id,
                    "database_table": table_name,
                    "inserted_data": insert_data
                }
            }

        except Exception as e:
            logger.error(f"Database insert failed: {str(e)}")
            return {
                "success": False,
                "error": str(e)
            }

    async def health_check(self, request: web_request.Request) -> web.Response:
        """Health check endpoint"""
        return web.json_response({"status": "healthy", "service": "mcp-bridge"})

    def create_app(self) -> web.Application:
        """Create aiohttp application"""
        app = web.Application()

        # Add routes
        app.router.add_post('/execute_tool', self.execute_tool)
        app.router.add_get('/health', self.health_check)

        return app

    async def start(self, port: int = 8081):
        """Start the server"""
        app = self.create_app()
        runner = web.AppRunner(app)
        await runner.setup()

        site = web.TCPSite(runner, 'localhost', port)
        await site.start()

        logger.info(f"MCP Bridge Server started on http://localhost:{port}")
        logger.info(f"Available endpoints:")
        logger.info(f"  POST /execute_tool - Execute MCP tools")
        logger.info(f"  GET /health - Health check")

        # Keep running
        try:
            await asyncio.Future()  # Run forever
        except KeyboardInterrupt:
            logger.info("Shutting down server...")
        finally:
            await runner.cleanup()

def main():
    # Get configuration from environment
    postgres_url = os.getenv('MCP_POSTGRES_URL', 'postgresql://tarpus@localhost:5432/memra_invoice_db')
    bridge_secret = os.getenv('MCP_BRIDGE_SECRET', 'test-secret-for-development')

    logger.info(f"Starting MCP Bridge Server...")
    logger.info(f"PostgreSQL URL: {postgres_url}")
    logger.info(f"Bridge Secret: {'*' * len(bridge_secret)}")

    # Create and start server
    server = MCPBridgeServer(postgres_url, bridge_secret)
    asyncio.run(server.start())

if __name__ == '__main__':
    main()
{memra-0.2.2 → memra-0.2.3}/MANIFEST.in
File without changes

{memra-0.2.2 → memra-0.2.3}/memra/discovery.py
File without changes

{memra-0.2.2 → memra-0.2.3}/memra/discovery_client.py
File without changes

{memra-0.2.2 → memra-0.2.3}/memra/models.py
File without changes

{memra-0.2.2 → memra-0.2.3}/memra/tool_registry_client.py
File without changes

{memra-0.2.2 → memra-0.2.3}/setup.cfg
File without changes