jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/autoagent_distributed_example.py +211 -0
- examples/custom_profile_decorator.py +134 -0
- examples/custom_profile_wrap.py +168 -0
- examples/customagent_distributed_example.py +362 -0
- examples/customagent_p2p_example.py +347 -0
- jarviscore/__init__.py +49 -36
- jarviscore/adapter/__init__.py +15 -9
- jarviscore/adapter/decorator.py +23 -19
- jarviscore/adapter/wrapper.py +303 -0
- jarviscore/cli/scaffold.py +1 -1
- jarviscore/cli/smoketest.py +3 -2
- jarviscore/core/agent.py +44 -1
- jarviscore/core/mesh.py +196 -35
- jarviscore/data/examples/autoagent_distributed_example.py +211 -0
- jarviscore/data/examples/customagent_distributed_example.py +362 -0
- jarviscore/data/examples/customagent_p2p_example.py +347 -0
- jarviscore/docs/API_REFERENCE.md +264 -51
- jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
- jarviscore/docs/CONFIGURATION.md +35 -21
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
- jarviscore/docs/GETTING_STARTED.md +106 -13
- jarviscore/docs/TROUBLESHOOTING.md +144 -6
- jarviscore/docs/USER_GUIDE.md +138 -361
- jarviscore/orchestration/engine.py +20 -8
- jarviscore/p2p/__init__.py +10 -0
- jarviscore/p2p/coordinator.py +129 -0
- jarviscore/p2p/messages.py +87 -0
- jarviscore/p2p/peer_client.py +576 -0
- jarviscore/p2p/peer_tool.py +268 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/METADATA +60 -54
- jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
- test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
- test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
- test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
- test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
- test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
- test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
- test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
- test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
- test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
- test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
- test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
- test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
- test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
- test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
- test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
- test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
- tests/test_01_analyst_standalone.py +124 -0
- tests/test_02_assistant_standalone.py +164 -0
- tests/test_03_analyst_with_framework.py +945 -0
- tests/test_04_assistant_with_framework.py +1002 -0
- tests/test_05_integration.py +1301 -0
- tests/test_06_real_llm_integration.py +760 -0
- tests/test_07_distributed_single_node.py +578 -0
- tests/test_08_distributed_multi_node.py +454 -0
- tests/test_09_distributed_autoagent.py +509 -0
- tests/test_10_distributed_customagent.py +787 -0
- tests/test_mesh.py +35 -4
- jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AutoAgent Distributed Mode Example
|
|
3
|
+
|
|
4
|
+
Demonstrates AutoAgent in distributed mode, which combines:
|
|
5
|
+
- P2P network layer (SWIM protocol, ZMQ messaging)
|
|
6
|
+
- Workflow orchestration (step execution, dependencies)
|
|
7
|
+
|
|
8
|
+
This is ideal for multi-node deployments where agents can:
|
|
9
|
+
- Execute on different machines
|
|
10
|
+
- Discover each other via SWIM
|
|
11
|
+
- Run orchestrated workflows across the network
|
|
12
|
+
|
|
13
|
+
Usage:
|
|
14
|
+
python examples/autoagent_distributed_example.py
|
|
15
|
+
|
|
16
|
+
Prerequisites:
|
|
17
|
+
- .env file with LLM API key (CLAUDE_API_KEY, etc.)
|
|
18
|
+
"""
|
|
19
|
+
import asyncio
|
|
20
|
+
import sys
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
24
|
+
|
|
25
|
+
from jarviscore import Mesh
|
|
26
|
+
from jarviscore.profiles import AutoAgent
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
30
|
+
# AUTOAGENT DEFINITIONS
|
|
31
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
32
|
+
|
|
33
|
+
class DataCollectorAgent(AutoAgent):
    """Data-collection specialist: produces sample datasets on request."""

    role = "collector"
    capabilities = ["data_collection", "sampling"]
    # Prompt constrains the LLM to the stdlib and requires output in a
    # dict bound to the name 'result' (the framework reads that variable).
    system_prompt = """
    You are a data collection specialist. Generate sample datasets
    based on specifications. Use Python's standard library only.
    Store results in a variable named 'result' as a dictionary.
    """
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class DataProcessorAgent(AutoAgent):
    """Data-processing specialist: cleans and transforms datasets."""

    role = "processor"
    capabilities = ["data_processing", "transformation"]
    # Same convention as the other agents: stdlib only, output in 'result'.
    system_prompt = """
    You are a data processing expert. Transform and clean datasets.
    Apply filters, aggregations, and transformations as needed.
    Use Python's standard library only. Store results in 'result'.
    """
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class ReportWriterAgent(AutoAgent):
    """Technical-writing specialist: turns processed data into reports."""

    role = "reporter"
    capabilities = ["reporting", "documentation"]
    # Markdown output expected; the report text must land in 'result'.
    system_prompt = """
    You are a technical writer. Create clear, well-formatted reports
    from data. Use markdown formatting. Store the report in 'result'.
    """
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
66
|
+
# MAIN EXAMPLE
|
|
67
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
68
|
+
|
|
69
|
+
async def main():
    """Run the AutoAgent distributed-mode example end to end.

    Starts a single-node distributed mesh, registers the three agents,
    executes a collect -> process -> report pipeline, and prints each
    step's result. The mesh is stopped on every path (see finally).
    """
    print("\n" + "=" * 70)
    print("JarvisCore: AutoAgent in Distributed Mode")
    print("=" * 70)

    # ─────────────────────────────────────────────────────────────────────────
    # KEY DIFFERENCE: mode="distributed" with P2P configuration
    # ─────────────────────────────────────────────────────────────────────────
    mesh = Mesh(
        mode="distributed",  # Enables P2P + Workflow Engine
        config={
            # P2P Network Configuration
            'bind_host': '127.0.0.1',       # Interface to bind to
            'bind_port': 7950,              # SWIM protocol port (ZMQ uses +1000)
            'node_name': 'autoagent-node',

            # For multi-node: uncomment to join existing cluster
            # 'seed_nodes': '192.168.1.10:7950,192.168.1.11:7950',

            # AutoAgent Configuration
            'execution_timeout': 60,        # Max seconds per task
            'max_repair_attempts': 2,       # Auto-repair on failure
            'log_directory': './logs',      # Result storage
        }
    )

    # Add agents - same as autonomous mode
    mesh.add(DataCollectorAgent)
    mesh.add(DataProcessorAgent)
    mesh.add(ReportWriterAgent)

    try:
        await mesh.start()

        print("\n[INFO] Mesh started in DISTRIBUTED mode")
        print(f"  - P2P Coordinator: Active (port {mesh.config.get('bind_port', 7950)})")
        print("  - Workflow Engine: Active")
        print(f"  - Agents: {len(mesh.agents)}")

        # ─────────────────────────────────────────────────────────────────
        # WORKFLOW EXECUTION - Same API as autonomous mode
        # ─────────────────────────────────────────────────────────────────
        print("\n" + "-" * 70)
        print("Executing Pipeline: Collect → Process → Report")
        print("-" * 70)

        results = await mesh.workflow("distributed-pipeline", [
            {
                "id": "collect",
                "agent": "collector",
                "task": "Generate a dataset of 10 products with name, price, and category"
            },
            {
                "id": "process",
                "agent": "processor",
                "task": "Calculate total value, average price, and count by category",
                "depends_on": ["collect"]
            },
            {
                "id": "report",
                "agent": "reporter",
                "task": "Create a summary report with the statistics",
                "depends_on": ["process"]
            }
        ])

        # Display results
        print("\n" + "=" * 70)
        print("RESULTS")
        print("=" * 70)

        # Hoisted out of the loop; generic fallback avoids IndexError if
        # the engine ever returns a different number of step results.
        step_names = ["Data Collection", "Data Processing", "Report Generation"]
        for i, result in enumerate(results):
            label = step_names[i] if i < len(step_names) else f"Step {i + 1}"
            print(f"\n{label}:")
            print(f"  Status: {result['status']}")
            if result['status'] == 'success':
                output = str(result.get('output', ''))[:200]
                print(f"  Output: {output}...")
            else:
                print(f"  Error: {result.get('error')}")

        # Summary
        successes = sum(1 for r in results if r['status'] == 'success')
        print(f"\n{'='*70}")
        print(f"Pipeline Complete: {successes}/{len(results)} steps successful")
        print(f"{'='*70}")

    except Exception as e:
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # BUGFIX: previously mesh.stop() ran only on the success path, so
        # a failed workflow left the P2P coordinator and agents running.
        await mesh.stop()
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
166
|
+
# MULTI-NODE EXAMPLE (Reference)
|
|
167
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
168
|
+
|
|
169
|
+
async def multi_node_example():
    """
    Reference recipe: spanning agents across multiple machines.

    Seed node (node-1) binds publicly and keeps serving:
        mesh = Mesh(mode="distributed", config={
            'bind_host': '0.0.0.0',
            'bind_port': 7950,
            'node_name': 'node-1',
        })
        mesh.add(DataCollectorAgent)
        await mesh.start()
        await mesh.serve_forever()  # Keep running

    Each joining node (node-2, node-3, ...) adds 'seed_nodes' pointing
    at the seed node's address and registers its own agent:
        mesh = Mesh(mode="distributed", config={
            'bind_host': '0.0.0.0',
            'bind_port': 7950,
            'node_name': 'node-2',
            'seed_nodes': '192.168.1.10:7950',  # Node 1's address
        })
        mesh.add(DataProcessorAgent)
        await mesh.start()
        await mesh.serve_forever()

    node-3 follows the same pattern with ReportWriterAgent. Once joined,
    any node can execute workflows that span all three.
    """
    # Documentation-only placeholder; nothing to execute.
    pass
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
if __name__ == "__main__":
    # Script entry point: run the example under a fresh event loop.
    asyncio.run(main())
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Custom Profile Example: Using @jarvis_agent Decorator
|
|
3
|
+
|
|
4
|
+
This example shows how to use the @jarvis_agent decorator to convert
|
|
5
|
+
any Python class into a JarvisCore agent without modifying the class.
|
|
6
|
+
|
|
7
|
+
Use Case: You have existing Python classes/agents and want JarvisCore
|
|
8
|
+
to handle orchestration (data handoff, dependencies, shared memory).
|
|
9
|
+
"""
|
|
10
|
+
import asyncio
|
|
11
|
+
from jarviscore import Mesh, jarvis_agent, JarvisContext
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# Example 1: Simple decorator (no context needed)
|
|
15
|
+
@jarvis_agent(role="processor", capabilities=["data_processing"])
class DataProcessor:
    """Doubles its input (scalar or list) without knowing about JarvisCore."""

    def run(self, data):
        """Return the doubled input wrapped in a {'processed': ...} dict."""
        if not isinstance(data, list):
            return {"processed": data * 2}
        doubled = [item * 2 for item in data]
        return {"processed": doubled}
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# Example 2: Decorator with context access
|
|
27
|
+
@jarvis_agent(role="aggregator", capabilities=["aggregation"])
class Aggregator:
    """Aggregates the output of a prior workflow step via JarvisContext."""

    def run(self, task, ctx: JarvisContext):
        """
        Summarize the output of step 'step1' found in *ctx*.

        Args:
            task: The task description
            ctx: JarvisContext with memory and dependency access
        """
        # Guard clause: bail out early when step1 produced nothing.
        prior = ctx.previous("step1")
        if not prior:
            return {"error": "No previous data found"}

        payload = prior.get("processed", [])
        is_list = isinstance(payload, list)
        return {
            "sum": sum(payload) if is_list else payload,
            "count": len(payload) if is_list else 1,
            "source_step": "step1",
        }
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# Example 3: Decorator with custom execute method
|
|
54
|
+
@jarvis_agent(role="validator", capabilities=["validation"], execute_method="validate")
class DataValidator:
    """Checks inputs for numeric content; exposed via a custom method name."""

    def validate(self, data):
        """Report whether *data* (scalar or list) is numeric."""
        if not isinstance(data, list):
            return {
                "valid": isinstance(data, (int, float)),
                "type": type(data).__name__,
            }
        all_numeric = all(isinstance(item, (int, float)) for item in data)
        return {"valid": all_numeric, "count": len(data), "type": "list"}
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
async def main():
    """Run a three-step workflow built from decorated custom-profile agents.

    Starts an autonomous mesh, registers the three @jarvis_agent classes,
    executes processor -> aggregator -> validator, prints each result,
    and always stops the mesh.
    """
    print("=" * 60)
    print(" Custom Profile Example: @jarvis_agent Decorator")
    print("=" * 60)

    # Create mesh in autonomous mode (no P2P layer needed here).
    mesh = Mesh(mode="autonomous")

    # Add our decorated agents
    mesh.add(DataProcessor)
    mesh.add(Aggregator)
    mesh.add(DataValidator)

    # Start the mesh
    await mesh.start()

    try:
        # Execute a multi-step workflow
        print("\nExecuting workflow with 3 steps...\n")

        results = await mesh.workflow("custom-profile-demo", [
            {
                "id": "step1",
                "agent": "processor",
                "task": "Process input data",
                "params": {"data": [1, 2, 3, 4, 5]}
            },
            {
                "id": "step2",
                "agent": "aggregator",
                "task": "Aggregate processed results",
                "depends_on": ["step1"]  # Wait for step1
            },
            {
                "id": "step3",
                "agent": "validator",
                "task": "Validate original data",
                "params": {"data": [1, 2, 3, 4, 5]}
            }
        ])

        # Print results
        print("Results:")
        print("-" * 40)

        # BUGFIX: labels hoisted out of the loop and bounded — the old
        # per-iteration list raised IndexError if results != 3 entries.
        step_labels = ["Processor", "Aggregator", "Validator"]
        for i, result in enumerate(results):
            label = step_labels[i] if i < len(step_labels) else f"Step {i + 1}"
            print(f"\n{label} (step{i+1}):")
            print(f"  Status: {result.get('status')}")
            print(f"  Output: {result.get('output')}")

        print("\n" + "=" * 60)
        print(" Workflow completed successfully!")
        print("=" * 60)

    finally:
        # Stop the mesh
        await mesh.stop()
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
if __name__ == "__main__":
    # Script entry point: run the decorator demo under a fresh event loop.
    asyncio.run(main())
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Custom Profile Example: Using wrap() Function
|
|
3
|
+
|
|
4
|
+
This example shows how to use the wrap() function to convert
|
|
5
|
+
an existing instance into a JarvisCore agent.
|
|
6
|
+
|
|
7
|
+
Use Case: You have an already-instantiated object (like a LangChain
|
|
8
|
+
agent, CrewAI agent, or any configured instance) and want to use it
|
|
9
|
+
with JarvisCore orchestration.
|
|
10
|
+
"""
|
|
11
|
+
import asyncio
|
|
12
|
+
from jarviscore import Mesh, wrap, JarvisContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Simulate an existing "LangChain-like" agent
|
|
16
|
+
class ExternalLLMAgent:
    """
    Stand-in for an external LLM agent (like LangChain).

    In real usage, this would be your actual LangChain/CrewAI agent.
    """

    def __init__(self, model_name: str, temperature: float = 0.7):
        self.model_name = model_name
        self.temperature = temperature
        print(f" Initialized ExternalLLMAgent with {model_name}")

    def invoke(self, query: str) -> dict:
        """LangChain-style invoke method."""
        # Fake an LLM reply; token count is just 10x the word count.
        word_count = len(query.split())
        return {
            "answer": f"Response to '{query}' from {self.model_name}",
            "model": self.model_name,
            "tokens_used": word_count * 10,
        }
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
# Simulate a data processing service
|
|
38
|
+
class DataService:
    """Stand-in for an external data-processing service."""

    def __init__(self, api_url: str):
        self.api_url = api_url
        print(f" Initialized DataService with {api_url}")

    def run(self, data):
        """Square the input (element-wise for lists) and tag the source URL."""
        if not isinstance(data, list):
            return {"transformed": data ** 2, "source": self.api_url}
        squared = [value ** 2 for value in data]
        return {"transformed": squared, "source": self.api_url}
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
# Simulate an agent that needs context
|
|
56
|
+
class ContextAwareProcessor:
    """Agent that reads earlier step outputs through JarvisContext."""

    def run(self, task, ctx: JarvisContext):
        """Combine every dict-valued previous output into a single summary."""
        earlier = ctx.all_previous()

        # Only dict outputs are merged; other types are listed but skipped.
        combined = {
            step_id: output
            for step_id, output in earlier.items()
            if isinstance(output, dict)
        }
        return {
            "task": task,
            "previous_steps": list(earlier.keys()),
            "combined_data": combined,
        }
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
async def main():
    """Demonstrate wrapping already-instantiated objects as JarvisCore agents.

    Creates three plain Python instances, adapts each with wrap(), runs a
    workflow across them, prints the results, and always stops the mesh.
    """
    print("=" * 60)
    print(" Custom Profile Example: wrap() Function")
    print("=" * 60)

    # Create instances of "external" agents
    print("\nCreating external agent instances...")
    llm_agent = ExternalLLMAgent(model_name="gpt-4-turbo", temperature=0.3)
    data_service = DataService(api_url="https://api.example.com/process")
    context_processor = ContextAwareProcessor()

    # Wrap them for JarvisCore
    print("\nWrapping instances for JarvisCore...")

    wrapped_llm = wrap(
        llm_agent,
        role="llm_assistant",
        capabilities=["chat", "qa"],
        execute_method="invoke"  # LangChain uses "invoke"
    )

    wrapped_data = wrap(
        data_service,
        role="data_processor",
        capabilities=["data_processing", "transformation"]
        # execute_method auto-detected as "run"
    )

    wrapped_context = wrap(
        context_processor,
        role="context_aggregator",
        capabilities=["aggregation", "summary"]
    )

    # Create mesh and add wrapped agents
    mesh = Mesh(mode="autonomous")
    mesh.add(wrapped_llm)
    mesh.add(wrapped_data)
    mesh.add(wrapped_context)

    await mesh.start()

    try:
        print("\nExecuting workflow with wrapped agents...\n")

        results = await mesh.workflow("wrap-demo", [
            {
                "id": "llm_step",
                "agent": "llm_assistant",
                "task": "What is the capital of France?",
                "params": {"query": "What is the capital of France?"}
            },
            {
                "id": "data_step",
                "agent": "data_processor",
                "task": "Transform numbers",
                "params": {"data": [1, 2, 3, 4, 5]}
            },
            {
                "id": "summary_step",
                "agent": "context_aggregator",
                "task": "Summarize all results",
                "depends_on": ["llm_step", "data_step"]
            }
        ])

        # Print results
        print("Results:")
        print("-" * 40)

        step_names = ["LLM Assistant", "Data Processor", "Context Aggregator"]
        for i, result in enumerate(results):
            # BUGFIX: bounded label lookup — indexing step_names[i]
            # directly raised IndexError if the engine returned more
            # than three step results.
            label = step_names[i] if i < len(step_names) else f"Step {i + 1}"
            print(f"\n{label}:")
            print(f"  Status: {result.get('status')}")
            output = result.get('output', {})
            if isinstance(output, dict):
                for key, value in output.items():
                    print(f"    {key}: {value}")
            else:
                print(f"  Output: {output}")

        print("\n" + "=" * 60)
        print(" Workflow with wrapped instances completed!")
        print("=" * 60)

    finally:
        await mesh.stop()
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
if __name__ == "__main__":
    # Script entry point: run the wrap() demo under a fresh event loop.
    asyncio.run(main())
|