jarviscore-framework 0.2.1__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/cloud_deployment_example.py +162 -0
- examples/customagent_cognitive_discovery_example.py +343 -0
- examples/fastapi_integration_example.py +570 -0
- jarviscore/__init__.py +19 -5
- jarviscore/cli/smoketest.py +8 -4
- jarviscore/core/agent.py +227 -0
- jarviscore/core/mesh.py +9 -0
- jarviscore/data/examples/cloud_deployment_example.py +162 -0
- jarviscore/data/examples/custom_profile_decorator.py +134 -0
- jarviscore/data/examples/custom_profile_wrap.py +168 -0
- jarviscore/data/examples/customagent_cognitive_discovery_example.py +343 -0
- jarviscore/data/examples/fastapi_integration_example.py +570 -0
- jarviscore/docs/API_REFERENCE.md +283 -3
- jarviscore/docs/CHANGELOG.md +139 -0
- jarviscore/docs/CONFIGURATION.md +1 -1
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +997 -85
- jarviscore/docs/GETTING_STARTED.md +228 -267
- jarviscore/docs/TROUBLESHOOTING.md +1 -1
- jarviscore/docs/USER_GUIDE.md +153 -8
- jarviscore/integrations/__init__.py +16 -0
- jarviscore/integrations/fastapi.py +247 -0
- jarviscore/p2p/broadcaster.py +10 -3
- jarviscore/p2p/coordinator.py +310 -14
- jarviscore/p2p/keepalive.py +45 -23
- jarviscore/p2p/peer_client.py +311 -12
- jarviscore/p2p/swim_manager.py +9 -4
- jarviscore/profiles/__init__.py +7 -1
- jarviscore/profiles/customagent.py +295 -74
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/METADATA +66 -18
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/RECORD +37 -22
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/WHEEL +1 -1
- tests/test_13_dx_improvements.py +554 -0
- tests/test_14_cloud_deployment.py +403 -0
- tests/test_15_llm_cognitive_discovery.py +684 -0
- tests/test_16_unified_dx_flow.py +947 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/top_level.txt +0 -0
jarviscore/p2p/peer_client.py
CHANGED

@@ -32,6 +32,31 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


+class RemoteAgentProxy:
+    """
+    Proxy object for remote agents.
+
+    Used by PeerClient to represent agents on other nodes in the mesh.
+    Contains enough information to route messages via P2P coordinator.
+    """
+
+    def __init__(
+        self,
+        agent_id: str,
+        role: str,
+        node_id: str,
+        capabilities: List[str] = None
+    ):
+        self.agent_id = agent_id
+        self.role = role
+        self.node_id = node_id
+        self.capabilities = capabilities or []
+        self.peers = None  # Remote agents don't have local PeerClient
+
+    def __repr__(self):
+        return f"<RemoteAgentProxy {self.role}@{self.node_id}>"
+
+
 class PeerClient:
     """
     Client for peer-to-peer agent communication.

@@ -128,6 +153,7 @@ class PeerClient:
         """
         results = []

+        # Search LOCAL agents first
         if role:
             agents = self._agent_registry.get(role, [])
             for agent in agents:

@@ -141,7 +167,7 @@ class PeerClient:
                 ))

         elif capability:
-            # Search all agents for capability
+            # Search all local agents for capability
             for role_name, agents in self._agent_registry.items():
                 for agent in agents:
                     if agent.agent_id != self._agent_id:  # Exclude self

@@ -155,7 +181,7 @@ class PeerClient:
                         ))

         else:
-            # Return all peers
+            # Return all local peers
             for role_name, agents in self._agent_registry.items():
                 for agent in agents:
                     if agent.agent_id != self._agent_id:  # Exclude self

@@ -167,6 +193,32 @@ class PeerClient:
                             status="alive"
                         ))

+        # BUG FIX: Also search REMOTE agents from other nodes
+        # Access coordinator's _remote_agent_registry
+        if self._coordinator and hasattr(self._coordinator, '_remote_agent_registry'):
+            remote_registry = self._coordinator._remote_agent_registry
+
+            for agent_id, info in remote_registry.items():
+                if agent_id == self._agent_id:  # Exclude self
+                    continue
+
+                # Filter by role if specified
+                if role and info.get('role') != role:
+                    continue
+
+                # Filter by capability if specified
+                if capability and capability not in info.get('capabilities', []):
+                    continue
+
+                # Add remote peer
+                results.append(PeerInfo(
+                    agent_id=info['agent_id'],
+                    role=info['role'],
+                    capabilities=info.get('capabilities', []),
+                    node_id=info.get('node_id', 'unknown'),
+                    status="alive"
+                ))
+
         return results

     @property

@@ -234,15 +286,22 @@ class PeerClient:
         """
         Get detailed list of peers with capabilities.

+        Includes both local and remote agents in the mesh.
+
         Returns:
-            List of dicts with role, agent_id, capabilities, status
+            List of dicts with role, agent_id, capabilities, status, location

         Example:
             peers = self.peers.list_peers()
-            # [
+            # [
+            #     {"role": "scout", "capabilities": ["reasoning"], "location": "local", ...},
+            #     {"role": "analyst", "capabilities": ["analysis"], "location": "remote", ...}
+            # ]
         """
         seen = set()
         peers = []
+
+        # 1. Local agents
         for role_name, agents in self._agent_registry.items():
             for agent in agents:
                 if agent.agent_id != self._agent_id and agent.agent_id not in seen:

@@ -251,8 +310,27 @@ class PeerClient:
                         "role": agent.role,
                         "agent_id": agent.agent_id,
                         "capabilities": list(agent.capabilities),
-                        "
+                        "description": getattr(agent, 'description', ''),
+                        "status": "online",
+                        "location": "local"
                     })
+
+        # 2. Remote agents from coordinator
+        if self._coordinator:
+            for remote in self._coordinator.list_remote_agents():
+                agent_id = remote.get('agent_id')
+                if agent_id and agent_id not in seen:
+                    seen.add(agent_id)
+                    peers.append({
+                        "role": remote.get('role', 'unknown'),
+                        "agent_id": agent_id,
+                        "capabilities": remote.get('capabilities', []),
+                        "description": remote.get('description', ''),
+                        "status": "online",
+                        "location": "remote",
+                        "node_id": remote.get('node_id', '')
+                    })
+
         return peers

     # ─────────────────────────────────────────────────────────────────
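With the discovery and list_peers() changes above, a single call now surfaces agents on other nodes alongside local ones. A minimal usage sketch (it assumes `self.peers` is bound to the PeerClient shown above and that the mesh is already running; that setup code is not part of this diff):

    # Sketch only: `self.peers` is assumed to be the agent's PeerClient handle.
    peers = self.peers.list_peers()

    # Entries now carry a "location" field, so callers can split
    # same-process agents from agents hosted on other nodes.
    local_peers = [p for p in peers if p["location"] == "local"]
    remote_peers = [p for p in peers if p["location"] == "remote"]

    for p in remote_peers:
        # Remote entries also include the node_id they live on.
        print(f'{p["role"]} ({p["agent_id"]}) on node {p.get("node_id", "?")}')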
@@ -439,6 +517,183 @@ class PeerClient:
         from .peer_tool import PeerTool
         return PeerTool(self)

+    # ─────────────────────────────────────────────────────────────────
+    # COGNITIVE CONTEXT (for LLM prompts)
+    # ─────────────────────────────────────────────────────────────────
+
+    def get_cognitive_context(
+        self,
+        format: str = "markdown",
+        include_capabilities: bool = True,
+        include_description: bool = True,
+        tool_name: str = "ask_peer"
+    ) -> str:
+        """
+        Generate a prompt-ready description of available mesh peers.
+
+        Bridges the gap between the Network Layer (who is connected)
+        and the Cognitive Layer (who the LLM should know about).
+
+        This enables dynamic system prompts that automatically update
+        as peers join or leave the mesh, eliminating hardcoded peer names.
+
+        Args:
+            format: Output format - "markdown", "json", or "text"
+            include_capabilities: Include peer capabilities in output
+            include_description: Include peer descriptions in output
+            tool_name: Name of the tool for peer communication
+
+        Returns:
+            Formatted string suitable for inclusion in system prompts
+
+        Example - Basic Usage:
+            system_prompt = BASE_PROMPT + "\\n\\n" + self.peers.get_cognitive_context()
+
+        Example - Output (markdown):
+            ## AVAILABLE MESH PEERS
+
+            You are part of a multi-agent mesh. The following peers are available:
+
+            - **analyst** (`agent-analyst-abc123`)
+              - Capabilities: analysis, charting, reporting
+              - Description: Analyzes data and generates insights
+
+            - **scout** (`agent-scout-def456`)
+              - Capabilities: research, reconnaissance
+              - Description: Gathers information from external sources
+
+            Use the `ask_peer` tool to delegate tasks to these specialists.
+        """
+        peers = self.list_peers()
+
+        if not peers:
+            return "No other agents are currently available in the mesh."
+
+        if format == "json":
+            return self._format_cognitive_json(peers)
+        elif format == "text":
+            return self._format_cognitive_text(peers, include_capabilities, tool_name)
+        else:  # markdown (default)
+            return self._format_cognitive_markdown(
+                peers, include_capabilities, include_description, tool_name
+            )
+
+    def _format_cognitive_markdown(
+        self,
+        peers: List[Dict[str, Any]],
+        include_capabilities: bool,
+        include_description: bool,
+        tool_name: str
+    ) -> str:
+        """Format peers as markdown for LLM consumption."""
+        lines = [
+            "## AVAILABLE MESH PEERS",
+            "",
+            "You are part of a multi-agent mesh. The following peers are available for collaboration:",
+            ""
+        ]
+
+        for peer in peers:
+            role = peer.get("role", "unknown")
+            agent_id = peer.get("agent_id", "unknown")
+            capabilities = peer.get("capabilities", [])
+            description = peer.get("description", "")
+
+            # Role line with agent ID
+            lines.append(f"- **{role}** (`{agent_id}`)")
+
+            # Capabilities
+            if include_capabilities and capabilities:
+                lines.append(f"  - Capabilities: {', '.join(capabilities)}")
+
+            # Description
+            if include_description:
+                desc = description if description else f"Specialist in {role} tasks"
+                lines.append(f"  - Description: {desc}")
+
+            lines.append("")  # Blank line between peers
+
+        # Usage instructions
+        lines.append(f"Use the `{tool_name}` tool to delegate tasks to these specialists.")
+        lines.append("When delegating, be specific about what you need and provide relevant context.")
+
+        return "\n".join(lines)
+
+    def _format_cognitive_text(
+        self,
+        peers: List[Dict[str, Any]],
+        include_capabilities: bool,
+        tool_name: str
+    ) -> str:
+        """Format peers as plain text for simpler LLM contexts."""
+        lines = ["Available Peers:"]
+
+        for peer in peers:
+            role = peer.get("role", "unknown")
+            capabilities = peer.get("capabilities", [])
+
+            if include_capabilities and capabilities:
+                lines.append(f"- {role}: {', '.join(capabilities)}")
+            else:
+                lines.append(f"- {role}")
+
+        lines.append(f"\nUse {tool_name} tool to communicate with peers.")
+
+        return "\n".join(lines)
+
+    def _format_cognitive_json(self, peers: List[Dict[str, Any]]) -> str:
+        """Format peers as JSON string for structured LLM contexts."""
+        import json
+        return json.dumps({
+            "available_peers": [
+                {
+                    "role": p.get("role"),
+                    "agent_id": p.get("agent_id"),
+                    "capabilities": p.get("capabilities", [])
+                }
+                for p in peers
+            ],
+            "instruction": "Use ask_peer tool to communicate with these agents"
+        }, indent=2)
+
+    def build_system_prompt(self, base_prompt: str, **context_kwargs) -> str:
+        """
+        Build a complete system prompt with peer context appended.
+
+        Convenience method that combines your base prompt with
+        dynamic peer context.
+
+        Args:
+            base_prompt: Your base system prompt
+            **context_kwargs: Arguments passed to get_cognitive_context()
+                - format: "markdown", "json", or "text"
+                - include_capabilities: bool
+                - include_description: bool
+                - tool_name: str
+
+        Returns:
+            Complete system prompt with peer awareness
+
+        Example:
+            # Simple usage
+            prompt = self.peers.build_system_prompt("You are a helpful analyst.")
+
+            # With options
+            prompt = self.peers.build_system_prompt(
+                "You are a data processor.",
+                include_capabilities=True,
+                include_description=False
+            )
+
+            # Use in LLM call
+            response = await llm.chat(
+                messages=[{"role": "system", "content": prompt}, ...],
+                tools=[self.peers.as_tool().schema]
+            )
+        """
+        context = self.get_cognitive_context(**context_kwargs)
+        return f"{base_prompt}\n\n{context}"
+
     # ─────────────────────────────────────────────────────────────────
     # MESSAGING - RECEIVE
     # ─────────────────────────────────────────────────────────────────
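Together, get_cognitive_context() and build_system_prompt() let an agent rebuild its system prompt whenever peers join or leave. A hedged sketch of the intended wiring, based on the docstring examples above (the `llm` client, its chat() signature, and the user message are placeholders, not part of this package):

    # Sketch only: `llm` is a hypothetical async chat client.
    base = "You are a data analyst agent. Answer concisely."
    system_prompt = self.peers.build_system_prompt(base, format="markdown")

    response = await llm.chat(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": "Summarize today's findings."},
        ],
        # as_tool() (added earlier in this file) exposes the ask_peer tool schema.
        tools=[self.peers.as_tool().schema],
    )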
@@ -483,25 +738,40 @@ class PeerClient:

     def _resolve_target(self, target: str):
         """
-        Resolve target string to agent.
+        Resolve target string to agent (local or remote).
+
+        Checks:
+        1. Local registry (same mesh)
+        2. Remote registry (other nodes via P2P coordinator)

         Args:
             target: Role name or agent_id

         Returns:
-            Agent instance or None
+            Agent instance (local) or RemoteAgentProxy (remote), or None
         """
-        #
+        # 1. Try local registry first (by role)
         agents = self._agent_registry.get(target, [])
         if agents:
             return agents[0]

-        # Try
+        # 2. Try local agent_id match
         for role_name, agents in self._agent_registry.items():
             for agent in agents:
                 if agent.agent_id == target:
                     return agent

+        # 3. Try remote agents via coordinator
+        if self._coordinator:
+            remote_info = self._coordinator.get_remote_agent(target)
+            if remote_info:
+                return RemoteAgentProxy(
+                    agent_id=remote_info.get('agent_id', target),
+                    role=remote_info.get('role', target),
+                    node_id=remote_info.get('node_id', ''),
+                    capabilities=remote_info.get('capabilities', [])
+                )
+
         return None

     async def _send_message(self, target_agent, message: OutgoingMessage) -> bool:

@@ -509,9 +779,37 @@ class PeerClient:
         Send message to target agent via coordinator.

         For local agents (same mesh), delivers directly to their queue.
-        For remote agents, sends via P2P coordinator.
+        For remote agents (RemoteAgentProxy), sends via P2P coordinator.
         """
         try:
+            # Check if it's a RemoteAgentProxy (remote agent on another node)
+            if isinstance(target_agent, RemoteAgentProxy):
+                # Remote delivery via P2P coordinator
+                if self._coordinator:
+                    msg_type = f"PEER_{message.type.value.upper()}"
+                    payload = {
+                        'sender': message.sender,
+                        'sender_node': message.sender_node,
+                        'target': target_agent.agent_id,
+                        'target_role': target_agent.role,
+                        'data': message.data,
+                        'correlation_id': message.correlation_id,
+                        'timestamp': message.timestamp
+                    }
+                    result = await self._coordinator._send_p2p_message(
+                        target_agent.node_id,
+                        msg_type,
+                        payload
+                    )
+                    if result:
+                        self._logger.debug(
+                            f"Sent {message.type.value} to remote agent "
+                            f"{target_agent.role}@{target_agent.node_id}"
+                        )
+                    return result
+                self._logger.warning("No coordinator available for remote delivery")
+                return False
+
             # Check if target has a peer client (local agent)
             if hasattr(target_agent, 'peers') and target_agent.peers:
                 # Direct local delivery

@@ -529,7 +827,7 @@ class PeerClient:
                 )
                 return True

-            # Remote delivery via P2P coordinator
+            # Fallback: Remote delivery via P2P coordinator
             if self._coordinator:
                 msg_type = f"PEER_{message.type.value.upper()}"
                 payload = {

@@ -540,8 +838,9 @@ class PeerClient:
                     'correlation_id': message.correlation_id,
                     'timestamp': message.timestamp
                 }
+                node_id = getattr(target_agent, 'node_id', None) or self._node_id
                 return await self._coordinator._send_p2p_message(
-
+                    node_id,
                     msg_type,
                     payload
                 )
jarviscore/p2p/swim_manager.py
CHANGED

@@ -116,12 +116,17 @@ class SWIMThreadManager:
         self.bind_addr = swim_parse_address(f"{bind_host}:{bind_port}")
         logger.info(f"SWIM bind address: {self.bind_addr}")

-        # Parse seed nodes
+        # Parse seed nodes - handle both string and list
         seed_addrs = []
         if seed_nodes:
-
-
-
+            # Handle both string (comma-separated) and list
+            if isinstance(seed_nodes, str):
+                seed_list = [s.strip() for s in seed_nodes.split(',') if s.strip()]
+            else:
+                seed_list = seed_nodes
+            for seed in seed_list:
+                if seed:
+                    seed_addrs.append(swim_parse_address(seed.strip() if isinstance(seed, str) else seed))
         logger.info(f"SWIM seed nodes: {seed_addrs}")

         # Get SWIM config
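The change above means seed nodes can be supplied either as a comma-separated string or as a list. A standalone sketch of the equivalent normalization (the helper name and example addresses are illustrative only; in the package this logic lives inside SWIMThreadManager.__init__):

    # Sketch only: mirrors the string-vs-list handling added above.
    def normalize_seed_nodes(seed_nodes):
        if isinstance(seed_nodes, str):
            return [s.strip() for s in seed_nodes.split(',') if s.strip()]
        return [s for s in (seed_nodes or []) if s]

    # Both forms yield the same seed list (example addresses).
    assert normalize_seed_nodes("10.0.0.1:7946, 10.0.0.2:7946") == \
           normalize_seed_nodes(["10.0.0.1:7946", "10.0.0.2:7946"])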
jarviscore/profiles/__init__.py
CHANGED

@@ -1,4 +1,10 @@
-"""
+"""
+Execution profiles for agents.
+
+Profiles define HOW agents execute tasks:
+- AutoAgent: LLM-powered code generation + sandboxed execution
+- CustomAgent: User-defined logic with P2P message handling
+"""

 from .autoagent import AutoAgent
 from .customagent import CustomAgent