agent-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. agent_mcp/__init__.py +66 -12
  2. agent_mcp/a2a_protocol.py +316 -0
  3. agent_mcp/agent_lightning_library.py +214 -0
  4. agent_mcp/camel_mcp_adapter.py +521 -0
  5. agent_mcp/claude_mcp_adapter.py +195 -0
  6. agent_mcp/cli.py +47 -0
  7. agent_mcp/google_ai_mcp_adapter.py +183 -0
  8. agent_mcp/heterogeneous_group_chat.py +412 -38
  9. agent_mcp/langchain_mcp_adapter.py +176 -43
  10. agent_mcp/llamaindex_mcp_adapter.py +410 -0
  11. agent_mcp/mcp_agent.py +26 -0
  12. agent_mcp/mcp_transport.py +11 -5
  13. agent_mcp/microsoft_agent_framework.py +591 -0
  14. agent_mcp/missing_frameworks.py +435 -0
  15. agent_mcp/openapi_protocol.py +616 -0
  16. agent_mcp/payments.py +804 -0
  17. agent_mcp/pydantic_ai_mcp_adapter.py +628 -0
  18. agent_mcp/registry.py +768 -0
  19. agent_mcp/security.py +864 -0
  20. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/METADATA +173 -49
  21. agent_mcp-0.1.5.dist-info/RECORD +62 -0
  22. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/WHEEL +1 -1
  23. agent_mcp-0.1.5.dist-info/entry_points.txt +4 -0
  24. agent_mcp-0.1.5.dist-info/top_level.txt +3 -0
  25. demos/__init__.py +1 -0
  26. demos/basic/__init__.py +1 -0
  27. demos/basic/framework_examples.py +108 -0
  28. demos/basic/langchain_camel_demo.py +272 -0
  29. demos/basic/simple_chat.py +355 -0
  30. demos/basic/simple_integration_example.py +51 -0
  31. demos/collaboration/collaborative_task_example.py +437 -0
  32. demos/collaboration/group_chat_example.py +130 -0
  33. demos/collaboration/simplified_crewai_example.py +39 -0
  34. demos/comprehensive_framework_demo.py +202 -0
  35. demos/langgraph/autonomous_langgraph_network.py +808 -0
  36. demos/langgraph/langgraph_agent_network.py +415 -0
  37. demos/langgraph/langgraph_collaborative_task.py +619 -0
  38. demos/langgraph/langgraph_example.py +227 -0
  39. demos/langgraph/run_langgraph_examples.py +213 -0
  40. demos/network/agent_network_example.py +381 -0
  41. demos/network/email_agent.py +130 -0
  42. demos/network/email_agent_demo.py +46 -0
  43. demos/network/heterogeneous_network_example.py +216 -0
  44. demos/network/multi_framework_example.py +199 -0
  45. demos/utils/check_imports.py +49 -0
  46. demos/workflows/autonomous_agent_workflow.py +248 -0
  47. demos/workflows/mcp_features_demo.py +353 -0
  48. demos/workflows/run_agent_collaboration_demo.py +63 -0
  49. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  50. demos/workflows/show_agent_interactions.py +107 -0
  51. demos/workflows/simplified_autonomous_demo.py +74 -0
  52. functions/main.py +144 -0
  53. functions/mcp_network_server.py +513 -0
  54. functions/utils.py +47 -0
  55. agent_mcp-0.1.3.dist-info/RECORD +0 -18
  56. agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
  57. agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
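
The packaging footprint also changes: top_level.txt goes from one entry to three, so the 0.1.5 wheel installs demos and functions as top-level packages alongside agent_mcp. A minimal post-install sanity check, as a sketch (it assumes the wheel was installed from PyPI, e.g. pip install agent-mcp==0.1.5):

# Sketch: verify the three top-level packages declared in top_level.txt
# import cleanly after upgrading. Assumes agent-mcp==0.1.5 is installed.
import agent_mcp   # the library itself
import demos       # example scripts, now shipped inside the wheel
import functions   # server helpers, also installed at the top level

print("agent_mcp loaded from:", agent_mcp.__file__)

Shipping demos and functions at the top level is unusual for a wheel; a generic name like functions can shadow same-named modules elsewhere in a user's environment. The largest single addition is the new autonomous LangGraph demo, whose full diff follows.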
demos/langgraph/autonomous_langgraph_network.py (new file)
@@ -0,0 +1,808 @@
+"""
+Autonomous LangGraph Agent Network.
+
+This example demonstrates a self-organizing network of agents built with LangGraph and the MCP protocol.
+Agents can autonomously decide which other agents to collaborate with based on the task at hand,
+without hardcoded collaboration patterns.
+"""
+
+import os
+import json
+import time
+import uuid
+import random
+import inspect  # For inspecting function signatures
+from typing import Any, Dict, List, Optional, Tuple
+
+# Import LangGraph components
+from langchain_core.messages import AIMessage, HumanMessage
+from langchain_core.tools import tool
+from langgraph.graph import StateGraph, END
+from langgraph.graph import MessagesState
+from langgraph.prebuilt import ToolNode
+import openai
+
+# Import our MCP implementation for LangGraph
+from agent_mcp.mcp_langgraph import MCPNode, SharedContext
+
+# Import Gemini support
+import google.generativeai as genai
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+# Constants
+DEFAULT_TEMPERATURE = 0.7
+DEFAULT_MODEL = "gpt-4o"  # the newest OpenAI model is "gpt-4o" which was released May 13, 2024.
+# do not change this unless explicitly requested by the user
+GEMINI_MODEL = "gemini-2.5-pro-preview-03-25"
+USE_MODEL_FALLBACK = True  # Enable model fallback when rate limits are hit
+
+# Set up Gemini API
+GEMINI_API_KEY = os.environ.get("GOOGLE_GEMINI_API_KEY")
+if not GEMINI_API_KEY:
+    raise ValueError("GOOGLE_GEMINI_API_KEY environment variable not set")
+genai.configure(api_key=GEMINI_API_KEY)
+
+def get_llm(use_fallback=False):
+    """
+    Get the LLM wrapper that implements the LangChain interface.
+    Can switch between OpenAI and Gemini based on availability or fallback preference.
+
+    Args:
+        use_fallback: Force use of the fallback (Gemini) model
+
+    Returns:
+        BaseChatModel: The LLM implementation
+    """
+    from langchain_openai import ChatOpenAI
+
+    # Try OpenAI first, unless the Gemini fallback is forced
+    # (fallback support must also be enabled)
+    if not use_fallback and USE_MODEL_FALLBACK:
+        api_key = os.environ.get("OPENAI_API_KEY")
+        if api_key:
+            try:
+                return ChatOpenAI(
+                    model=DEFAULT_MODEL,
+                    temperature=DEFAULT_TEMPERATURE,
+                    api_key=api_key
+                )
+            except Exception as e:
+                print(f"Error initializing OpenAI: {e}")
+                print("Falling back to Gemini model...")
+                # Continue to Gemini fallback
+
+    # Use Gemini as fallback or primary based on configuration
+    try:
+        return ChatGoogleGenerativeAI(
+            model=GEMINI_MODEL,
+            temperature=DEFAULT_TEMPERATURE,
+            google_api_key=GEMINI_API_KEY
+        )
+    except Exception as e:
+        raise ValueError(f"Failed to initialize Gemini model: {e}")
+
+def get_random_llm():
+    """
+    Get a random LLM implementation to distribute load between models.
+    This helps avoid rate limiting issues by alternating between providers.
+
+    Returns:
+        BaseChatModel: Either an OpenAI or a Gemini LLM
+    """
+    # Randomly choose between OpenAI and Gemini to distribute load
+    use_gemini = random.choice([True, False])
+    return get_llm(use_fallback=use_gemini)
+
+
+class AutonomousAgentNetwork:
+    """A self-organizing network of LangGraph agents using MCP for communication and context sharing."""
+
+    def __init__(self):
+        """Initialize the autonomous agent network."""
+        self.context = SharedContext()
+        self.agents = {}
+        self.agent_profiles = {}
+        self.workflows = {}
+        self.message_log = []
+
+    def create_network(self):
+        """Create the agent network with different specialized agents."""
+        # Define agent profiles - these are templates that agents will use to understand their roles
+        self.agent_profiles = {
+            "coordinator": {
+                "name": "Coordinator",
+                "system_message": """You are the Coordinator agent who manages collaboration.
+You're responsible for guiding collaborative efforts, synthesizing information,
+and helping other agents work effectively together.
+Your goal is to facilitate autonomous collaboration between agents based on their specialties.
+
+As Coordinator, you should:
+1. Understand the characteristics and specialties of other agents in the network
+2. Help agents recognize what work they should do and when they should involve others
+3. Synthesize information from multiple agents
+4. Guide the overall collaborative process
+5. Ask agents strategic questions to help them make progress
+
+You are NOT meant to do all the work yourself. Instead, you should suggest which agent
+might be appropriate for a task based on their specialty, and encourage agents to work together.
+""",
+                "specialty": "coordination"
+            },
+            "researcher": {
+                "name": "Researcher",
+                "system_message": """You are the Researcher agent who finds and evaluates information.
+You excel at gathering information, analyzing data, and providing evidence-based insights.
+You should autonomously recognize when your research skills are needed and collaborate
+with other agents when necessary.
+
+Your research approach should be:
+1. Comprehensive - consider multiple information sources and perspectives
+2. Critical - evaluate the reliability and validity of information
+3. Current - focus on finding the most up-to-date information
+4. Contextual - relate information to the specific needs of the task
+
+When appropriate, you should proactively suggest which other agent might be helpful
+to further analyze or implement your research findings.
+""",
+                "specialty": "research"
+            },
+            "analyst": {
+                "name": "Analyst",
+                "system_message": """You are the Analyst agent who interprets information and identifies patterns.
+You excel at critical thinking, drawing connections, and providing insights based on data.
+You should autonomously recognize when analytical skills are needed and collaborate with
+other agents when appropriate.
+
+As an analyst, you should:
+1. Examine information critically and identify patterns
+2. Evaluate different perspectives and possible interpretations
+3. Consider implications and potential applications
+4. Organize information in meaningful ways
+5. Ask clarifying questions when needed
+
+When appropriate, you should proactively suggest which other agent might benefit from
+your analysis or help implement your recommendations.
+""",
+                "specialty": "analysis"
+            },
+            "creative": {
+                "name": "Creative",
+                "system_message": """You are the Creative agent who generates innovative ideas and approaches.
+You excel at thinking outside the box, making unexpected connections, and developing novel solutions.
+You should autonomously recognize when creative input is needed and collaborate with
+other agents when appropriate.
+
+As a creative agent, you should:
+1. Generate multiple and diverse ideas
+2. Make unexpected connections between concepts
+3. Envision new possibilities and approaches
+4. Reimagine existing frameworks and assumptions
+5. Add an innovative perspective to ongoing work
+
+When appropriate, you should proactively suggest which other agent might help evaluate
+or implement your creative ideas.
+""",
+                "specialty": "creativity"
+            },
+            "planner": {
+                "name": "Planner",
+                "system_message": """You are the Planner agent who designs strategies and organizes implementation.
+You excel at creating roadmaps, setting priorities, and developing structured approaches to problems.
+You should autonomously recognize when planning skills are needed and collaborate with
+other agents when appropriate.
+
+As a planner, you should:
+1. Create structured frameworks for approaching tasks
+2. Break complex problems into manageable steps
+3. Identify resources needed and potential constraints
+4. Establish timelines and milestones
+5. Anticipate challenges and develop contingency plans
+
+When appropriate, you should proactively suggest which other agent might help refine
+or implement your plans.
+""",
+                "specialty": "planning"
+            }
+        }
+
+        # Create all agents in the network
+        for agent_id, profile in self.agent_profiles.items():
+            # Create the agent with the MCP node
+            agent = self._create_agent(
+                agent_id=agent_id,
+                name=profile["name"],
+                system_message=profile["system_message"],
+                specialty=profile["specialty"]
+            )
+
+            self.agents[agent_id] = agent
+
+        # Register network tools for all agents
+        self._register_network_tools()
+
+        # Initialize the shared workspace
+        self._share_workspace()
+
+        # Create autonomous collaboration workflow
+        self._create_collaboration_workflow()
+
+        print("Autonomous agent network initialized successfully!")
+
+    def _create_agent(self, agent_id, name, system_message, specialty):
+        """Create a single agent with MCP capabilities."""
+        # Create an MCP node for context management with a randomly selected provider
+        # to distribute load and avoid rate limits
+        mcp_node = MCPNode(
+            name=name,
+            llm=get_random_llm(),  # Use a random provider to distribute load
+            system_message=system_message,
+            context=self.context  # Share the same context object
+        )
+
+        # Add agent-specific context
+        self.context.set(f"{agent_id}_profile", {
+            "id": agent_id,
+            "name": name,
+            "specialty": specialty
+        })
+
+        # Create a LangGraph-compatible ReAct agent that can be added to a graph
+        from langgraph.prebuilt import create_react_agent
+
+        # Get any tools our MCP node has
+        mcp_tools = mcp_node.get_tools_for_node()
+
+        # Create the agent using LangGraph's create_react_agent.
+        # Check if create_react_agent supports the system_message parameter.
+        sig = inspect.signature(create_react_agent)
+        if 'system_message' in sig.parameters:
+            # Newer versions of LangGraph
+            langgraph_agent = create_react_agent(
+                mcp_node.llm,
+                mcp_tools,
+                system_message=mcp_node.get_system_message()
+            )
+        else:
+            # Older versions of LangGraph: we need to set the system message
+            # in the LLM itself. Try to use the same model type as the MCP
+            # node for consistency.
+            if isinstance(mcp_node.llm, ChatGoogleGenerativeAI):
+                llm_with_system = ChatGoogleGenerativeAI(
+                    model=GEMINI_MODEL,
+                    temperature=DEFAULT_TEMPERATURE,
+                    google_api_key=GEMINI_API_KEY
+                )
+                # System message handling for Gemini: since it may handle
+                # system messages differently, we prepend it to the next
+                # user message in the backend.
+            else:
+                # Default to OpenAI
+                from langchain_openai import ChatOpenAI
+                # Clone the LLM but with our system message as part of model_kwargs
+                llm_with_system = ChatOpenAI(
+                    model=DEFAULT_MODEL,
+                    temperature=DEFAULT_TEMPERATURE,
+                    api_key=os.environ.get("OPENAI_API_KEY"),
+                    model_kwargs={"messages": [{"role": "system", "content": mcp_node.get_system_message()}]}
+                )
+            langgraph_agent = create_react_agent(
+                llm_with_system,
+                mcp_tools
+            )
+
+        # Store both the MCP node and the LangGraph agent
+        langgraph_agent.mcp_node = mcp_node
+
+        return langgraph_agent
+
+    def _register_network_tools(self):
+        """Register network-specific tools for all agents."""
+        # Register tools for each agent
+        for agent_id, agent in self.agents.items():
+            # Tool to list all available agents
+            @tool("list_agents")
+            def list_agents(agent_id: str = agent_id):
+                """List all agents in the network with their specialties."""
+                agents_info = {}
+                for aid, profile in self.agent_profiles.items():
+                    agents_info[aid] = {
+                        "name": profile["name"],
+                        "specialty": profile["specialty"]
+                    }
+                return agents_info
+
+            # Tool to suggest collaboration with specific agents
+            @tool("suggest_collaboration")
+            def suggest_collaboration(task_description: str, agent_id: str = agent_id):
+                """Suggest which agents would be appropriate to collaborate with on a specific task."""
+                # This is intentionally left for the agent to decide based on the task
+                return {
+                    "message": "You can autonomously decide which agents to collaborate with based on the task and their specialties.",
+                    "available_agents": list_agents.invoke({})
+                }
+
+            # Tool to update the shared workspace
+            @tool("workspace_update")
+            def workspace_update(section: str, key: str, value: str, agent_id: str = agent_id):
+                """Update a section of the shared workspace."""
+                workspace_key = f"workspace_{section}"
+
+                # Get the current workspace section, or create it if it doesn't exist
+                workspace_section = self.context.get(workspace_key) or {}
+
+                # Update the workspace
+                workspace_section[key] = {
+                    "value": value,
+                    "updated_by": agent_id,
+                    "updated_at": time.time()
+                }
+
+                # Save back to context
+                self.context.set(workspace_key, workspace_section)
+
+                # Log the update
+                self.add_message(
+                    from_agent=agent_id,
+                    message=f"Updated workspace section '{section}' with key '{key}'"
+                )
+
+                return {"status": "success", "message": f"Workspace updated: {section}/{key}"}
+
+            # Tool to get data from the workspace
+            @tool("workspace_get")
+            def workspace_get(section: str, key: Optional[str] = None, agent_id: str = agent_id):
+                """Get data from the shared workspace."""
+                workspace_key = f"workspace_{section}"
+
+                # Get the workspace section
+                workspace_section = self.context.get(workspace_key)
+                if not workspace_section:
+                    return {"status": "error", "message": f"Workspace section '{section}' not found"}
+
+                # If a key is provided, return just that item
+                if key and key in workspace_section:
+                    return {"status": "success", "data": workspace_section[key]}
+
+                # Otherwise return the whole section
+                return {"status": "success", "data": workspace_section}
+
+            # Tool to send a message to all agents
+            @tool("broadcast_message")
+            def broadcast_message(message: str, agent_id: str = agent_id):
+                """Broadcast a message to all agents in the network."""
+                self.add_message(from_agent=agent_id, message=message)
+                return {"status": "success", "message": "Message broadcast to all agents"}
+
+            # The @tool decorator above already produced LangChain tools, so
+            # they can be passed to the agent directly (wrapping them with
+            # tool() a second time would fail on the already-decorated objects).
+            all_tools = [
+                list_agents,
+                suggest_collaboration,
+                workspace_update,
+                workspace_get,
+                broadcast_message
+            ]
+
+            # Create a new version of the agent with all tools
+            from langgraph.prebuilt import create_react_agent
+
+            # Get the MCP node associated with this agent
+            mcp_node = agent.mcp_node
+
+            # Replace the agent with a new one that has all tools.
+            # Check if create_react_agent supports the system_message parameter.
+            sig = inspect.signature(create_react_agent)
+            if 'system_message' in sig.parameters:
+                # Newer versions of LangGraph
+                new_agent = create_react_agent(
+                    mcp_node.llm,
+                    all_tools,
+                    system_message=mcp_node.get_system_message()
+                )
+            else:
+                # Older versions of LangGraph: we need to set the system
+                # message in the LLM itself. Try to use the same model type
+                # as the MCP node for consistency.
+                if isinstance(mcp_node.llm, ChatGoogleGenerativeAI):
+                    llm_with_system = ChatGoogleGenerativeAI(
+                        model=GEMINI_MODEL,
+                        temperature=DEFAULT_TEMPERATURE,
+                        google_api_key=GEMINI_API_KEY
+                    )
+                    # System message handling for Gemini: since it may handle
+                    # system messages differently, we prepend it to the next
+                    # user message in the backend.
+                else:
+                    # Default to OpenAI
+                    from langchain_openai import ChatOpenAI
+                    # Clone the LLM but with our system message as part of model_kwargs
+                    llm_with_system = ChatOpenAI(
+                        model=DEFAULT_MODEL,
+                        temperature=DEFAULT_TEMPERATURE,
+                        api_key=os.environ.get("OPENAI_API_KEY"),
+                        # Pass the system message through model_kwargs to avoid a warning
+                        model_kwargs={"messages": [{"role": "system", "content": mcp_node.get_system_message()}]}
+                    )
+                new_agent = create_react_agent(
+                    llm_with_system,
+                    all_tools
+                )
+
+            # Store the MCP node in the new agent
+            new_agent.mcp_node = mcp_node
+
+            # Update the agent in our dictionary
+            self.agents[agent_id] = new_agent
+
+    def _create_collaboration_workflow(self):
+        """Create a workflow for autonomous collaboration on a topic."""
+        # Define the collaboration workflow as a graph
+        workflow = StateGraph(MessagesState)
+
+        # Add all agent nodes to the graph
+        for agent_id, agent in self.agents.items():
+            workflow.add_node(agent_id, agent)
+
+        # A routing function that determines the next agent to call
+        def route_to_next_agent(state):
+            """
+            Determine which agent should respond next based on the conversation.
+
+            This function allows for autonomous agent selection without hardcoding
+            the sequence of agent interactions.
+
+            Returns:
+                A dict with the "next" key specifying the next agent to call
+            """
+            # In the current LangGraph version, state might be a dict with a "messages" key
+            if isinstance(state, dict) and "messages" in state:
+                messages = state["messages"]
+            # Or it could be a MessagesState object with a messages attribute
+            elif hasattr(state, "messages"):
+                messages = state.messages
+            else:
+                # If we can't determine the messages structure, start with the coordinator
+                print("Warning: Unable to determine message structure, defaulting to coordinator")
+                return {"next": "coordinator"}
+
+            if not messages:
+                # Start with the coordinator if there are no messages
+                return {"next": "coordinator"}
+
+            # Get the last message
+            last_message = messages[-1]
+
+            # Check if the last message mentions a specific agent to handle the task
+            message_text = last_message.content.lower()
+
+            # Stop the run once an agent declares the work finished (the same
+            # sentinel that research_topic() checks for)
+            if "research complete" in message_text:
+                return {"next": END}
+
+            # Check for explicit agent mentions
+            for agent_id in self.agents.keys():
+                if f"agent:{agent_id}" in message_text:
+                    print(f"Routing to {agent_id} based on explicit mention")
+                    return {"next": agent_id}
+
+            # If it's from an agent, let the agents work together autonomously
+            if isinstance(last_message, AIMessage) and hasattr(last_message, 'name'):
+                # Get the sender's name
+                sender_name = last_message.name
+
+                # Find the sender's ID
+                sender_id = None
+                for aid, profile in self.agent_profiles.items():
+                    if profile["name"] == sender_name:
+                        sender_id = aid
+                        break
+
+                if sender_id:
+                    # Check if there's a suggestion in the message
+                    for agent_id in self.agents.keys():
+                        agent_name = self.agent_profiles[agent_id]["name"].lower()
+                        # Look for patterns suggesting an agent
+                        suggestion_patterns = [
+                            f"ask {agent_name}",
+                            f"let {agent_name}",
+                            f"{agent_name} should",
+                            f"{agent_name} could",
+                            f"{agent_name} might",
+                            f"{agent_name} would be"
+                        ]
+
+                        for pattern in suggestion_patterns:
+                            if pattern in message_text:
+                                print(f"Routing to {agent_id} based on suggestion from {sender_id}")
+                                return {"next": agent_id}
+
+                    # If this has gone back and forth a lot with the same agent,
+                    # involve the coordinator to prevent loops
+                    consecutive_messages = 0
+                    for msg in reversed(messages):
+                        if hasattr(msg, 'name') and msg.name == sender_name:
+                            consecutive_messages += 1
+                        else:
+                            break
+
+                    if consecutive_messages >= 3 and sender_id != "coordinator":
+                        print(f"Routing to coordinator after {consecutive_messages} consecutive messages from {sender_id}")
+                        return {"next": "coordinator"}
+
+                    # Default routing based on topic and specialties: simple
+                    # keyword scoring over the recent conversation
+                    specialty_keywords = {
+                        "researcher": ["research", "information", "fact", "data", "source", "evidence"],
+                        "analyst": ["analyze", "pattern", "trend", "interpret", "implication", "insight"],
+                        "creative": ["idea", "innovative", "creative", "novel", "imagine", "possibility"],
+                        "planner": ["plan", "strategy", "implementation", "step", "roadmap", "timeline"],
+                        "coordinator": ["coordinate", "synthesize", "collaborate", "integrate", "summary"]
+                    }
+
+                    # Count keywords for each specialty in the recent conversation
+                    # (last 3 messages, to focus on current needs)
+                    recent_text = " ".join([msg.content for msg in messages[-3:]]).lower()
+                    specialty_scores = {}
+
+                    for agent_id, keywords in specialty_keywords.items():
+                        score = 0
+                        for keyword in keywords:
+                            score += recent_text.count(keyword)
+                        specialty_scores[agent_id] = score
+
+                    # Find the highest-scoring specialty that's not the current agent
+                    max_score = -1
+                    next_agent = "coordinator"  # Default to the coordinator
+
+                    for agent_id, score in specialty_scores.items():
+                        if score > max_score and agent_id != sender_id:
+                            max_score = score
+                            next_agent = agent_id
+
+                    print(f"Routing to {next_agent} based on message content analysis")
+                    return {"next": next_agent}
+
+            # Default to the coordinator if we can't determine a route
+            return {"next": "coordinator"}
+
+        # Connect human input to the router. The router node itself is a
+        # no-op; the routing decision is made on the conditional edges below
+        # (a plain add_edge from the router to every agent would fan out to
+        # all of them instead of picking one).
+        workflow.add_node("router", lambda state: {})
+        workflow.set_entry_point("router")
+
+        # Connect each agent back to the router
+        for agent_id in self.agents.keys():
+            workflow.add_edge(agent_id, "router")
+
+        # Connect the router to exactly one agent per step (or END)
+        workflow.add_conditional_edges(
+            "router",
+            lambda state: route_to_next_agent(state)["next"],
+            {**{agent_id: agent_id for agent_id in self.agents.keys()}, END: END}
+        )
+
+        # Compile the workflow
+        self.collaboration_graph = workflow.compile()
+
+    def _share_workspace(self):
+        """Initialize the shared workspace in the context."""
+        # Create sections for different types of information
+        workspace_sections = [
+            "research",   # For research findings and information
+            "analysis",   # For analysis and interpretations
+            "ideas",      # For creative ideas and innovations
+            "plans",      # For implementation plans and strategies
+            "summary"     # For overall synthesis and conclusions
+        ]
+
+        # Initialize each section as an empty dict in the context
+        for section in workspace_sections:
+            self.context.set(f"workspace_{section}", {})
+
+        # Set basic workspace metadata
+        self.context.set("workspace_metadata", {
+            "created_at": time.time(),
+            "sections": workspace_sections,
+            "description": "Shared workspace for collaborative agent research and analysis"
+        })
+
+    def add_message(self, from_agent: str, message: str) -> None:
+        """Add a message to the network communication log."""
+        self.message_log.append({
+            "from": from_agent,
+            "content": message,
+            "timestamp": time.time()
+        })
+
+    def research_topic(self, topic: str, max_steps: int = 15) -> Dict:
+        """
+        Start autonomous collaborative research on a specific topic.
+
+        Args:
+            topic: The research topic
+            max_steps: Maximum number of interaction steps
+
+        Returns:
+            Dict containing the final results
+        """
+        print(f"\n==== Starting Autonomous Collaborative Research on: {topic} ====\n")
+
+        # Set the topic in the shared context
+        self.context.set("research_topic", {
+            "title": topic,
+            "started_at": time.time()
+        })
+
+        # Create the initial message from the human
+        initial_prompt = f"""
+I need your help researching the topic: "{topic}"
+
+I'd like you all to collaborate autonomously on this topic, with each agent contributing based on their specialty.
+Together, I want you to:
+1. Research and gather key information about this topic
+2. Analyze the information and identify important patterns or insights
+3. Generate creative ideas or extensions related to the topic
+4. Develop a practical implementation plan or framework
+5. Synthesize everything into a comprehensive and cohesive output
+
+You should decide among yourselves which agent should handle each part of this task,
+based on your respective specialties and the needs of the project. Feel free to pass
+the conversation to the most appropriate agent at each step.
+
+Please start by discussing how you'll approach this research task.
+"""
+
+        initial_message = HumanMessage(content=initial_prompt)
+
+        # Run the autonomous collaboration
+        print("Beginning collaborative research process...")
+        messages = [initial_message]
+
+        # Track steps to avoid infinite loops
+        steps = 0
+
+        # Run the collaboration until max steps is reached
+        while steps < max_steps:
+            # Run one step of the collaboration
+            result = self.collaboration_graph.invoke({"messages": messages})
+            steps += 1
+
+            # Update messages based on the result structure
+            if isinstance(result, dict) and "messages" in result:
+                messages = result["messages"]
+            else:
+                # If the result format is unexpected, just use the result as messages
+                messages = result
+
+            # Check if we should stop based on the last message
+            if isinstance(messages, list) and messages:
+                last_message = messages[-1]
+            elif isinstance(messages, dict) and "messages" in messages and messages["messages"]:
+                last_message = messages["messages"][-1]
+            else:
+                print("Warning: Unexpected message format returned from collaboration graph")
+                continue
+
+            if isinstance(last_message, AIMessage) and "RESEARCH COMPLETE" in last_message.content:
+                print("\nResearch process completed by agents.")
+                break
+
+        # Get the final results from the workspace
+        final_results = {
+            "topic": topic,
+            "steps_taken": steps,
+            "research": self.context.get("workspace_research") or {},
+            "analysis": self.context.get("workspace_analysis") or {},
+            "ideas": self.context.get("workspace_ideas") or {},
+            "plans": self.context.get("workspace_plans") or {},
+            "summary": self.context.get("workspace_summary") or {}
+        }
+
+        # Display the summary
+        print("\n==== Autonomous Research Results ====\n")
+        summary_section = final_results["summary"]
+        if summary_section and isinstance(summary_section, dict) and len(summary_section) > 0:
+            # Find the most recent summary
+            latest_summary = None
+            latest_time = 0
+
+            for key, item in summary_section.items():
+                if isinstance(item, dict) and "updated_at" in item and item["updated_at"] > latest_time:
+                    latest_summary = item
+                    latest_time = item["updated_at"]
+
+            if latest_summary:
+                print(f"SUMMARY: {latest_summary['value']}")
+            else:
+                print("No summary was generated.")
+        else:
+            print("No summary was generated.")
+
+        # Return the final results
+        return final_results
+
+    def show_workspace(self) -> None:
+        """Display the current state of the shared workspace."""
+        print("\n==== Shared Workspace Contents ====\n")
+
+        # Get workspace metadata
+        metadata = self.context.get("workspace_metadata")
+        if metadata:
+            print(f"Workspace created: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(metadata['created_at']))}")
+            print(f"Description: {metadata['description']}")
+            print(f"Sections: {', '.join(metadata['sections'])}\n")
+
+            # Display each section
+            for section in metadata['sections']:
+                section_key = f"workspace_{section}"
+                section_data = self.context.get(section_key)
+
+                print(f"=== {section.upper()} ===")
+                if section_data and len(section_data) > 0:
+                    for key, item in section_data.items():
+                        if isinstance(item, dict) and "value" in item:
+                            updated_by = item.get("updated_by", "unknown")
+                            updated_time = time.strftime('%H:%M:%S', time.localtime(item.get("updated_at", 0)))
+                            print(f"- {key} (by {updated_by} at {updated_time}):")
+
+                            # Format the value based on type
+                            value = item["value"]
+                            if isinstance(value, str):
+                                # For multiline strings, indent properly
+                                lines = value.split('\n')
+                                if len(lines) > 1:
+                                    print(f"  {lines[0]}")
+                                    for line in lines[1:]:
+                                        print(f"  {line}")
+                                else:
+                                    print(f"  {value}")
+                            else:
+                                print(f"  {value}")
+                        else:
+                            print(f"- {key}: {item}")
+                else:
+                    print("  No entries yet.")
+                print()
+
+
+def main():
+    """Run the autonomous agent network example."""
+    print("=== Autonomous LangGraph Agent Network Example ===")
+    print("This example demonstrates agents autonomously deciding how to collaborate,")
+    print("without hardcoded interaction patterns.")
+
+    # Create the autonomous agent network
+    network = AutonomousAgentNetwork()
+    network.create_network()
+
+    # Menu for the different options
+    while True:
+        print("\n=== Autonomous Agent Network Menu ===")
+        print("1. Research a topic")
+        print("2. View the workspace")
+        print("3. Exit")
+
+        choice = input("\nSelect an option (1-3): ")
+
+        if choice == "1":
+            topic = input("Enter a research topic: ")
+            network.research_topic(topic)
+
+        elif choice == "2":
+            network.show_workspace()
+
+        elif choice == "3":
+            print("Exiting the Autonomous Agent Network Example. Goodbye!")
+            break
+
+        else:
+            print("Invalid option. Please try again.")
+
+
+if __name__ == "__main__":
+    main()
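
For reference, a minimal non-interactive driver for the demo above, as a sketch: it bypasses main()'s menu, assumes GOOGLE_GEMINI_API_KEY (and optionally OPENAI_API_KEY) is set, assumes demos.langgraph is importable as a package from the installed wheel, and uses a placeholder topic string.

# Sketch: drive the demo without the interactive menu. The module raises at
# import time if GOOGLE_GEMINI_API_KEY is unset, so set it before importing.
import os
os.environ.setdefault("GOOGLE_GEMINI_API_KEY", "<your-gemini-key>")  # placeholder

from demos.langgraph.autonomous_langgraph_network import AutonomousAgentNetwork

network = AutonomousAgentNetwork()
network.create_network()  # builds the agents, their tools, and the routing graph

# One bounded research run; stops early if an agent emits "RESEARCH COMPLETE"
results = network.research_topic("urban heat islands", max_steps=10)
network.show_workspace()
print("steps taken:", results["steps_taken"])

Note that each collaboration_graph.invoke() call runs until the router reaches END or LangGraph's recursion limit, so max_steps bounds the outer loop iterations rather than individual model calls.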