jarviscore-framework 0.2.1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (37)
  1. examples/cloud_deployment_example.py +162 -0
  2. examples/customagent_cognitive_discovery_example.py +343 -0
  3. examples/fastapi_integration_example.py +570 -0
  4. jarviscore/__init__.py +19 -5
  5. jarviscore/cli/smoketest.py +8 -4
  6. jarviscore/core/agent.py +227 -0
  7. jarviscore/core/mesh.py +9 -0
  8. jarviscore/data/examples/cloud_deployment_example.py +162 -0
  9. jarviscore/data/examples/custom_profile_decorator.py +134 -0
  10. jarviscore/data/examples/custom_profile_wrap.py +168 -0
  11. jarviscore/data/examples/customagent_cognitive_discovery_example.py +343 -0
  12. jarviscore/data/examples/fastapi_integration_example.py +570 -0
  13. jarviscore/docs/API_REFERENCE.md +283 -3
  14. jarviscore/docs/CHANGELOG.md +139 -0
  15. jarviscore/docs/CONFIGURATION.md +1 -1
  16. jarviscore/docs/CUSTOMAGENT_GUIDE.md +997 -85
  17. jarviscore/docs/GETTING_STARTED.md +228 -267
  18. jarviscore/docs/TROUBLESHOOTING.md +1 -1
  19. jarviscore/docs/USER_GUIDE.md +153 -8
  20. jarviscore/integrations/__init__.py +16 -0
  21. jarviscore/integrations/fastapi.py +247 -0
  22. jarviscore/p2p/broadcaster.py +10 -3
  23. jarviscore/p2p/coordinator.py +310 -14
  24. jarviscore/p2p/keepalive.py +45 -23
  25. jarviscore/p2p/peer_client.py +311 -12
  26. jarviscore/p2p/swim_manager.py +9 -4
  27. jarviscore/profiles/__init__.py +7 -1
  28. jarviscore/profiles/customagent.py +295 -74
  29. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/METADATA +66 -18
  30. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/RECORD +37 -22
  31. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/WHEEL +1 -1
  32. tests/test_13_dx_improvements.py +554 -0
  33. tests/test_14_cloud_deployment.py +403 -0
  34. tests/test_15_llm_cognitive_discovery.py +684 -0
  35. tests/test_16_unified_dx_flow.py +947 -0
  36. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/licenses/LICENSE +0 -0
  37. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/top_level.txt +0 -0
jarviscore/core/agent.py CHANGED
@@ -5,14 +5,19 @@ This is the foundation of the JarvisCore framework. All agents inherit from this
 
 For p2p mode, agents can implement a run() method for their own execution loop
 and use self.peers for direct peer-to-peer communication.
+
+For cloud deployment, agents can self-register with a mesh using join_mesh().
 """
 from abc import ABC, abstractmethod
 from typing import List, Dict, Any, Optional, TYPE_CHECKING
 from uuid import uuid4
+import asyncio
 import logging
+import os
 
 if TYPE_CHECKING:
     from jarviscore.p2p import PeerClient
+    from jarviscore.p2p.coordinator import P2PCoordinator
 
 logger = logging.getLogger(__name__)
 
@@ -70,6 +75,10 @@ class Agent(ABC):
         self.peers: Optional['PeerClient'] = None # Injected by Mesh in p2p mode
         self.shutdown_requested: bool = False # Set True to stop run() loop
 
+        # Cloud deployment support (standalone mode)
+        self._standalone_p2p: Optional['P2PCoordinator'] = None
+        self._mesh_connected: bool = False
+
         self._logger.debug(f"Agent initialized: {self.agent_id}")
 
     @abstractmethod
@@ -204,3 +213,221 @@ class Agent(ABC):
     def __str__(self) -> str:
         """Human-readable string representation."""
         return f"{self.role} ({self.agent_id})"
+
+    # ─────────────────────────────────────────────────────────────────
+    # CLOUD DEPLOYMENT (Standalone Mode)
+    # ─────────────────────────────────────────────────────────────────
+
+    async def join_mesh(
+        self,
+        endpoint: str = None,
+        seed_nodes: str = None,
+        config: dict = None
+    ) -> bool:
+        """
+        Self-register with a running mesh (for cloud/container deployment).
+
+        Instead of using mesh.add(), agents can join an existing mesh
+        independently. This is the pattern for containerized deployments
+        where each container runs a single agent.
+
+        Args:
+            endpoint: Mesh endpoint (host:port) - uses JARVISCORE_MESH_ENDPOINT env if not provided
+            seed_nodes: Comma-separated seed nodes - uses JARVISCORE_SEED_NODES env if not provided
+            config: Additional P2P configuration options
+
+        Returns:
+            True if successfully joined the mesh
+
+        Raises:
+            ValueError: If no endpoint or seed_nodes provided and not in environment
+
+        Example - Direct:
+            agent = MyAgent()
+            await agent.join_mesh(seed_nodes="192.168.1.10:7946")
+            await agent.run()
+            await agent.leave_mesh()
+
+        Example - Environment Variable:
+            # Set JARVISCORE_SEED_NODES=192.168.1.10:7946
+            agent = MyAgent()
+            await agent.join_mesh() # Auto-discovers from env
+            await agent.run()
+            await agent.leave_mesh()
+
+        Example - Docker/K8s:
+            # In container entrypoint
+            async def main():
+                agent = ProcessorAgent()
+                await agent.join_mesh() # Uses env vars
+                await agent.run_standalone() # Handles graceful shutdown
+        """
+        from jarviscore.p2p.coordinator import P2PCoordinator
+        from jarviscore.p2p.peer_client import PeerClient
+
+        # 1. Resolve connection info from args or environment
+        endpoint = endpoint or os.environ.get("JARVISCORE_MESH_ENDPOINT")
+        seed_nodes = seed_nodes or os.environ.get("JARVISCORE_SEED_NODES", "")
+
+        if not endpoint and not seed_nodes:
+            raise ValueError(
+                "Must provide endpoint, seed_nodes, or set "
+                "JARVISCORE_MESH_ENDPOINT / JARVISCORE_SEED_NODES environment variable"
+            )
+
+        # 2. Build P2P configuration - use same config loading as Mesh
+        from jarviscore.config import get_config_from_dict
+        mesh_config = get_config_from_dict(config)
+
+        # Set seed nodes for joining the cluster
+        if endpoint:
+            mesh_config["seed_nodes"] = endpoint
+        if seed_nodes:
+            mesh_config["seed_nodes"] = seed_nodes
+
+        # Find an available port for this agent's P2P listener
+        # SWIM doesn't support bind_port=0, so we find a free port
+        if "bind_port" not in mesh_config or mesh_config.get("bind_port") == 0:
+            import socket
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.bind(('', 0))
+                mesh_config["bind_port"] = s.getsockname()[1]
+
+        mesh_config["node_name"] = f"agent-{self.agent_id}"
+
+        self._logger.info(f"Joining mesh via {endpoint or seed_nodes}...")
+
+        # 3. Setup agent (call setup hook)
+        await self.setup()
+
+        # 4. Start standalone P2P coordinator
+        self._standalone_p2p = P2PCoordinator([self], mesh_config)
+        await self._standalone_p2p.start()
+
+        # 5. Wait for SWIM cluster to converge
+        # This allows SWIM gossip to sync membership
+        import asyncio
+        self._logger.info("Waiting for SWIM cluster convergence...")
+        await asyncio.sleep(1.0) # Brief wait for SWIM gossip
+
+        # 6. Request existing capabilities from peers (we're a late joiner)
+        # Note: request_peer_capabilities will wait for ZMQ connections internally
+        self._logger.info("Requesting capabilities from existing peers...")
+        await self._standalone_p2p.request_peer_capabilities()
+
+        # 7. Announce our own capabilities to mesh
+        # Note: announce_capabilities will wait for ZMQ connections internally
+        await self._standalone_p2p.announce_capabilities()
+
+        # 7. Setup PeerClient for this agent
+        node_id = ""
+        if self._standalone_p2p.swim_manager:
+            addr = self._standalone_p2p.swim_manager.bind_addr
+            if addr:
+                node_id = f"{addr[0]}:{addr[1]}"
+
+        self.peers = PeerClient(
+            coordinator=self._standalone_p2p,
+            agent_id=self.agent_id,
+            agent_role=self.role,
+            agent_registry={self.role: [self]},
+            node_id=node_id
+        )
+
+        # Register PeerClient with coordinator for message routing
+        self._standalone_p2p.register_peer_client(self.agent_id, self.peers)
+
+        self._mesh_connected = True
+        self._logger.info(f"Successfully joined mesh as {self.role} ({self.agent_id})")
+
+        return True
+
+    async def leave_mesh(self) -> bool:
+        """
+        Gracefully deregister from mesh.
+
+        Called when agent is shutting down to notify other nodes
+        that this agent is no longer available.
+
+        Returns:
+            True if successfully left the mesh
+
+        Example:
+            try:
+                await agent.run()
+            finally:
+                await agent.leave_mesh()
+        """
+        if not self._mesh_connected:
+            return True
+
+        self._logger.info("Leaving mesh...")
+
+        # 1. Deannounce capabilities (notify mesh we're leaving)
+        if self._standalone_p2p:
+            try:
+                await self._standalone_p2p.deannounce_capabilities()
+            except Exception as e:
+                self._logger.warning(f"Error deannouncing capabilities: {e}")
+
+        # 2. Unregister peer client
+        if self._standalone_p2p:
+            self._standalone_p2p.unregister_peer_client(self.agent_id)
+
+        # 3. Stop P2P coordinator
+        if self._standalone_p2p:
+            await self._standalone_p2p.stop()
+            self._standalone_p2p = None
+
+        # 4. Teardown agent (call teardown hook)
+        await self.teardown()
+
+        self._mesh_connected = False
+        self.peers = None
+        self._logger.info("Successfully left mesh")
+
+        return True
+
+    @property
+    def is_mesh_connected(self) -> bool:
+        """Check if agent is currently connected to a mesh."""
+        return self._mesh_connected
+
+    async def run_standalone(self):
+        """
+        Run agent in standalone mode with automatic mesh cleanup.
+
+        Combines run() loop with graceful leave_mesh() on exit.
+        Use this as the main entrypoint for containerized agents.
+
+        Example - Container Entrypoint:
+            async def main():
+                agent = ProcessorAgent()
+                await agent.join_mesh()
+                await agent.run_standalone() # Blocks until shutdown
+
+            if __name__ == "__main__":
+                asyncio.run(main())
+        """
+        if not self._mesh_connected:
+            raise RuntimeError(
+                "Not connected to mesh. Call join_mesh() first."
+            )
+
+        try:
+            # Run the agent's main loop
+            if hasattr(self, 'run') and asyncio.iscoroutinefunction(self.run):
+                await self.run()
+            else:
+                # No run() method - just wait for shutdown signal
+                while not self.shutdown_requested:
+                    await asyncio.sleep(0.1)
+
+        except asyncio.CancelledError:
+            self._logger.info("Agent cancelled, cleaning up...")
+        except Exception as e:
+            self._logger.error(f"Agent error: {e}")
+            raise
+
+        finally:
+            # Always leave mesh gracefully
+            await self.leave_mesh()
jarviscore/core/mesh.py CHANGED
@@ -256,9 +256,18 @@ class Mesh:
         await self._p2p_coordinator.start()
         self._logger.info("✓ P2P coordinator started")
 
+        # Wait for mesh to stabilize before announcing
+        # Increased delay to ensure SWIM fully connects all nodes
+        await asyncio.sleep(5)
+        self._logger.info("Waited for mesh stabilization")
+
         # Announce capabilities to network
         await self._p2p_coordinator.announce_capabilities()
         self._logger.info("✓ Capabilities announced to mesh")
+
+        # Request capabilities from existing peers (for late-joiners)
+        await self._p2p_coordinator.request_peer_capabilities()
+        self._logger.info("✓ Requested capabilities from existing peers")
 
         # Inject PeerClients for p2p mode
         if self.mode == MeshMode.P2P:
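
From the caller's side the startup sequence does not change: the stabilization wait, the capability announcement, and the late-joiner capability request all run inside mesh.start(). A minimal sketch of that calling pattern under those assumptions (the two tiny agent classes are illustrative, not from the package):

# Caller-side view of the reworked startup path: mesh.start() now takes a few
# seconds longer while SWIM settles and capabilities are exchanged, but the
# calling code itself is unchanged. Pinger/Ponger are placeholder agents.
import asyncio

from jarviscore import Mesh
from jarviscore.profiles import CustomAgent


class Pinger(CustomAgent):
    role = "pinger"
    capabilities = ["ping"]


class Ponger(CustomAgent):
    role = "ponger"
    capabilities = ["pong"]


async def main():
    mesh = Mesh(mode="p2p")   # p2p mode injects self.peers into each agent
    mesh.add(Pinger)
    mesh.add(Ponger)

    await mesh.start()        # returns after stabilization + capability exchange
    try:
        await asyncio.sleep(5)  # keep the mesh alive briefly for demonstration
    finally:
        await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())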
jarviscore/data/examples/cloud_deployment_example.py ADDED
@@ -0,0 +1,162 @@
+"""
+Cloud Deployment Example (v0.3.0)
+
+Demonstrates agent self-registration with join_mesh() and leave_mesh().
+Agents join an existing mesh independently - no central orchestrator needed.
+
+This is the pattern for:
+- Docker containers where each container runs one agent
+- Kubernetes pods with auto-scaling
+- Cloud Functions / Lambda
+- Any distributed deployment where agents start independently
+
+Usage:
+    # Terminal 1: Start a mesh (or use an existing one)
+    python examples/customagent_p2p_example.py
+
+    # Terminal 2: Run standalone agent that joins the mesh
+    JARVISCORE_SEED_NODES=127.0.0.1:7946 python examples/cloud_deployment_example.py
+
+Environment Variables:
+    JARVISCORE_SEED_NODES: Comma-separated seed nodes (e.g., "host1:7946,host2:7946")
+    JARVISCORE_MESH_ENDPOINT: Single mesh endpoint (alternative to seed_nodes)
+"""
+import asyncio
+import os
+import signal
+import sys
+
+sys.path.insert(0, '.')
+
+from jarviscore.profiles import CustomAgent
+
+
+class StandaloneProcessor(CustomAgent):
+    """
+    Example standalone agent that joins mesh independently.
+
+    This agent:
+    - Self-registers with the mesh on startup
+    - Listens for peer requests
+    - Shows its view of the mesh (cognitive context)
+    - Gracefully leaves mesh on shutdown
+    """
+
+    role = "standalone_processor"
+    capabilities = ["standalone", "processing", "example"]
+    description = "Processes requests from other mesh agents (standalone deployment)"
+
+    async def on_peer_request(self, msg):
+        """Handle incoming requests from other agents."""
+        print(f"\n[{self.role}] Received request from {msg.sender}:")
+        print(f" Data: {msg.data}")
+
+        # Process the request
+        task = msg.data.get("task", "")
+        result = {
+            "status": "success",
+            "output": f"Processed: {task}",
+            "agent_id": self.agent_id,
+            "processed_by": self.role
+        }
+
+        print(f"[{self.role}] Sending response: {result}")
+        return result
+
+    async def on_peer_notify(self, msg):
+        """Handle incoming notifications from other agents."""
+        print(f"\n[{self.role}] Received notification from {msg.sender}:")
+        print(f" Event: {msg.data.get('event', 'unknown')}")
+        print(f" Data: {msg.data}")
+
+
+async def main():
+    print("=" * 60)
+    print("Standalone Agent Example - Cloud Deployment Pattern")
+    print("=" * 60)
+
+    # Check for mesh connection info
+    endpoint = os.environ.get("JARVISCORE_MESH_ENDPOINT")
+    seed_nodes = os.environ.get("JARVISCORE_SEED_NODES")
+
+    if not endpoint and not seed_nodes:
+        print("\nNo mesh endpoint configured!")
+        print("\nSet one of:")
+        print(" - JARVISCORE_MESH_ENDPOINT (single endpoint)")
+        print(" - JARVISCORE_SEED_NODES (comma-separated list)")
+        print("\nExample:")
+        print(" JARVISCORE_SEED_NODES=127.0.0.1:7946 python cloud_deployment_example.py")
+        print("\nTo start a mesh first, run:")
+        print(" python examples/customagent_p2p_example.py")
+        return
+
+    print(f"\nConnecting to mesh via: {endpoint or seed_nodes}")
+
+    # Create agent
+    agent = StandaloneProcessor()
+
+    # Join the mesh
+    print(f"\nJoining mesh...")
+    try:
+        await agent.join_mesh()
+    except Exception as e:
+        print(f"Failed to join mesh: {e}")
+        return
+
+    print(f"\nSuccessfully joined mesh!")
+    print(f" Agent ID: {agent.agent_id}")
+    print(f" Role: {agent.role}")
+    print(f" Capabilities: {agent.capabilities}")
+
+    # Show discovered peers
+    print(f"\n--- Discovered Peers ---")
+    peers = agent.peers.list_peers()
+    if peers:
+        for p in peers:
+            location = f" ({p.get('location', 'unknown')})" if 'location' in p else ""
+            print(f" - {p['role']}: {p['capabilities']}{location}")
+    else:
+        print(" No other peers discovered yet")
+
+    # Show cognitive context (what an LLM would see)
+    print(f"\n--- Cognitive Context for LLM ---")
+    print(agent.peers.get_cognitive_context())
+
+    # Setup graceful shutdown
+    shutdown_event = asyncio.Event()
+
+    def signal_handler():
+        print("\n\nShutdown requested (Ctrl+C)...")
+        agent.request_shutdown()
+        shutdown_event.set()
+
+    # Register signal handlers
+    loop = asyncio.get_event_loop()
+    for sig in (signal.SIGINT, signal.SIGTERM):
+        try:
+            loop.add_signal_handler(sig, signal_handler)
+        except NotImplementedError:
+            # Windows doesn't support add_signal_handler
+            pass
+
+    print(f"\n--- Agent Running ---")
+    print("Listening for peer requests...")
+    print("Press Ctrl+C to stop.\n")
+
+    # Run agent (CustomAgent's run() handles the message loop)
+    try:
+        await agent.run()
+    except asyncio.CancelledError:
+        pass
+
+    # Leave mesh gracefully
+    print("\nLeaving mesh...")
+    await agent.leave_mesh()
+    print("Goodbye!")
+
+
+if __name__ == "__main__":
+    try:
+        asyncio.run(main())
+    except KeyboardInterrupt:
+        print("\nInterrupted.")
jarviscore/data/examples/custom_profile_decorator.py ADDED
@@ -0,0 +1,134 @@
+"""
+Custom Profile Example: Using @jarvis_agent Decorator
+
+This example shows how to use the @jarvis_agent decorator to convert
+any Python class into a JarvisCore agent without modifying the class.
+
+Use Case: You have existing Python classes/agents and want JarvisCore
+to handle orchestration (data handoff, dependencies, shared memory).
+"""
+import asyncio
+from jarviscore import Mesh, jarvis_agent, JarvisContext
+
+
+# Example 1: Simple decorator (no context needed)
+@jarvis_agent(role="processor", capabilities=["data_processing"])
+class DataProcessor:
+    """Simple data processor - doubles input values."""
+
+    def run(self, data):
+        """Process data by doubling values."""
+        if isinstance(data, list):
+            return {"processed": [x * 2 for x in data]}
+        return {"processed": data * 2}
+
+
+# Example 2: Decorator with context access
+@jarvis_agent(role="aggregator", capabilities=["aggregation"])
+class Aggregator:
+    """Aggregates results from previous steps using JarvisContext."""
+
+    def run(self, task, ctx: JarvisContext):
+        """
+        Access previous step results via ctx.previous().
+
+        Args:
+            task: The task description
+            ctx: JarvisContext with memory and dependency access
+        """
+        # Get output from a specific previous step
+        processed = ctx.previous("step1")
+
+        if processed:
+            data = processed.get("processed", [])
+            return {
+                "sum": sum(data) if isinstance(data, list) else data,
+                "count": len(data) if isinstance(data, list) else 1,
+                "source_step": "step1"
+            }
+
+        return {"error": "No previous data found"}
+
+
+# Example 3: Decorator with custom execute method
+@jarvis_agent(role="validator", capabilities=["validation"], execute_method="validate")
+class DataValidator:
+    """Validates data using a custom method name."""
+
+    def validate(self, data):
+        """Custom execute method - validates input data."""
+        if isinstance(data, list):
+            return {
+                "valid": all(isinstance(x, (int, float)) for x in data),
+                "count": len(data),
+                "type": "list"
+            }
+        return {
+            "valid": isinstance(data, (int, float)),
+            "type": type(data).__name__
+        }
+
+
+async def main():
+    """Run a multi-step workflow with custom profile agents."""
+    print("=" * 60)
+    print(" Custom Profile Example: @jarvis_agent Decorator")
+    print("=" * 60)
+
+    # Create mesh in autonomous mode
+    mesh = Mesh(mode="autonomous")
+
+    # Add our decorated agents
+    mesh.add(DataProcessor)
+    mesh.add(Aggregator)
+    mesh.add(DataValidator)
+
+    # Start the mesh
+    await mesh.start()
+
+    try:
+        # Execute a multi-step workflow
+        print("\nExecuting workflow with 3 steps...\n")
+
+        results = await mesh.workflow("custom-profile-demo", [
+            {
+                "id": "step1",
+                "agent": "processor",
+                "task": "Process input data",
+                "params": {"data": [1, 2, 3, 4, 5]}
+            },
+            {
+                "id": "step2",
+                "agent": "aggregator",
+                "task": "Aggregate processed results",
+                "depends_on": ["step1"] # Wait for step1
+            },
+            {
+                "id": "step3",
+                "agent": "validator",
+                "task": "Validate original data",
+                "params": {"data": [1, 2, 3, 4, 5]}
+            }
+        ])
+
+        # Print results
+        print("Results:")
+        print("-" * 40)
+
+        for i, result in enumerate(results):
+            step_name = ["Processor", "Aggregator", "Validator"][i]
+            print(f"\n{step_name} (step{i+1}):")
+            print(f" Status: {result.get('status')}")
+            print(f" Output: {result.get('output')}")
+
+        print("\n" + "=" * 60)
+        print(" Workflow completed successfully!")
+        print("=" * 60)
+
+    finally:
+        # Stop the mesh
+        await mesh.stop()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())