unaiverse 0.1.6__cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of unaiverse might be problematic. Click here for more details.
- unaiverse/__init__.py +19 -0
- unaiverse/agent.py +2008 -0
- unaiverse/agent_basics.py +1846 -0
- unaiverse/clock.py +191 -0
- unaiverse/dataprops.py +1209 -0
- unaiverse/hsm.py +1880 -0
- unaiverse/modules/__init__.py +18 -0
- unaiverse/modules/cnu/__init__.py +17 -0
- unaiverse/modules/cnu/cnus.py +536 -0
- unaiverse/modules/cnu/layers.py +261 -0
- unaiverse/modules/cnu/psi.py +60 -0
- unaiverse/modules/hl/__init__.py +15 -0
- unaiverse/modules/hl/hl_utils.py +411 -0
- unaiverse/modules/networks.py +1509 -0
- unaiverse/modules/utils.py +680 -0
- unaiverse/networking/__init__.py +16 -0
- unaiverse/networking/node/__init__.py +18 -0
- unaiverse/networking/node/connpool.py +1261 -0
- unaiverse/networking/node/node.py +2223 -0
- unaiverse/networking/node/profile.py +446 -0
- unaiverse/networking/node/tokens.py +79 -0
- unaiverse/networking/p2p/__init__.py +198 -0
- unaiverse/networking/p2p/go.mod +127 -0
- unaiverse/networking/p2p/go.sum +548 -0
- unaiverse/networking/p2p/golibp2p.py +18 -0
- unaiverse/networking/p2p/golibp2p.pyi +135 -0
- unaiverse/networking/p2p/lib.go +2714 -0
- unaiverse/networking/p2p/lib.go.sha256 +1 -0
- unaiverse/networking/p2p/lib_types.py +312 -0
- unaiverse/networking/p2p/message_pb2.py +63 -0
- unaiverse/networking/p2p/messages.py +265 -0
- unaiverse/networking/p2p/mylogger.py +77 -0
- unaiverse/networking/p2p/p2p.py +929 -0
- unaiverse/networking/p2p/proto-go/message.pb.go +616 -0
- unaiverse/networking/p2p/unailib.cpython-310-aarch64-linux-gnu.so +0 -0
- unaiverse/streamlib/__init__.py +15 -0
- unaiverse/streamlib/streamlib.py +210 -0
- unaiverse/streams.py +770 -0
- unaiverse/utils/__init__.py +16 -0
- unaiverse/utils/ask_lone_wolf.json +27 -0
- unaiverse/utils/lone_wolf.json +19 -0
- unaiverse/utils/misc.py +305 -0
- unaiverse/utils/sandbox.py +293 -0
- unaiverse/utils/server.py +435 -0
- unaiverse/world.py +175 -0
- unaiverse-0.1.6.dist-info/METADATA +365 -0
- unaiverse-0.1.6.dist-info/RECORD +50 -0
- unaiverse-0.1.6.dist-info/WHEEL +7 -0
- unaiverse-0.1.6.dist-info/licenses/LICENSE +43 -0
- unaiverse-0.1.6.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2223 @@
|
|
|
1
|
+
"""
|
|
2
|
+
█████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
|
|
3
|
+
░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
|
|
4
|
+
░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
|
|
5
|
+
░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
|
|
6
|
+
░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
|
|
7
|
+
░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
|
|
8
|
+
░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
|
|
9
|
+
░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
|
|
10
|
+
A Collectionless AI Project (https://collectionless.ai)
|
|
11
|
+
Registration/Login: https://unaiverse.io
|
|
12
|
+
Code Repositories: https://github.com/collectionlessai/
|
|
13
|
+
Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
|
|
14
|
+
"""
|
|
15
|
+
import os
|
|
16
|
+
import sys
|
|
17
|
+
import ast
|
|
18
|
+
import cv2
|
|
19
|
+
import copy
|
|
20
|
+
import json
|
|
21
|
+
import math
|
|
22
|
+
import time
|
|
23
|
+
import html
|
|
24
|
+
import queue
|
|
25
|
+
import types
|
|
26
|
+
import requests
|
|
27
|
+
import threading
|
|
28
|
+
import traceback
|
|
29
|
+
from PIL import Image
|
|
30
|
+
from typing import Optional
|
|
31
|
+
from collections import deque
|
|
32
|
+
from unaiverse.clock import Clock
|
|
33
|
+
from unaiverse.world import World
|
|
34
|
+
from unaiverse.agent import Agent
|
|
35
|
+
from unaiverse.networking.p2p import P2P
|
|
36
|
+
from unaiverse.networking.p2p.messages import Msg
|
|
37
|
+
from datetime import datetime, timezone, timedelta
|
|
38
|
+
from unaiverse.networking.node.connpool import NodeConn
|
|
39
|
+
from unaiverse.networking.node.profile import NodeProfile
|
|
40
|
+
from unaiverse.streams import DataProps, BufferedDataStream
|
|
41
|
+
from unaiverse.utils.misc import GenException, get_key_considering_multiple_sources, save_node_addresses_to_file
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class Node:
    """A node of the UNaIVERSE P2P network.

    A Node hosts exactly one entity, either an Agent or a World, and wires it to
    the networking layer (connection pool, clock, profile, root-server APIs).
    """

    # Each node can host an agent or a world (string tags stored in self.node_type)
    AGENT = "agent"
    WORLD = "world"

    # Each node outputs console text with a different color
    # (ANSI escape sequences; rotated across Node instances)
    TEXT_COLORS = ('\033[91m', '\033[94m', '\033[92m', '\033[93m')
    # Index into TEXT_COLORS of the next color to hand out; guarded by TEXT_LOCK
    TEXT_LAST_USED_COLOR = 0
    # Class-level lock serializing the color rotation across threads
    TEXT_LOCK = threading.Lock()
|
|
54
|
+
|
|
55
|
+
    def __init__(self,
                 hosted: Agent | World,
                 unaiverse_key: str | None = None,
                 node_name: str | None = None,
                 node_id: str | None = None,
                 hidden: bool = False,
                 clock_delta: float = 1. / 25.,
                 base_identity_dir: str = "./unaiverse_nodes_identity",
                 only_certified_agents: bool = False,
                 allowed_node_ids: list[str] | set[str] = None,  # Optional: it is loaded from the online profile
                 world_masters_node_ids: list[str] | set[str] = None,  # Optional: it is loaded from the online profile
                 world_masters_node_names: list[str] | set[str] = None,  # Optional: it will be converted to node IDs
                 allow_connection_through_relay: bool = True,
                 talk_to_relay_based_nodes: bool = True):
        """Initializes a new instance of the Node class.

        Args:
            hosted: The Agent or World entity hosted by this node.
            unaiverse_key: The UNaIVERSE key for authentication (if None, it will be loaded from env var or cache file,
                or you will be asked for it).
            node_name: A human-readable name for the node (using node ID is preferable; use this or node ID, not both).
            node_id: A unique identifier for the node (use this or the node name, not both).
            hidden: A flag to determine if the node is hidden (i.e., only the owner of the account can see it).
            clock_delta: The minimum time delta for the node's clock.
            base_identity_dir: Base directory where per-node P2P identity files are stored.
            only_certified_agents: A flag to allow only certified agents to connect.
            allowed_node_ids: A list or set of allowed node IDs to connect (it is loaded from the online profile).
            world_masters_node_ids: A list or set of world masters' node IDs (it is also loaded from online profile).
            world_masters_node_names: A list or set of world masters' node names (using IDs is preferable).
            allow_connection_through_relay: A flag to allow connections through a relay.
            talk_to_relay_based_nodes: A flag to allow talking to relay-based nodes.
        """

        # Checking main arguments
        if not (isinstance(hosted, Agent) or isinstance(hosted, World)):
            raise GenException("Invalid hosted entity, must be Agent or World")
        if not (node_id is None or isinstance(node_id, str)):
            raise GenException("Invalid node ID")
        if not (node_name is None or isinstance(node_name, str)):
            raise GenException("Invalid node name")
        if not (node_name is None or node_id is None):
            raise GenException("Cannot specify both node ID and node name")
        if not (node_name is not None or node_id is not None):
            raise GenException("You must specify either node ID or node name: both are missing")
        if not (unaiverse_key is None or isinstance(unaiverse_key, str)):
            raise GenException("Invalid UNaIVERSE key")

        # Main attributes
        self.node_id = node_id
        self.unaiverse_key = unaiverse_key
        self.hosted = hosted
        # Note: World is tested last because a World may subclass Agent — TODO confirm class hierarchy
        self.node_type = Node.AGENT if (isinstance(hosted, Agent) and not isinstance(hosted, World)) else Node.WORLD
        self.agent = hosted if self.node_type is Node.AGENT else None
        self.world = hosted if self.node_type is Node.WORLD else None
        self.clock = Clock(min_delta=clock_delta)  # Node clock
        self.conn = None  # Manages the network operations in the P2P network (created further below)
        self.talk_to_relay_based_nodes = talk_to_relay_based_nodes

        # Expected properties of the nodes that will try to connect to this one
        self.only_certified_agents = only_certified_agents
        self.allowed_node_ids = set(allowed_node_ids) if allowed_node_ids is not None else None
        self.world_masters_node_ids = set(world_masters_node_ids) if world_masters_node_ids is not None else None

        # Profile
        self.profile = None
        # Seconds (currently the same value for world and agent nodes)
        self.send_dynamic_profile_every = 10. if self.node_type is Node.WORLD else 10.
        self.get_new_token_every = 23 * 60. * 60. + 30 * 60.  # Seconds (23 hours and 30 minutes, safer)

        # Rendezvous
        self.publish_rendezvous_every = 10.
        self.last_rendezvous_time = 0.

        # Automatic address update and relay refresh (if needed)
        self.relay_reservation_expiry: Optional[datetime] = None
        self.address_check_every = 5 * 60.  # Check every 5 minutes

        # Interview of newly connected nodes
        self.interview_timeout = 45.  # Seconds
        self.connect_without_ack_timeout = 45.  # Seconds

        # Alive messaging
        self.send_alive_every = 2.5 * 60.  # Seconds
        self.last_alive_time = 0.
        self.skip_was_alive_check = os.getenv("NODE_IGNORE_ALIVE", "0") == "1"

        # Time at which the node's run loop started
        self.run_start_time = 0.

        # Root server-related
        self.root_endpoint = 'https://unaiverse.io/api'  # WARNING: EDITING THIS ADDRESS VIOLATES THE LICENSE
        self.node_token = ""
        self.public_key = ""

        # Output console text (NODE_PRINT: 0 = silent, 1 = node output, 2 = also debug output)
        print_level = int(os.getenv("NODE_PRINT", "0"))  # 0, 1, 2
        self.print_enabled = print_level > 0
        self.cursor_hidden = False
        NodeSynchronizer.DEBUG = print_level > 1
        NodeConn.DEBUG = print_level > 1
        if print_level == 0:
            self.cursor_hidden = True
        # Rotate the per-node console color under the class-level lock
        with Node.TEXT_LOCK:
            self.text_color = Node.TEXT_COLORS[Node.TEXT_LAST_USED_COLOR]
            Node.TEXT_LAST_USED_COLOR = (Node.TEXT_LAST_USED_COLOR + 1) % len(Node.TEXT_COLORS)

        # Print-related logging (for inspector only): fixed-size ring buffer of the last 20 messages
        self._output_messages = [""] * 20
        self._output_messages_ids = [-1] * 20
        self._output_messages_count = 0
        self._output_messages_last_pos = -1

        # Attributes: handshake-related
        self.agents_to_interview: dict[str, [float, NodeProfile | None]] = {}  # Peer_id -> [time, profile | None]
        self.agents_expected_to_send_ack = {}
        # NOTE(review): self.conn is still None here, so this is deque(maxlen=None), i.e. an
        # unbounded deque — a numeric limit (e.g. max connections) was probably intended; confirm
        self.last_rejected_agents = deque(maxlen=self.conn)
        self.joining_world_info = None
        self.first = True

        # Inspector related
        self.inspector_activated = False
        self.inspector_peer_id = None
        self.debug_server_running = False
        self.__inspector_cache = {"behav": None, "known_streams_count": 0, "all_agents_count": 0}
        self.__inspector_told_to_pause = False

        # Get key (env var / cache file / interactive prompt, see utils.misc)
        self.unaiverse_key = get_key_considering_multiple_sources(self.unaiverse_key)

        # Getting node ID (retrieving by name), if it was not provided (the node is created if not existing)
        if self.node_id is None:
            node_ids, were_alive = self.get_node_id_by_name([node_name],
                                                            create_if_missing=True)
            self.node_id = node_ids[0]
            if were_alive[0]:
                raise GenException(f"Cannot access node {node_name}, it is already running! "
                                   f"(set env variable NODE_IGNORE_ALIVE=1 to ignore this control)")

        # Automatically create a unique data directory for this specific node
        node_identity_dir = os.path.join(base_identity_dir, self.node_id)
        p2p_u_identity_dir = os.path.join(node_identity_dir, "p2p_public")
        p2p_w_identity_dir = os.path.join(node_identity_dir, "p2p_private")

        # Getting node ID of world masters, if needed (names are resolved/created as AGENT nodes)
        if world_masters_node_names is not None and len(world_masters_node_names) > 0:
            master_node_ids, were_alive = self.get_node_id_by_name(world_masters_node_names,
                                                                   create_if_missing=True, node_type=Node.AGENT)
            for master_node_name, master_node_id in zip(world_masters_node_names, master_node_ids):
                if master_node_id is None:
                    raise GenException(f"Cannot find world master node ID given its name: {master_node_name}")
                else:
                    if self.world_masters_node_ids is None:
                        self.world_masters_node_ids = set()
                    self.world_masters_node_ids.add(master_node_id)

        # Here you can setup max_instances, max_channels, enable_logging at libp2p level etc.
        P2P.setup_library(enable_logging=os.getenv("NODE_LIBP2PLOG", "0") == "1")

        offer_relay_facilities = self.node_type is Node.WORLD  # Only world nodes offer relay facilities

        # Create P2P node in the whole universe (it has fields 'addresses', and 'peer_id', and 'libp2p')
        p2p_u = P2P(identity_dir=p2p_u_identity_dir,
                    port=int(os.getenv("NODE_STARTING_PORT", "0")),
                    ips=None,
                    enable_relay_client=allow_connection_through_relay,
                    enable_relay_service=offer_relay_facilities,
                    knows_is_public=os.getenv("NODE_IS_PUBLIC", "0") == "1",
                    max_connections=1000,
                    enable_tls=os.getenv("NODE_USE_TLS", "0") == "1",
                    domain_name=os.getenv("DOMAIN", None),
                    tls_cert_path=os.getenv("TLS_CERT_PATH", None),
                    tls_key_path=os.getenv("TLS_KEY_PATH", None))

        # Create another P2P node for the private world (it has fields 'addresses', and 'peer_id', and 'libp2p')
        # The private node uses starting port + 4 when a starting port was explicitly requested
        p2p_w = P2P(identity_dir=p2p_w_identity_dir,
                    port=(int(os.getenv("NODE_STARTING_PORT", "0")) + 4)
                    if int(os.getenv("NODE_STARTING_PORT", "0")) > 0 else 0,
                    ips=None,
                    enable_relay_client=allow_connection_through_relay,
                    enable_relay_service=offer_relay_facilities,
                    knows_is_public=os.getenv("NODE_IS_PUBLIC", "0") == "1",
                    max_connections=1000,
                    enable_tls=os.getenv("NODE_USE_TLS", "0") == "1",
                    domain_name=os.getenv("DOMAIN", None),
                    tls_cert_path=os.getenv("TLS_CERT_PATH", None),
                    tls_key_path=os.getenv("TLS_KEY_PATH", None))

        # Get first node token
        self.get_node_token(peer_ids=[p2p_u.peer_id, p2p_w.peer_id])  # Passing both the peer IDs

        # Get first badge token (world nodes only)
        if self.node_type is Node.WORLD:
            # NOTE(review): this api path has no leading slash, unlike the other calls — confirm intended
            self.badge_token = self.__root(api="account/node/cv/badge/token/get", payload={"node_id": self.node_id})
        else:
            self.badge_token = None

        # Get profile (static)
        profile_static = self.__root(api="/account/node/profile/static/get", payload={"node_id": self.node_id})

        # Getting list of allowed nodes from the static profile,
        # if we did not already specify it when creating the node in the code (the code has higher priority)
        if (self.allowed_node_ids is None and 'allowed_node_ids' in profile_static and
                profile_static['allowed_node_ids'] is not None and len(profile_static['allowed_node_ids']) > 0):
            self.allowed_node_ids = set(profile_static['allowed_node_ids'])

        # Getting list of world master nodes from the static profile,
        # if we did not already specify it when creating the node in the code (the code has higher priority)
        if self.node_type is Node.WORLD:
            if (self.world_masters_node_ids is None and 'world_masters_node_ids' in profile_static and
                    profile_static['world_masters_node_ids'] is not None
                    and len(profile_static['world_masters_node_ids']) > 0):
                self.world_masters_node_ids = set(profile_static['world_masters_node_ids'])
        else:
            self.world_masters_node_ids = None  # Clearing this in case the user specified it for a non-world node

        # Creating the connection manager: the max number of valid connections comes from the
        # static profile, and the connection manager will ensure that this limit is fulfilled
        # (the number of connection attempts handled by libp2p can be higher than that)
        self.conn = NodeConn(max_connections=profile_static['max_nr_connections'],
                             p2p_u=p2p_u,
                             p2p_w=p2p_w,
                             is_world_node=self.node_type is Node.WORLD,
                             public_key=self.public_key,
                             token=self.node_token)

        # Get CV
        cv = self.get_cv()

        # Creating full node profile putting together static info, dynamic profile, adding P2P node info, CV
        self.profile = NodeProfile(static=profile_static,
                                   dynamic={'peer_id': p2p_u.peer_id,
                                            'peer_addresses': p2p_u.addresses,
                                            'private_peer_id': p2p_w.peer_id,
                                            'private_peer_addresses': p2p_w.addresses,
                                            'connections': {
                                                'role': self.hosted.ROLE_BITS_TO_STR[self.hosted.ROLE_PUBLIC]
                                            },
                                            'world_summary': {
                                                'world_name':
                                                    profile_static['node_name']
                                                    if self.node_type is Node.WORLD else None
                                            },
                                            "world_roles_fsm": None,  # This will be filled later if this is a world
                                            "hidden": hidden  # Marking the node as hidden (or not)
                                            },
                                   cv=cv)  # Adding CV here

        # Sharing node-level info with the hosted entity
        self.hosted.set_node_info(self.clock, self.conn, self.profile, self.out, self.ask_to_get_in_touch,
                                  self.__purge, self.agents_expected_to_send_ack, print_level)

        # Finally, sending dynamic profile to the root server
        # (send AFTER set_node_info, not before, since set_node_info updates the profile,
        # adding world roles and state machines)
        self.send_dynamic_profile()

        # Save public addresses (opt-in via env var, appended to a CSV in the given directory)
        path_to_append_addresses = os.getenv("NODE_SAVE_RUNNING_ADDRESSES")
        if path_to_append_addresses is not None and os.path.exists(path_to_append_addresses):
            save_node_addresses_to_file(self, public=True, dir_path=path_to_append_addresses,
                                        filename="running.csv", append=True)

        # Update lone-wolf machines to replace default wildcards (like <agent>) - the private one will be handled when
        # joining a world
        if self.node_type is Node.AGENT:
            self.agent.behav_lone_wolf.update_wildcard("<agent>", f"{self.get_public_peer_id()}")
|
|
320
|
+
|
|
321
|
+
    def out(self, msg: str):
        """Prints a formatted message to the console if printing is enabled.

        Also appends the (HTML-escaped) message to a fixed-size ring buffer when an
        inspector or debug server is attached, so recent output can be replayed.

        Args:
            msg: The message to be printed.
        """
        if self.print_enabled:
            # Prefix: first two chars of the node type, truncated hosted name, clock cycle
            s = (f"{self.node_type[0:2]}: " +
                 ((self.hosted.get_name())[0:6] + ",").ljust(7) +
                 f" cy: {self.clock.get_cycle()}")
            s = f"[{s}] {msg}"
            # Colored output (per-node ANSI color), reset at end of line
            print(f"{self.text_color}{s}\033[0m")

        if self.inspector_activated or self.debug_server_running:
            # Ring buffer update: advance position, bump monotonically increasing message id,
            # and store the escaped message (count saturates at the buffer length)
            last_id = self._output_messages_ids[self._output_messages_last_pos]
            self._output_messages_last_pos = (self._output_messages_last_pos + 1) % len(self._output_messages)
            self._output_messages_count = min(self._output_messages_count + 1, len(self._output_messages))
            self._output_messages_ids[self._output_messages_last_pos] = last_id + 1
            self._output_messages[self._output_messages_last_pos] = html.escape(str(msg), quote=True)
|
|
340
|
+
|
|
341
|
+
def err(self, msg: str):
|
|
342
|
+
"""Prints a formatted error message to the console.
|
|
343
|
+
|
|
344
|
+
Args:
|
|
345
|
+
msg: The error message to be printed.
|
|
346
|
+
"""
|
|
347
|
+
when = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
|
|
348
|
+
if self.print_enabled:
|
|
349
|
+
self.out(f"<ERROR> [{when}] " + msg)
|
|
350
|
+
else:
|
|
351
|
+
print(f"<ERROR> [{when}] " + msg)
|
|
352
|
+
|
|
353
|
+
def get_node_id_by_name(self, node_names: list[str], create_if_missing: bool = False,
|
|
354
|
+
node_type: str | None = None) -> tuple[list[str], list[bool]]:
|
|
355
|
+
"""Retrieves the node ID by its name from the root server, creating a new node if it's missing and specified.
|
|
356
|
+
|
|
357
|
+
Args:
|
|
358
|
+
node_names: The list with the names of the nodes to retrieve.
|
|
359
|
+
create_if_missing: A flag to create the node if it doesn't exist (only valid for your own nodes).
|
|
360
|
+
node_type: The type of the node to create if missing (when create_if_missing is True) - default: the type of
|
|
361
|
+
the current node.
|
|
362
|
+
|
|
363
|
+
Returns:
|
|
364
|
+
The list of node IDs and the list of boolean flags telling if a node was already alive,
|
|
365
|
+
or an exception if an error occurs.
|
|
366
|
+
"""
|
|
367
|
+
try:
|
|
368
|
+
response = self.__root("/account/node/get/id",
|
|
369
|
+
payload={"node_name": node_names,
|
|
370
|
+
"account_token": self.unaiverse_key})
|
|
371
|
+
node_ids = []
|
|
372
|
+
were_alive = []
|
|
373
|
+
missing = []
|
|
374
|
+
for i in range(0, len(response["nodes"])):
|
|
375
|
+
if response["nodes"][i] is not None:
|
|
376
|
+
node_ids.append(response["nodes"][i]["node_id"])
|
|
377
|
+
were_alive.append(response["nodes"][i]["was_alive"])
|
|
378
|
+
else:
|
|
379
|
+
node_ids.append(None)
|
|
380
|
+
were_alive.append(None)
|
|
381
|
+
missing.append(i)
|
|
382
|
+
except Exception as e:
|
|
383
|
+
raise GenException(f"Error while retrieving nodes named {node_names} from server! [{e}]")
|
|
384
|
+
|
|
385
|
+
if create_if_missing:
|
|
386
|
+
for i in missing:
|
|
387
|
+
node_name = node_names[i]
|
|
388
|
+
if "/" in node_name or "@" in node_name: # Cannot create nodes belonging to others
|
|
389
|
+
continue
|
|
390
|
+
try:
|
|
391
|
+
response = self.__root("/account/node/fast_register",
|
|
392
|
+
payload={"node_name": node_name,
|
|
393
|
+
"node_type": self.node_type if node_type is None else node_type,
|
|
394
|
+
"account_token": self.unaiverse_key})
|
|
395
|
+
node_ids[i] = response["node_id"]
|
|
396
|
+
were_alive[i] = False
|
|
397
|
+
except Exception as e:
|
|
398
|
+
raise GenException(f"Error while registering node named {node_name} in server! [{e}]")
|
|
399
|
+
return node_ids, were_alive
|
|
400
|
+
|
|
401
|
+
def send_alive(self) -> bool:
|
|
402
|
+
"""Send an alive message to the root server.
|
|
403
|
+
|
|
404
|
+
Returns:
|
|
405
|
+
A boolean flag indicating whether the node was already live before sending this.
|
|
406
|
+
"""
|
|
407
|
+
try:
|
|
408
|
+
response = self.__root("/account/node/alive",
|
|
409
|
+
payload={"node_id": self.node_id,
|
|
410
|
+
"account_token": self.unaiverse_key})
|
|
411
|
+
return response["was_alive"]
|
|
412
|
+
except Exception as e:
|
|
413
|
+
self.err(f"Error while sending alive message to server! [{e}]")
|
|
414
|
+
|
|
415
|
+
    def get_node_token(self, peer_ids):
        """Generates and retrieves a node token from the root server.

        Retries up to 3 times; on final failure a GenException is raised. On success,
        self.node_token and self.public_key are refreshed and the new token is pushed
        to the connection manager (if it already exists).

        Args:
            peer_ids: A list of public and private peer IDs.
        """
        response = None

        for i in range(0, 3):  # It will try 3 times before raising the exception...
            try:
                # The account token is only sent when we do not yet hold a node token;
                # otherwise the existing node token authenticates the refresh
                response = self.__root("/account/node/token/generate",
                                       payload={"node_id": self.node_id,
                                                "account_token": self.unaiverse_key
                                                if self.node_token is None or len(self.node_token) == 0 else None,
                                                "node_token": self.node_token, "peer_ids": json.dumps(peer_ids)})
                break
            except Exception as e:
                if i < 2:
                    self.err(f"Error while getting token from server, retrying...")
                    time.sleep(1)  # Wait a little bit
                else:
                    raise GenException(f"Error while getting token from server [{e}]")  # Raise the exception

        self.node_token = response["token"]
        self.public_key = response["public_key"]

        # Sharing the token with the connection manager
        # (self.conn is None on the first call, made from __init__ before NodeConn is created)
        if self.conn is not None:
            self.conn.set_token(self.node_token)
|
|
444
|
+
|
|
445
|
+
def get_cv(self):
|
|
446
|
+
"""Retrieves the node's CV (Curriculum Vitae) from the root server
|
|
447
|
+
|
|
448
|
+
Returns:
|
|
449
|
+
The node's CV as a dictionary.
|
|
450
|
+
"""
|
|
451
|
+
for i in range(0, 3): # It will try 3 times before raising the exception...
|
|
452
|
+
try:
|
|
453
|
+
return self.__root(api="/account/node/cv/get", payload={"node_id": self.node_id})
|
|
454
|
+
except Exception as e:
|
|
455
|
+
self.err(f"Error while getting CV from server [{e}]")
|
|
456
|
+
if i < 2:
|
|
457
|
+
self.out("Retrying...")
|
|
458
|
+
time.sleep(1) # Wait a little bit
|
|
459
|
+
else:
|
|
460
|
+
raise GenException(f"Error while getting CV from server [{e}]")
|
|
461
|
+
|
|
462
|
+
def send_dynamic_profile(self):
|
|
463
|
+
"""Sends the node's dynamic profile to the root server."""
|
|
464
|
+
try:
|
|
465
|
+
self.__root(api="/account/node/profile/dynamic/post", payload={"node_id": self.node_id,
|
|
466
|
+
"profile":
|
|
467
|
+
self.profile.get_dynamic_profile()})
|
|
468
|
+
except Exception as e:
|
|
469
|
+
self.err(f"Error while sending dynamic profile to from server [{e}]")
|
|
470
|
+
|
|
471
|
+
    def send_badges(self):
        """Sends new badges assigned by a world node to the root server and notifies the agents.

        Only meaningful on world nodes. The whole post-and-notify procedure is retried up to
        3 times; after the last failed attempt the error is logged (never raised). On success,
        the world's pending badge set is cleared and each successfully-posted agent is asked
        to re-download its CV from the root server.
        """
        if self.node_type is Node.WORLD:
            # Mapping: peer_id -> list of badges pending for that peer
            peer_id_to_badges = self.world.get_all_badges()
            if len(peer_id_to_badges) > 0:
                self.out(f"Sending {len(peer_id_to_badges)} badges to root server")
                for i in range(0, 3):  # It will try 3 times before raising the exception...
                    try:
                        # Flatten to two parallel lists: badges[z] belongs to peer_ids[z]
                        badges = [badge for _badges in peer_id_to_badges.values() for badge in _badges]
                        peer_ids = [peer_id for peer_id, _badges in peer_id_to_badges.items() for _ in _badges]

                        response = self.__root(api="/account/node/cv/badge/assign",
                                               payload={"badges": badges,
                                                        "world_node_id": self.node_id,
                                                        "world_badge_token": self.badge_token})

                        # Getting the next badge token (tokens are single-use and rotated per call)
                        self.badge_token = response["badge_token"]
                        badges_states = response["badges_states"]  # Per-badge outcome records

                        # Check if posting went well and saving the set of peer IDs to contact
                        peer_ids_to_notify = set()
                        for z in range(0, len(badges_states)):
                            ret = badges_states[z]
                            if 'state' not in ret or 'code' not in ret['state'] or 'message' not in ret['state']:
                                self.err(f"Error while posting a badge assigned to {peer_ids[z]}. Badge: {badges[z]}. "
                                         f"Error message: invalid response format")
                            else:
                                if ret['state']['code'] != "ok":
                                    self.err(f"Error while posting a badge assigned to {peer_ids[z]}. "
                                             f"Badge: {badges[z]}. "
                                             f"Error message: {ret['state']['message']}")
                                else:
                                    peer_ids_to_notify.add(peer_ids[z])

                        # Notify agents so they refresh their CV from the root server
                        for peer_id in peer_ids_to_notify:
                            if not self.conn.send(peer_id, channel_trail=None, content=None,
                                                  content_type=Msg.GET_CV_FROM_ROOT):
                                self.err(f"Error while sending the request to re-download CV to peer {peer_id}")

                        # Clearing the world-side pending badge set once the batch is handled
                        self.world.clear_badges()
                        break
                    except Exception as e:
                        self.err(f"Error while sending badges to server or when notifying peers [{e}]")
                        if i < 2:
                            self.out("Retrying...")
                            time.sleep(1)  # Wait a little bit
                        else:
                            self.err(f"Couldn't complete badge sending or notification procedure (stop trying)")
|
|
522
|
+
|
|
523
|
+
def get_public_addresses(self) -> list[str]:
|
|
524
|
+
"""Returns the public addresses of the P2P node
|
|
525
|
+
|
|
526
|
+
Returns:
|
|
527
|
+
The list of public addresses.
|
|
528
|
+
"""
|
|
529
|
+
return self.conn[NodeConn.P2P_PUBLIC].addresses
|
|
530
|
+
|
|
531
|
+
def get_world_addresses(self) -> list[str]:
|
|
532
|
+
"""Returns the world addresses of the P2P node
|
|
533
|
+
|
|
534
|
+
Returns:
|
|
535
|
+
The list of world addresses.
|
|
536
|
+
"""
|
|
537
|
+
return self.conn[NodeConn.P2P_WORLD].addresses
|
|
538
|
+
|
|
539
|
+
def get_public_peer_id(self) -> str:
|
|
540
|
+
"""Returns the public peer ID of the P2P node
|
|
541
|
+
|
|
542
|
+
Returns:
|
|
543
|
+
The public peer ID.
|
|
544
|
+
"""
|
|
545
|
+
return self.conn[NodeConn.P2P_PUBLIC].peer_id
|
|
546
|
+
|
|
547
|
+
def get_world_peer_id(self) -> str:
|
|
548
|
+
"""Returns the world peer ID of the P2P node
|
|
549
|
+
|
|
550
|
+
Returns:
|
|
551
|
+
The world peer ID.
|
|
552
|
+
"""
|
|
553
|
+
return self.conn[NodeConn.P2P_WORLD].peer_id
|
|
554
|
+
|
|
555
|
+
def ask_to_get_in_touch(self, node_name: str | None = None, addresses: list[str] | None = None, public: bool = True,
                        before_updating_pools_fcn=None, run_count: int = 0):
    """Tries to connect to another agent or world node.

    Exactly one of `node_name` / `addresses` must be provided; when only the
    name is given, the addresses are resolved through the root server API.
    After a successful transport-level connect, a ping message is sent to
    verify the channel is actually usable; on ping failure the whole procedure
    is retried recursively up to two more times (`run_count` tracks the depth).

    Args:
        node_name: Name of the node to join (alternative to addresses below)
        addresses: A list of network addresses to connect to (alternative to node_name).
        public: A boolean flag indicating whether to use the public or world P2P network.
        before_updating_pools_fcn: A function to call before updating the connection pools.
        run_count: The number of connection attempts made.

    Returns:
        The peer ID of the connected node if successful, otherwise None.

    Raises:
        GenException: If both or neither of node_name/addresses are given.
    """

    # Checking arguments: node_name and addresses are mutually exclusive, one is required
    if (node_name is None and addresses is None) or (node_name is not None and addresses is not None):
        raise GenException("Cannot specify both node_name and addresses or none of them, check your code!")

    # Getting addresses, if needed (resolve the node name through the root server)
    if addresses is None:
        addresses = self.__root(api="account/node/get/addresses",
                                payload={"node_name": node_name, "account_token": self.unaiverse_key})["addresses"]

    # Connecting on the selected P2P network (public vs. world/private)
    self.out("Connecting to another agent/world...")
    peer_id, through_relay = self.conn.connect(addresses,
                                               p2p_name=NodeConn.P2P_PUBLIC if public else NodeConn.P2P_WORLD)

    if through_relay:
        print("Warning: this connection goes through a relay-based circuit, "
              "so a third-party node is involved in the communication")

    # Relay-based connections are accepted only if explicitly allowed
    if peer_id is not None and (not through_relay or self.talk_to_relay_based_nodes):

        # Ping to test the readiness of the established connection
        self.out(f"Connected, ping-pong...")
        if not self.conn.send(peer_id, channel_trail=None, content_type=Msg.MISC, content={"ping": "pong"},
                              p2p=self.conn.p2p_name_to_p2p[NodeConn.P2P_PUBLIC if public else NodeConn.P2P_WORLD]):
            if run_count < 2:
                # Retry with the already-resolved addresses (node_name not needed anymore)
                return self.ask_to_get_in_touch(addresses=addresses, public=public,
                                                before_updating_pools_fcn=before_updating_pools_fcn,
                                                run_count=run_count+1)
            else:
                self.err("Connection failed! (ping-pong max trials exceeded)")
                return None

        # Ping succeeded: run the optional pre-update hook, then refresh the pools
        self.out("Connected, updating pools...")
        if before_updating_pools_fcn is not None:
            before_updating_pools_fcn(peer_id)
        self.conn.update()

        # The freshly connected peer is expected to acknowledge our profile
        self.agents_expected_to_send_ack[peer_id] = self.clock.get_time()
        self.out(f"Current set of {len(self.agents_expected_to_send_ack)} connected peer IDs that will get our "
                 f"profile and are expected to send a confirmation: "
                 f"{self.agents_expected_to_send_ack.keys()}")
        return peer_id
    else:
        self.err("Connection failed!")
        return None
|
|
615
|
+
|
|
616
|
+
def ask_to_join_world(self, node_name: str | None = None, addresses: list[str] | None = None, **kwargs):
    """Initiates a request to join a world.

    Leaves any already-joined world first, then connects to the world node on
    the public network and records the pending join so the later handshake can
    complete it.

    Args:
        node_name: The name of the node hosting the world to join (alternative to addresses below).
        addresses: A list of network addresses of the world node (alternative to world_name).
        **kwargs: Additional options for joining the world.

    Returns:
        The public peer ID of the world node if the connection request is successful, otherwise None.
    """
    print("Asking to join world...")

    # If we are already inside a world, leave it before joining a new one
    current_world_peer_id = self.profile.get_dynamic_profile()['connections']['world_peer_id']
    if current_world_peer_id is not None:
        self.leave(current_world_peer_id)

    # Establish the connection to the world node over the public network
    world_public_peer_id = self.ask_to_get_in_touch(node_name=node_name, addresses=addresses, public=True)

    if world_public_peer_id is None:
        print("Failed to join world!")
        return world_public_peer_id

    # Remember the pending join (and its options) for the upcoming handshake
    print("Connected on the public network, waiting for handshake...")
    self.joining_world_info = {"world_public_peer_id": world_public_peer_id, "options": kwargs}
    return world_public_peer_id
|
|
644
|
+
|
|
645
|
+
def leave(self, peer_id: str):
    """Disconnects the node from a specific peer, typically a world.

    When the peer is the current world node, all world-related state is torn
    down (connection-manager lists, connected world agents, profile fields,
    agent-level data, pending join options). For any other peer, it is simply
    removed from the hosted set (if present) and from the connection manager.

    Args:
        peer_id: The peer ID of the node to leave.
    """

    # Defensive check: silently refuse non-string arguments (log and return)
    if not isinstance(peer_id, str):
        self.err(f"Invalid argument provided to leave(...): {peer_id}")
        return

    print(f"Leaving {peer_id}...")

    dynamic_profile = self.profile.get_dynamic_profile()

    if peer_id == dynamic_profile['connections']['world_peer_id']:
        print("Leaving world...")

        # Clearing world-related lists in the connection manager (to avoid world agent to connect again)
        self.conn.set_world(None)
        self.conn.set_world_agents_list(None)
        self.conn.set_world_masters_list(None)

        # Disconnecting all connected world-related agents, including world node (it clears roles too)
        self.conn.remove_all_world_agents()

        # Better clear this as well: the world will never send its ack now
        if peer_id in self.agents_expected_to_send_ack:
            del self.agents_expected_to_send_ack[peer_id]

        # Clear profile (world-related connection fields) and flag the change
        dynamic_profile['connections']['world_peer_id'] = None
        dynamic_profile['connections']['world_agents'] = None
        dynamic_profile['connections']['world_masters'] = None
        self.profile.mark_change_in_connections()

        # Clearing agent-level info
        self.agent.clear_world_related_data()

        # Clearing all joining options (any pending join is now void)
        self.joining_world_info = None
    else:
        # Not the world node: drop it from the hosted agents (if there) and disconnect
        if peer_id in self.hosted.all_agents:
            self.hosted.remove_agent(peer_id)
        self.conn.remove(peer_id)
|
|
690
|
+
|
|
691
|
+
def leave_world(self):
    """Leave the currently joined world, if any (no-op otherwise).

    Returns:
        None.
    """
    world_peer_id = self.profile.get_dynamic_profile()['connections']['world_peer_id']
    if world_peer_id is None:
        return

    # Fall back to the public role and drop the world profile before leaving
    self.agent.accept_new_role(self.agent.ROLE_PUBLIC)
    self.agent.world_profile = None
    self.leave(world_peer_id)
|
|
701
|
+
|
|
702
|
+
def run(self, cycles: int | None = None, max_time: float | None = None, interact_mode_opts: dict | None = None):
    """Starts the main execution loop for the node.

    The loop: sends periodic "alive" messages, serves the inspector, advances
    the clock, handles connections and messages, streams data, triggers the
    agent's behavior (or the interactive keyboard/webcam mode), refreshes the
    dynamic profile, tokens, addresses and relay reservation, and checks the
    stop conditions.

    Args:
        cycles: The number of clock cycles to run the loop for. If None, runs indefinitely.
        max_time: The maximum time in seconds to run the loop. If None, runs indefinitely.
        interact_mode_opts: A dictionary of options to enable interactive mode.

    Raises:
        GenException: If `cycles` is invalid or interactive mode is misconfigured.
    """
    try:
        if self.cursor_hidden:
            sys.stdout.write("\033[?25l")  # Hide cursor

        last_dynamic_profile_time = self.clock.get_time()
        last_get_token_time = self.clock.get_time()
        last_address_check_time = self.clock.get_time()
        if not (cycles is None or cycles > 0):
            raise GenException("Invalid number of cycles")

        # Interactive mode (useful when chatting with lone wolves)
        keyboard_queue = None
        keyboard_listener = None
        processor_net_hash = None
        processor_img_stream = None
        processor_text_stream = None
        cap = None
        ready_to_interact = False

        if interact_mode_opts is not None:
            if self.agent is None:
                raise GenException("Interactive mode is only valid for agents")

            # Find the text (and optionally image) streams of the processor to interact with;
            # public streams are used when talking to a lone wolf, private ones otherwise
            processor_text_net_hash = None
            processor_img_net_hash = None
            looking_for_public_streams = "lone_wolf_peer_id" in interact_mode_opts
            for net_hash, stream_dict in self.agent.proc_streams.items():
                for stream in stream_dict.values():
                    if (processor_text_stream is None and stream.props.is_public() == looking_for_public_streams
                            and stream.props.is_text()):
                        processor_text_stream = stream
                        processor_text_net_hash = net_hash
                    if (processor_img_stream is None and stream.props.is_public() == looking_for_public_streams
                            and stream.props.is_img()):
                        processor_img_stream = stream
                        processor_img_net_hash = net_hash

            if processor_text_net_hash is None:
                raise GenException("Interactive mode requires a processor that generates a text stream")
            if not (processor_img_net_hash is None or (processor_text_net_hash == processor_img_net_hash)):
                raise GenException("Interactive mode requires the same processor to generate text and img streams")
            processor_net_hash = processor_text_net_hash

            def keyboard_listener(k_queue):
                # Background thread: blocks on stdin, optionally grabs a webcam frame,
                # and pushes (message, image-or-None) pairs into the queue
                while True:
                    webcam_shot = None
                    keyboard_msg = input()  # Get from keyboards
                    if cap is not None:
                        ret, got_shot = cap.read()  # Get from webcam
                        if ret:
                            # Downscale large frames so their area is at most ~224x224
                            target_area = 224 * 224
                            webcam_shot = Image.fromarray(cv2.cvtColor(got_shot, cv2.COLOR_BGR2RGB))
                            width, height = webcam_shot.size
                            current_area = width * height

                            if current_area > target_area:
                                scale_factor = math.sqrt(target_area / current_area)
                                new_width = int(round(width * scale_factor))
                                new_height = int(round(height * scale_factor))
                                webcam_shot = webcam_shot.resize((new_width, new_height),
                                                                 Image.Resampling.LANCZOS)

                    if keyboard_msg is not None and len(keyboard_msg) > 0:
                        k_queue.put((keyboard_msg, webcam_shot))  # Store in the asynch queue

                        if keyboard_msg.strip() == "exit" or keyboard_msg.strip() == "quit":
                            break

                    if not self.agent.in_world():  # If the world disconnected
                        break

            keyboard_queue = queue.Queue()  # Create a thread-safe queue for communication
            keyboard_listener = threading.Thread(target=keyboard_listener, args=(keyboard_queue,), daemon=True)

        if self.clock.get_cycle() == -1:
            print("Running " + ("agent node" if self.agent else "world node") + " " +
                  f"(public: {self.get_public_peer_id()}, private: {self.get_world_peer_id()})...")

        # Main loop
        must_quit = False
        self.run_start_time = self.clock.get_time()
        while not must_quit:

            # Sending alive message every "K" seconds
            if self.clock.get_time() - self.last_alive_time >= self.send_alive_every:
                was_alive = self.send_alive()

                # Checking only at the first run
                if self.last_alive_time == 0 and was_alive and not self.skip_was_alive_check:
                    print(f"The node is already alive, maybe running in a different machine? "
                          f"(set env variable NODE_IGNORE_ALIVE=1 to ignore this control)")
                    break  # Stopping the running cycle
                self.last_alive_time = self.clock.get_time()

            # Check inspector: when paused, keep serving inspector commands until resumed
            if self.inspector_activated:
                if self.__inspector_told_to_pause:
                    print("Paused by the inspector, waiting...")

                while self.__inspector_told_to_pause:
                    if not self.inspector_activated:  # Disconnected
                        self.__inspector_told_to_pause = False
                        print("Inspector is not active/connected anymore, resuming...")
                        break

                    public_messages = self.conn.get_messages(p2p_name=NodeConn.P2P_PUBLIC)
                    for msg in public_messages:
                        if msg.content_type == Msg.INSPECT_CMD:

                            # Unpacking piggyback
                            sender_node_id, sender_inspector_mode_on = (msg.piggyback[0:-1],
                                                                        msg.piggyback[-1] == "1")

                            # Is message from inspector?
                            sender_is_inspector = (sender_node_id == self.profile.get_static_profile()[
                                'inspector_node_id'] and
                                                   sender_inspector_mode_on)

                            if sender_is_inspector:
                                self.__handle_inspector_command(msg.content['cmd'], msg.content['arg'])
                            else:
                                self.err("Inspector command was not sent by the expected inspector node ID "
                                         "or no inspector connected")
                                self.__purge(msg.sender)
                    time.sleep(0.1)

            # Move to the next cycle
            while not self.clock.next_cycle():
                time.sleep(0.001)  # Seconds (lowest possible granularity level)

            self.out(f">>> Starting clock cycle {self.clock.get_cycle()} <<<")

            # Handle new connections or lost connections
            self.__handle_network_connections()

            # Handle (read, execute) received network data/commands
            self.__handle_network_messages(interact_mode_opts=interact_mode_opts)

            # Stream live data (generated and environmental)
            if len(self.hosted.all_agents) > 0:
                if self.node_type is Node.WORLD:
                    # On the very first cycle, restart all buffered streams
                    if self.first is True:
                        self.first = False
                        for net_hash, stream_dict in self.hosted.known_streams.items():
                            for stream_obj in stream_dict.values():
                                if isinstance(stream_obj, BufferedDataStream):
                                    stream_obj.restart()
                self.hosted.send_stream_samples()

            # Trigger HSM of the agent
            if self.node_type is Node.AGENT:
                if interact_mode_opts is not None:
                    try:

                        # Waiting until we meet a state named "ready"
                        if not ready_to_interact:
                            behav = self.agent.behav_lone_wolf \
                                if "lone_wolf_peer_id" in interact_mode_opts else self.agent.behav
                            if behav.state == "ready":
                                ready_to_interact = True
                                keyboard_listener.start()
                                cap = cv2.VideoCapture(0) if processor_img_stream is not None else None
                                print(f"\n*** Entering interactive text mode ***\n\n👉 ", end="")

                                # Redirect stdout to a file so node logs do not pollute the chat
                                original_stdout = sys.stdout  # Valid screen-related stream
                                sys.stdout = open('interact_stdout.txt',
                                                  'w')  # Open(os.devnull, 'w') # null stream
                                interact_mode_opts["stdout"] = [original_stdout, sys.stdout]

                        # Getting message from keyboard (raises queue.Empty when nothing was typed)
                        msg, image_pil = keyboard_queue.get_nowait()

                        # Quit?
                        msg = msg.strip()
                        if msg == "exit" or msg == "quit":
                            must_quit = True
                            interact_mode_opts["stdout"][1].close()
                            if cap is not None:
                                cap.release()
                        else:

                            # Asking to generate (the request will be immediately sent)
                            if "lone_wolf_peer_id" in interact_mode_opts:
                                behav = self.agent.behav_lone_wolf
                                other_behav = self.agent.behav

                                other_behav.enable(False)
                                behav.enable(True)
                                self.agent.ask_gen(agent=interact_mode_opts["lone_wolf_peer_id"],
                                                   u_hashes=[processor_net_hash],
                                                   samples=1)
                                behav.enable(False)
                            else:
                                self.agent.behav.request_action(action_name="ask_gen",
                                                                args={},
                                                                signature=self.get_world_peer_id(),
                                                                timestamp=self.clock.get_time(),
                                                                uuid=None)
                                behav = self.agent.behav
                                other_behav = self.agent.behav_lone_wolf
                                self.agent.behave()

                            # Loading the message and image to the processor's output streams
                            # they will be sent at the next clock cycle
                            other_behav.enable(False)
                            behav.enable(True)
                            if processor_img_stream is not None:
                                [msg, image_pil], _ = self.agent.generate(input_net_hashes=None,
                                                                          inputs=[msg, image_pil])
                                processor_text_stream.set(msg)
                                processor_img_stream.set(image_pil)
                            else:
                                [msg], _ = self.agent.generate(input_net_hashes=None,
                                                               inputs=[msg])
                                processor_text_stream.set(msg)
                            behav.enable(False)
                    except queue.Empty:
                        self.agent.behave()  # If nothing has been typed (+ enter)
                else:

                    # Ordinary behaviour
                    self.agent.behave()

            # Send dynamic profile every "N" seconds (only if connections changed)
            if (self.clock.get_time() - last_dynamic_profile_time >= self.send_dynamic_profile_every
                    and self.profile.connections_changed()):
                try:
                    last_dynamic_profile_time = self.clock.get_time()
                    self.profile.unmark_change_in_connections()
                    self.send_badges()  # Sending and clearing badges
                    self.send_dynamic_profile()  # Sending
                except Exception as e:
                    self.err(f"Error while sending the update dynamic profile (or badges) to the server "
                             f"(trying to go ahead...) [{e}]")

            # Getting a new token every "N" seconds
            if self.clock.get_time() - last_get_token_time >= self.get_new_token_every:
                self.get_node_token(peer_ids=[self.get_public_peer_id(), self.get_world_peer_id()])
                last_get_token_time = self.clock.get_time()

            # Check for address changes every "N" seconds
            if self.clock.get_time() - last_address_check_time >= self.address_check_every:
                self.out("Performing periodic check for address changes...")
                last_address_check_time = self.clock.get_time()
                try:
                    current_public_addrs = self.conn.p2p_public.get_node_addresses()
                    current_private_addrs = self.conn.p2p_world.get_node_addresses()
                    profile_public_addrs = self.profile.get_dynamic_profile().get('peer_addresses', [])
                    profile_private_addrs = self.profile.get_dynamic_profile().get('private_peer_addresses', [])

                    # TODO: if public addresses changed... (if this makes any sense)
                    if set(current_public_addrs) != set(profile_public_addrs):
                        self.out(f"Address change detected for the public instance! "
                                 f"New addresses: {current_public_addrs}")

                        # Update profile in-place
                        # address_list = self.profile.get_dynamic_profile()['peer_addresses']
                        # address_list.clear()
                        # address_list.extend(current_public_addrs)
                        # self.profile.mark_change_in_connections()

                    # If private addresses changed, update the profile and notify the world
                    elif set(current_private_addrs) != set(profile_private_addrs):
                        # Bug fix: this message previously printed current_public_addrs,
                        # which is misleading in the private-address branch
                        self.out(f"Address change detected for the private instance! "
                                 f"New addresses: {current_private_addrs}")

                        # Update profile in-place
                        address_list = self.profile.get_dynamic_profile()['private_peer_addresses']
                        address_list.clear()
                        address_list.extend(current_private_addrs)
                        # self.profile.mark_change_in_connections()

                        world_peer_id = (
                            self.profile.get_dynamic_profile().get('connections', {}).get('world_peer_id'))
                        if self.node_type is Node.AGENT and world_peer_id:
                            self.out("Notifying world of address change...")
                            self.conn.send(
                                world_peer_id, content_type=Msg.ADDRESS_UPDATE, channel_trail=None,
                                content={'addresses': self.profile.get_dynamic_profile()['private_peer_addresses']}
                            )
                    else:
                        self.out("No address changes detected.")
                except Exception as e:
                    self.err(f"Failed to check for address updates: {e}")

            # Refresh relay reservation if nearing expiration
            if self.relay_reservation_expiry is not None:
                time_to_expiry = self.relay_reservation_expiry - datetime.now(timezone.utc)
                if time_to_expiry < timedelta(minutes=15):
                    self.out("Relay reservation nearing expiration. Attempting to renew...")
                    try:
                        world_private_peer_id = self.profile.get_dynamic_profile()['connections']['world_peer_id']
                        new_expiry_utc = self.conn.p2p_world.reserve_on_relay(world_private_peer_id)
                        self.relay_reservation_expiry = datetime.fromisoformat(
                            new_expiry_utc.replace('Z', '+00:00'))
                        self.out(f"Relay reservation renewed. New expiration: "
                                 f"{self.relay_reservation_expiry.strftime('%Y-%m-%d %H:%M:%S')} UTC")
                    except Exception as e:
                        self.err(f"Failed to renew relay reservation: {e}. Node may become unreachable.")
                        self.relay_reservation_expiry = None  # Stop trying if it fails

            # Talking to the inspector
            if self.inspector_activated:
                self.__send_to_inspector()

            # Stop conditions
            if cycles is not None and ((self.clock.get_cycle() + 1) >= cycles):
                break
            if max_time is not None and (self.clock.get_time() - self.run_start_time) >= max_time:
                break

    except KeyboardInterrupt:
        if self.cursor_hidden:
            sys.stdout.write("\033[?25h")  # Re-enabling cursor
        if cycles == 1:
            raise KeyboardInterrupt  # Node synch will catch this
        else:
            print("\nDetected Ctrl+C! Exiting gracefully...")

    except Exception as e:
        if self.cursor_hidden:
            sys.stdout.write("\033[?25h")  # Re-enabling cursor
        print(f"An error occurred: {e}")
        traceback.print_exc()
|
|
1034
|
+
|
|
1035
|
+
def __handle_network_connections(self):
    """Manages new and lost network connections.

    Refreshes the world agents/masters lists (agent nodes only), updates the
    connection pools, purges removed peers (handling world-disconnection and
    inspector-disconnection side effects), processes newly connected peers
    (adding them to the world or enqueuing them for interview), publishes the
    rendezvous list (world nodes only), and finally refreshes the connection
    summary in the dynamic profile when the set of peers changed.
    """

    # Getting fresh lists of existing world agents and world masters (from the rendezvous)
    if self.node_type is Node.AGENT:
        self.out("Updating list of world agents and world masters by using data from the rendezvous")
        self.conn.set_world_agents_and_world_masters_lists_from_rendezvous()

    # Updating connection pools, getting back the lists (well, dictionaries) of new agents and lost agents
    new_peer_ids_by_pool, removed_peer_ids_by_pool = self.conn.update()
    if len(new_peer_ids_by_pool) > 0 or len(removed_peer_ids_by_pool) > 0:
        self.out("Current status of the pools, right after the update:\n" + str(self.conn))

    # Checking if some peers were removed
    an_agent_left_the_world = False
    removed_peers = False
    for pool_name, removed_peer_ids in removed_peer_ids_by_pool.items():
        for peer_id in removed_peer_ids:
            removed_peers = True
            self.out("Removing a not-connected-anymore peer, "
                     "pool_name: " + pool_name + ", peer_id: " + peer_id + "...")
            self.__purge(peer_id)

            # Checking if we removed an agent from this world
            if self.node_type is Node.WORLD and pool_name in self.conn.WORLD:
                an_agent_left_the_world = True

            # Check if the world disconnected: in that case, disconnect all the other agents in the world and leave
            if self.node_type is Node.AGENT and pool_name in self.conn.WORLD_NODE:
                self.leave_world()

            # Checking if the inspector disconnected: reset inspector state and cache
            if peer_id == self.inspector_peer_id:
                self.inspector_activated = False
                self.inspector_peer_id = None
                self.__inspector_cache = {"behav": None, "known_streams_count": 0, "all_agents_count": 0}
                print("Inspector disconnected")

    # Handling newly connected peers
    an_agent_joined_the_world = False
    added_peers = False
    for pool_name, new_peer_ids in new_peer_ids_by_pool.items():
        for peer_id in new_peer_ids:
            added_peers = True
            self.out("Processing a newly connected peers, "
                     "pool_name: " + pool_name + ", peer_id: " + peer_id + "...")

            # If this is a world node, it is time to tell the world object that a new agent is there
            if self.node_type is Node.WORLD and pool_name in self.conn.WORLD:
                self.out("Not considering interviewing since this is a world and the considered peer is in the"
                         " world pools")

                if peer_id in self.agents_to_interview:

                    # Getting the new agent profile
                    profile = self.agents_to_interview[peer_id][1]  # [time, profile]

                    # Adding the new agent to the world object (purge on failure)
                    if not self.world.add_agent(peer_id=peer_id, profile=profile):
                        self.__purge(peer_id)
                        continue

                    # Clearing the profile from the interviews
                    del self.agents_to_interview[peer_id]  # Removing from the queue (private peer id)
                    an_agent_joined_the_world = True

                    # Replacing multi-address with what comes from the profile (there are more addresses there!)
                    self.conn.set_addresses_in_peer_info(peer_id,
                                                         profile.get_dynamic_profile()['private_peer_addresses'])
                else:

                    # This agent tried to connect to a world "directly", without passing through the
                    # public handshake
                    self.__purge(peer_id)
                    continue

                continue  # Nothing else to do

            # Both if this is an agent or a world, checks if the newly connected agent can be added or not to the
            # queue of agents to interview
            if pool_name not in self.conn.OUTGOING:

                # Trying to add to the queue
                enqueued_for_interview = self.__interview_enqueue(peer_id)

                # If the agent is rejected at this stage, we disconnect from its peer
                if not enqueued_for_interview:
                    self.out(f"Not enqueued for interview, removing peer (disconnecting {peer_id})")
                    self.__purge(peer_id)
                else:
                    self.out("Enqueued for interview")

    # Updating list of world agents & friends, if needed
    # (it happens only if the node hosts a world, otherwise 'an_agent_joined_the_world' and
    # 'an_agent_left_the_world' are certainly False)
    world_agents_peer_infos = None
    world_masters_peer_infos = None
    if self.node_type is Node.WORLD:
        enter_left = an_agent_joined_the_world or an_agent_left_the_world
        timeout = (self.clock.get_time() - self.last_rendezvous_time) >= self.publish_rendezvous_every

        if enter_left or timeout or self.world.role_changed_by_world or self.world.received_address_update:
            if enter_left or self.world.role_changed_by_world:

                # Updating world-node profile with the summary of currently connected agents in the world
                world_agents_peer_infos = self.conn.get_all_connected_peer_infos(NodeConn.WORLD_AGENTS)
                world_masters_peer_infos = self.conn.get_all_connected_peer_infos(NodeConn.WORLD_MASTERS)

                dynamic_profile = self.profile.get_dynamic_profile()
                dynamic_profile['world_summary']['world_agents'] = world_agents_peer_infos
                dynamic_profile['world_summary']['world_masters'] = world_masters_peer_infos
                dynamic_profile['world_summary']["world_agents_count"] = len(world_agents_peer_infos)
                dynamic_profile['world_summary']["world_masters_count"] = len(world_masters_peer_infos)
                dynamic_profile['world_summary']["total_agents"] = (len(world_agents_peer_infos) +
                                                                   len(world_masters_peer_infos))
                self.profile.mark_change_in_connections()

            # Publish updated list of (all) world agents (i.e., both agents and masters)
            world_all_peer_infos = self.conn.get_all_connected_peer_infos(NodeConn.WORLD)
            if not self.conn.publish(self.conn.p2p_world.peer_id, f"{self.conn.p2p_world.peer_id}::ps:rv",
                                     content_type=Msg.WORLD_AGENTS_LIST,
                                     content={"peers": world_all_peer_infos,
                                              "update_count": self.clock.get_cycle()}):
                self.err("Failed to publish the updated list of (all) world agents (ignoring)")
            else:
                self.last_rendezvous_time = self.clock.get_time()
                self.out(f"Rendezvous messages just published "
                         f"(tag: {self.clock.get_cycle()}, peers: {len(world_all_peer_infos)})")

            # Clearing
            self.world.role_changed_by_world = False
            self.world.received_address_update = False

    # Updating list of node connections (being this a world or a plain agent)
    if added_peers or removed_peers:

        # The following could have been already computed in the code above, let's reuse
        if world_agents_peer_infos is None:
            world_agents_peer_infos = self.conn.get_all_connected_peer_infos(NodeConn.WORLD_AGENTS)
        if world_masters_peer_infos is None:
            world_masters_peer_infos = self.conn.get_all_connected_peer_infos(NodeConn.WORLD_MASTERS)
        world_private_peer_id = self.conn.get_all_connected_peer_infos(NodeConn.WORLD_NODE)
        world_private_peer_id = world_private_peer_id[0]['id'] if len(world_private_peer_id) > 0 else None

        # This is only computed here
        public_agents_peer_infos = self.conn.get_all_connected_peer_infos(NodeConn.PUBLIC)

        # Updating node profile with the summary of currently connected peers
        dynamic_profile = self.profile.get_dynamic_profile()
        dynamic_profile['connections']['public_agents'] = public_agents_peer_infos
        dynamic_profile['connections']['world_agents'] = world_agents_peer_infos
        dynamic_profile['connections']['world_masters'] = world_masters_peer_infos
        dynamic_profile['connections']['world_peer_id'] = world_private_peer_id
        self.profile.mark_change_in_connections()
|
|
1189
|
+
|
|
1190
|
+
def __handle_network_messages(self, interact_mode_opts=None):
    """Handles and processes all incoming network messages.

    Fetches the pending messages from both the public and the world/private
    P2P layers and dispatches each one based on its content type (profile
    exchange, world/agent approvals, address updates, stream samples, action
    requests, role suggestions, HSM alterations, inspector traffic, etc.).
    Unexpected or malformed messages cause the sender to be purged
    (disconnected and removed from all queues).

    Args:
        interact_mode_opts: A dictionary of options for interactive mode.
            When it contains a "stdout" pair (on-stream, off-stream), textual
            and image stream samples are echoed to the console (useful when
            chatting with lone wolves).
    """
    # Fetching all messages,
    public_messages = self.conn.get_messages(p2p_name=NodeConn.P2P_PUBLIC)
    world_messages = self.conn.get_messages(p2p_name=NodeConn.P2P_WORLD)

    self.out("Got " + str(len(public_messages)) + " messages from the public net")
    self.out("Got " + str(len(world_messages)) + " messages from the world/private net")

    # Process all messages
    all_messages = public_messages + world_messages
    if len(all_messages) > 0:
        self.out("Processing all messages...")

    for i, msg in enumerate(all_messages):
        if i < len(public_messages):
            self.out("Processing public message " + str(i + 1) + "/"
                     + str(len(public_messages)) + ": " + str(msg))
        else:
            self.out("Processing world/private message " + str(i - len(public_messages) + 1)
                     + "/" + str(len(world_messages)) + ": " + str(msg))

        # Checking
        if not isinstance(msg, Msg):
            self.err("Expected message of type Msg, got {} (skipping)".format(type(msg)))
            continue

        # Unpacking piggyback: all chars but the last are the sender node ID,
        # the trailing char flags whether the sender is in inspector mode
        sender_node_id, sender_inspector_mode_on = (msg.piggyback[0:-1],
                                                    msg.piggyback[-1] == "1")

        # Is message from inspector?
        sender_is_inspector = (sender_node_id == self.profile.get_static_profile()['inspector_node_id'] and
                               sender_inspector_mode_on)

        # (A) received a profile
        if msg.content_type == Msg.PROFILE:
            self.out("Received a profile...")

            # Checking the received profile
            # (recall that a profile sent through the world connection to the world node will be considered
            # not acceptable)
            profile = NodeProfile.from_dict(msg.content)
            is_an_already_known_agent = msg.sender in self.hosted.all_agents

            if is_an_already_known_agent:
                self.out("Editing information of an already added agent " + msg.sender)

                if not self.hosted.add_agent(peer_id=msg.sender, profile=profile):
                    self.__purge(msg.sender)
            else:
                is_expected_and_acceptable_profile = self.__interview_check_profile(peer_id=msg.sender,
                                                                                    node_id=sender_node_id,
                                                                                    profile=profile)

                if not is_expected_and_acceptable_profile:
                    self.err("Unexpected or unacceptable profile, removing (disconnecting) " + msg.sender)
                    self.__purge(msg.sender)
                else:

                    # If the node hosts a world and gets an expected and acceptable profile from the public network,
                    # assigns a role and sends the world profile (which includes private peer ID) and role to the
                    # requester
                    if (self.node_type is Node.WORLD and self.conn.is_public(peer_id=msg.sender) and
                            not sender_is_inspector):
                        self.out("Sending world approval message, profile, and assigned role to " + msg.sender +
                                 " (and switching peer ID in the interview queue)...")
                        is_world_master = (self.world_masters_node_ids is not None and
                                           sender_node_id in self.world_masters_node_ids)

                        # Assigning a role
                        role_str = self.world.assign_role(profile=profile, is_world_master=is_world_master)
                        if role_str is None:
                            self.err("Unable to determine what role to assign, removing (disconnecting) "
                                     + msg.sender)
                            self.__purge(msg.sender)
                        else:
                            role = self.world.ROLE_STR_TO_BITS[role_str]  # The role is a bit-wise-interpretable int
                            role = role | (Agent.ROLE_WORLD_MASTER if is_world_master else Agent.ROLE_WORLD_AGENT)

                            # Clearing temporary options (if any)
                            dynamic_profile = profile.get_dynamic_profile()
                            keys_to_delete = [key for key in dynamic_profile if key.startswith('tmp_')]
                            for key in keys_to_delete:
                                del dynamic_profile[key]

                            if not self.conn.send(msg.sender, channel_trail=None,
                                                  content={
                                                      'world_profile': self.profile.get_all_profile(),
                                                      'rendezvous_tag': self.clock.get_cycle(),
                                                      'your_role': role,
                                                      'agent_actions': self.world.agent_actions
                                                  },
                                                  content_type=Msg.WORLD_APPROVAL):
                                self.err("Failed to send world approval, removing (disconnecting) " + msg.sender)
                                self.__purge(msg.sender)
                            else:
                                private_peer_id = profile.get_dynamic_profile()['private_peer_id']
                                private_addr = profile.get_dynamic_profile()['private_peer_addresses']
                                if is_world_master:
                                    role = role | Agent.ROLE_WORLD_MASTER
                                    self.conn.add_to_world_masters_list(private_peer_id, private_addr, role)
                                else:
                                    role = role | Agent.ROLE_WORLD_AGENT
                                    self.conn.add_to_world_agents_list(private_peer_id, private_addr, role)

                                # Removing from the queue of public interviews
                                # and adding to the private ones (refreshing timer)
                                del self.agents_to_interview[msg.sender]  # Removing from public queue
                                self.agents_to_interview[private_peer_id] = [self.clock.get_time(), profile]  # Add

                    # If the node is an agent, it is time to tell the agent object that a new agent is now known,
                    # and send our profile to the agent that asked for out contact
                    elif self.node_type is Node.AGENT or sender_is_inspector:
                        self.out("Sending agent approval message and profile...")

                        if not self.conn.send(msg.sender, channel_trail=None,
                                              content={
                                                  'my_profile': self.profile.get_all_profile()
                                              },
                                              content_type=Msg.AGENT_APPROVAL):
                            self.err("Failed to send agent approval, removing (disconnecting) " + msg.sender)
                            self.__purge(msg.sender)
                        else:
                            self.out("Adding known agent and removing it from the interview queue " + msg.sender)
                            if not self.hosted.add_agent(peer_id=msg.sender, profile=profile):  # keep "hosted" here
                                self.__purge(msg.sender)
                            else:

                                # Removing from the queues
                                del self.agents_to_interview[msg.sender]  # Removing from queue

        # (B) received a world-join-approval
        elif msg.content_type == Msg.WORLD_APPROVAL:
            self.out("Received a world-join-approval message...")

            # Checking if it is the world we asked for
            # moreover, it must be on the public network, and this must not be a world-node (of course)
            # and you must not be already in another world
            if (not self.conn.is_public(peer_id=msg.sender) or self.node_type is Node.WORLD
                    or msg.sender not in self.agents_expected_to_send_ack or
                    self.profile.get_dynamic_profile()['connections']['world_peer_id'] is not None):
                self.err("Unexpected world approval, removing (disconnecting) " + msg.sender)
                self.__purge(msg.sender)
            else:
                if msg.sender != self.joining_world_info["world_public_peer_id"]:
                    self.err(f"Unexpected world approval: asked to join "
                             f"{self.joining_world_info['world_public_peer_id']} got approval from {msg.sender} "
                             f"(disconnecting)")
                    self.__purge(msg.sender)
                else:

                    # Getting world profile (includes private addresses) and connecting to the world (privately)
                    self.__join_world(profile=NodeProfile.from_dict(msg.content['world_profile']),
                                      role=msg.content['your_role'],
                                      agent_actions=msg.content['agent_actions'],
                                      rendezvous_tag=msg.content['rendezvous_tag'])

        # (C) received an agent-connect-approval
        elif msg.content_type == Msg.AGENT_APPROVAL:
            self.out("Received an agent-connect-approval message...")

            # Checking if it is the agent we asked for
            if msg.sender not in self.agents_expected_to_send_ack:
                self.err("Unexpected agent-connect approval, removing (disconnecting) " + msg.sender)
                self.__purge(msg.sender)
            else:

                # Adding the agent
                self.__join_agent(profile=NodeProfile.from_dict(msg.content['my_profile']),
                                  peer_id=msg.sender)

        # (D) requested for a profile
        elif msg.content_type == Msg.PROFILE_REQUEST:
            self.out("Received a profile request...")

            # If this is a world-node, it expects profile requests only on the public network
            # if this is not a world or not, we only send profile to agents who are involved in the handshake
            if ((self.node_type is Node.WORLD and not self.conn.is_public(peer_id=msg.sender)) or
                    (msg.sender not in self.agents_expected_to_send_ack)):
                self.err("Unexpected profile request, removing (disconnecting) " + msg.sender)
                self.__purge(msg.sender)
            else:

                # If a preference was defined, we temporarily add it to the profile
                # (deep-copied so the temporary 'tmp_*' keys never pollute our real profile)
                if (self.joining_world_info is not None and
                        msg.sender == self.joining_world_info["world_public_peer_id"] and
                        self.joining_world_info["options"] is not None and
                        len(self.joining_world_info["options"]) > 0):
                    my_profile = copy.deepcopy(self.profile)
                    for k, v in self.joining_world_info["options"].items():
                        my_profile.get_dynamic_profile()['tmp_' + str(k)] = v
                    my_profile = my_profile.get_all_profile()
                else:
                    my_profile = self.profile.get_all_profile()

                # Sending the profile
                self.out("Sending profile")
                if not self.conn.send(msg.sender, channel_trail=None,
                                      content=my_profile,
                                      content_type=Msg.PROFILE):
                    self.err("Failed to send profile, removing (disconnecting) " + msg.sender)
                    self.__purge(msg.sender)

        # (E) the world node received an ADDRESS_UPDATE from an agent
        elif msg.content_type == Msg.ADDRESS_UPDATE:
            self.out("Received an address update from " + msg.sender)

            if self.node_type is Node.WORLD and msg.sender in self.world.all_agents:
                all_addresses = msg.content.get('addresses')
                if all_addresses and isinstance(all_addresses, list):

                    # Update the address both in the connection and in the profile
                    self.conn.set_addresses_in_peer_info(msg.sender, all_addresses)
                    self.world.set_addresses_in_profile(msg.sender, all_addresses)
                    self.out(f"Waiting rendezvous publish after address update from {msg.sender}")

        # (F) got stream data
        elif msg.content_type == Msg.STREAM_SAMPLE:
            self.out("Received a stream sample...")

            if self.node_type is Node.AGENT:  # Handling the received samples
                self.agent.get_stream_sample(net_hash=msg.channel, sample_dict=msg.content)

                # Printing messages to screen, if needed (useful when chatting with lone wolves)
                if interact_mode_opts is not None and "stdout" in interact_mode_opts:
                    net_hash = DataProps.normalize_net_hash(msg.channel)
                    if net_hash in self.agent.known_streams:
                        stream_dict = self.agent.known_streams[net_hash]
                        sys.stdout = interact_mode_opts["stdout"][0]  # Output on
                        for name, stream_obj in stream_dict.items():
                            if stream_obj.props.is_text():
                                # Use a dedicated local: do not shadow the loop variable 'msg'
                                text_msg = stream_obj.get(requested_by="print")  # Getting message
                                print(f"\n『 {text_msg} 』")  # Printing to screen
                            if stream_obj.props.is_img():
                                img = stream_obj.get(requested_by="print")  # Getting image
                                filename = "wolf_img.png"
                                img.save(filename)
                                # Fixed: interpolate the actual file name in the notice
                                text_msg = f"(saved image to {filename})"
                                print(f"\n『 {text_msg} 』")  # Printing to screen
                        print("\n👉 ", end="")
                        sys.stdout = interact_mode_opts["stdout"][1]  # Output off

            elif self.node_type is Node.WORLD:
                self.err("Unexpected stream samples received by this world node, sent by: " + msg.sender)
                self.__purge(msg.sender)

        # (G) got action request
        elif msg.content_type == Msg.ACTION_REQUEST:
            self.out("Received an action request...")

            if self.node_type is Node.AGENT:
                if msg.sender not in self.agent.all_agents:
                    self.err("Unexpected action request received by a unknown node: " + msg.sender)
                else:
                    # Public (out-of-world) peers talk to the lone-wolf behavior, world peers to the main one
                    behav = self.agent.behav_lone_wolf \
                        if msg.sender in self.agent.public_agents else self.agent.behav
                    behav.request_action(action_name=msg.content['action_name'],
                                         args=msg.content['args'],
                                         signature=msg.sender,
                                         timestamp=self.clock.get_time(),
                                         uuid=msg.content['uuid'])

            elif self.node_type is Node.WORLD:
                self.err("Unexpected action request received by this world node, sent by: " + msg.sender)
                self.__purge(msg.sender)

        # (H) got role suggestion
        elif msg.content_type == Msg.ROLE_SUGGESTION:
            self.out("Received a role suggestion/new role...")

            if self.node_type is Node.AGENT:
                if msg.sender == self.conn.get_world_peer_id():
                    new_role_indication = msg.content
                    if new_role_indication['peer_id'] == self.get_world_peer_id():
                        self.agent.accept_new_role(new_role_indication['role'])

                        self.agent.behav.update_wildcard("<agent>", f"{self.get_world_peer_id()}")
                        self.agent.behav.update_wildcard("<world>", f"{msg.sender}")

            elif self.node_type is Node.WORLD:
                if msg.sender in self.world.world_masters:
                    for role_suggestion in msg.content:
                        self.world.set_role(peer_id=role_suggestion['peer_id'], role=role_suggestion['role'])

        # (I) got request to alter the HSM
        elif msg.content_type == Msg.HSM:
            self.out("Received a request to alter the HSM...")

            if self.node_type is Node.AGENT:
                if msg.sender in self.agent.world_masters:  # This must be coherent with what we do in set_role
                    ret = getattr(self.agent.behav, msg.content['method'])(*msg.content['args'])
                    if not ret:
                        self.err(f"Cannot run HSM action named {msg.content['method']} with args "
                                 f"{msg.content['args']}")
                else:
                    self.err("Only world-master can alter HSMs of other agents: " + msg.sender)  # No need to purge

            elif self.node_type is Node.WORLD:
                self.err("Unexpected request to alter the HSM received by this world node, sent by: " + msg.sender)
                self.__purge(msg.sender)

        # (J) misc
        elif msg.content_type == Msg.MISC:
            self.out("Received a misc message...")
            self.out(msg.content)

        # (K) got a request to re-download the CV from the root server
        elif msg.content_type == Msg.GET_CV_FROM_ROOT:
            self.out("Received a notification to re-download the CV...")

            # Downloading CV
            self.profile.update_cv(self.get_cv())

            # Re-downloading token (it will include the new CV hash)
            self.get_node_token(peer_ids=[self.get_public_peer_id(), self.get_world_peer_id()])

        # (L) got one or more badge suggestions
        elif msg.content_type == Msg.BADGE_SUGGESTIONS:
            self.out("Received badge suggestions...")

            if self.node_type is Node.WORLD:
                for badge_dict in msg.content:

                    # Right now, we accept all the suggestions
                    self.world.add_badge(**badge_dict)  # Adding to the list of badges
            elif self.node_type is Node.AGENT:
                self.err("Receiving badge suggestions is not expected for an agent node")

        # (M) got a special connection/presence message for an inspector
        elif msg.content_type == Msg.INSPECT_ON:
            self.out("Received an inspector-activation message...")

            if sender_is_inspector:
                self.inspector_activated = True
                self.inspector_peer_id = msg.sender
                print("Inspector activated")
            else:
                self.err("Inspector-activation message was not sent by the expected inspector node ID")
                self.__purge(msg.sender)

        # (N) got a command from an inspector
        elif msg.content_type == Msg.INSPECT_CMD:
            self.out("Received a command from the inspector...")

            if sender_is_inspector and self.inspector_activated:
                self.__handle_inspector_command(msg.content['cmd'], msg.content['arg'])
            else:
                self.err("Inspector command was not sent by the expected inspector node ID "
                         "or the inspector was not yet activated (Msg.INSPECT_ON not received yet)")
                self.__purge(msg.sender)

    # Dropping peers that timed out in the interview / connected-without-ack queues
    self.__interview_clean()
    self.__connected_without_ack_clean()
|
|
1548
|
+
|
|
1549
|
+
def __join_world(self, profile: NodeProfile, role: int,
                 agent_actions: str | None, rendezvous_tag: int):
    """Performs the actual operation of joining a world after receiving confirmation.

    Connects privately to the world node, optionally reserves a relay slot
    (when this node is not publicly reachable), subscribes to the world
    rendezvous and stream topics, optionally replaces the hosted agent with a
    world-provided one (built from `agent_actions` code), and finally updates
    this node's profile/connection state to reflect world membership.

    Args:
        profile: The profile of the world to join.
        role: The role assigned to the agent in the world (int).
        agent_actions: A string of code defining the agent's actions.
        rendezvous_tag: The rendezvous tag from the world's profile.

    Returns:
        True if the join operation is successful, otherwise False.
    """
    addresses = profile.get_dynamic_profile()['private_peer_addresses']
    world_public_peer_id = profile.get_dynamic_profile()['peer_id']
    self.out(f"Actually joining world, role will be {role}")

    # Connecting to the world (private)
    # notice that we also communicate the world node private peer ID to the connection manager,
    # to avoid filtering it out when updating pools
    peer_id = self.ask_to_get_in_touch(addresses=addresses, public=False,
                                       before_updating_pools_fcn=self.conn.set_world)

    if peer_id is not None:

        # Relay reservation logic for non-public peers
        if not self.conn.p2p_world.is_public and self.conn.p2p_world.relay_is_enabled:
            self.out("Node is not publicly reachable. Attempting to reserve a slot on the world's private network.")
            try:
                # NOTE(review): reserve_on_relay presumably returns an ISO-8601 UTC timestamp
                # string (possibly Z-suffixed) — the replace() maps 'Z' to an explicit offset
                expiry_utc = self.conn.p2p_world.reserve_on_relay(peer_id)
                self.relay_reservation_expiry = (
                    datetime.fromisoformat(expiry_utc.replace('Z', '+00:00')))
                self.out(f"Reserved relay slot. Expires at "
                         f"{self.relay_reservation_expiry.strftime('%Y-%m-%d %H:%M:%S')} UTC")

                self.out("Fetching updated address list from transport layer...")
                complete_private_addrs = self.conn.p2p_world.get_node_addresses()

                # Update the profile with this definitive list (IN-PLACE)
                # (in-place so any other holder of this list object sees the update)
                address_list = self.profile.get_dynamic_profile()['private_peer_addresses']
                address_list.clear()
                address_list.extend(complete_private_addrs)

                self.out("Notifying world of the complete updated address list...")
                self.conn.send(peer_id, channel_trail=None,
                               content_type=Msg.ADDRESS_UPDATE,
                               content={'addresses': complete_private_addrs})
            except Exception as e:
                # Best-effort: a failed reservation does not abort the join
                self.err(f"An error occurred during relay reservation: {e}.")

        # Subscribing to the world rendezvous topic, from which we will get fresh information
        # about the world agents and masters
        self.out("Subscribing to the world-members topic...")
        if not self.conn.subscribe(peer_id, channel=f"{peer_id}::ps:rv"):  # Special rendezvous (ps:rv)
            self.leave(peer_id)  # If subscribing fails, we quit everything (safer)
            return False

        # Killing the public connection to the world node
        self.out("Disconnecting from the public world network (since we joined the private one)")
        self.__purge(world_public_peer_id)

        # Removing the private world peer id from the list of connected-but-not-managed peer
        del self.agents_expected_to_send_ack[peer_id]

        # Subscribing to all the other world topics, from which we will get fresh information
        # about the streams
        self.out("Subscribing to the world-streams topics...")
        dynamic_profile = profile.get_dynamic_profile()
        list_of_props = []
        list_of_props += dynamic_profile['streams'] if dynamic_profile['streams'] is not None else []
        list_of_props += dynamic_profile['proc_outputs'] if dynamic_profile['proc_outputs'] is not None else []

        if not self.agent.add_compatible_streams(peer_id, list_of_props, buffered=False, public=False):
            self.leave(peer_id)
            return False

        # Setting actions
        if agent_actions is not None and len(agent_actions) > 0:

            # Checking code (static safety/syntax screen before exec'ing world-provided code)
            if not Node.__analyze_code(agent_actions):
                self.err("Invalid agent actions code (syntax errors or unsafe code) was provided by the world, "
                         "blocking the join operation")
                return False

            # Creating a new agent with the received actions
            # (the world-provided module must define a WAgent class)
            mod = types.ModuleType("dynamic_module")
            exec(agent_actions, mod.__dict__)
            sys.modules["dynamic_module"] = mod
            new_agent = mod.WAgent(proc=None)

            # Cloning attributes of the existing agent
            for key, value in self.agent.__dict__.items():
                if hasattr(new_agent, key):  # This will skip ROLE_BITS_TO_STR, CUSTOM_ROLES, etc...
                    setattr(new_agent, key, value)

            # Telling the FSM that actions are related to this new agent
            new_agent.behav.set_actionable(new_agent)
            new_agent.behav_lone_wolf.set_actionable(new_agent)

            # Setting up roles
            roles = profile.get_dynamic_profile()['world_roles_fsm'].keys()
            new_agent.CUSTOM_ROLES = roles
            new_agent.augment_roles()

            # Updating node-level references
            old_agent = self.agent
            self.agent = new_agent
            self.hosted = new_agent
        else:
            old_agent = self.agent

        # Saving the world profile
        self.agent.world_profile = profile

        # Setting the assigned role and default behavior (do it after having recreated the new agent object)
        self.agent.accept_new_role(role)  # Do this after having done 'self.agent.world_profile = profile'

        # Updating wildcards
        self.agent.behav.update_wildcard("<agent>", f"{self.get_world_peer_id()}")
        self.agent.behav.update_wildcard("<world>", f"{peer_id}")
        self.agent.behav.add_wildcards(old_agent.behav_wildcards)

        # Telling the connection manager the info needed to discriminate peers (getting them from the world profile)
        # notice that the world node private ID was already told to the connection manager (see a few lines above)
        self.out(f"Rendezvous tag received with profile: {rendezvous_tag} "
                 f"(in conn pool: {self.conn.rendezvous_tag})")
        if self.conn.rendezvous_tag < rendezvous_tag:
            # Only accept member lists newer than what the pool already knows
            self.conn.rendezvous_tag = rendezvous_tag
            num_world_masters = len(dynamic_profile['world_summary']['world_masters']) \
                if dynamic_profile['world_summary']['world_masters'] is not None else 'none'
            num_world_agents = len(dynamic_profile['world_summary']['world_agents']) \
                if dynamic_profile['world_summary']['world_agents'] is not None else 'none'
            self.out(f"Rendezvous from profile (tag: {rendezvous_tag}), world masters: {num_world_masters}")
            self.out(f"Rendezvous from profile (tag: {rendezvous_tag}), world agents: {num_world_agents}")
            self.conn.set_world_masters_list(dynamic_profile['world_summary']['world_masters'])
            self.conn.set_world_agents_list(dynamic_profile['world_summary']['world_agents'])

        # Updating our profile to set the world we are in
        self.profile.get_dynamic_profile()['connections']['world_peer_id'] = peer_id
        self.profile.mark_change_in_connections()

        print("Handshake completed, world joined!")
        return True
    else:
        return False
|
|
1695
|
+
|
|
1696
|
+
def __join_agent(self, profile: NodeProfile, peer_id: str):
    """Adds a new known agent after receiving an approval message.

    Args:
        profile: The profile of the agent to join.
        peer_id: The peer ID of the agent.

    Returns:
        True if the agent is successfully added, otherwise False.
    """
    self.out("Adding known agent " + peer_id)

    # Guard clause: if the agent cannot be registered, drop the connection
    added = self.agent.add_agent(peer_id=peer_id, profile=profile)
    if not added:
        self.__purge(peer_id)
        return False

    # The handshake is complete: no acknowledgment is pending anymore
    del self.agents_expected_to_send_ack[peer_id]
    return True
|
|
1713
|
+
|
|
1714
|
+
def __interview_enqueue(self, peer_id: str):
    """Adds a newly connected peer to the queue of agents to be interviewed.

    Args:
        peer_id: The peer ID of the agent to interview.

    Returns:
        True if the agent is successfully enqueued, otherwise False.
    """
    # Early stop: a private-network peer must be a known world member or the world node itself
    is_world_member = (peer_id in self.conn.world_agents_list or
                       peer_id in self.conn.world_masters_list or
                       peer_id == self.conn.world_node_peer_id)
    if not self.conn.is_public(peer_id) and not is_world_member:
        self.out(f"Interview failed: "
                 f"peer ID {peer_id} is not in the world agents/masters list, and it is not the world node")
        return False

    # Ask for the profile; on failure, do not enqueue
    if not self.conn.send(peer_id, channel_trail=None,
                          content_type=Msg.PROFILE_REQUEST, content=None):
        self.out(f"Interview failed: "
                 f"unable to send a profile request to peer ID {peer_id}")
        return False

    # Enqueue: Peer ID -> [time, profile]; the profile is unknown until the reply arrives
    self.agents_to_interview[peer_id] = [self.clock.get_time(), None]
    return True
|
|
1742
|
+
|
|
1743
|
+
def __interview_check_profile(self, peer_id: str, node_id: str, profile: NodeProfile):
    """Checks if a received profile is acceptable and valid.

    The check covers: node-ID allow-listing, presence in the interview queue,
    CV-hash integrity (against the hash carried by the peer's token), and
    world-membership / certification constraints that depend on whether the
    peer connected publicly or privately. When the structural checks pass,
    the final verdict is delegated to the hosted entity's evaluate_profile().

    Args:
        peer_id: The peer ID of the node that sent the profile.
        node_id: The node ID of the node that sent the profile.
        profile: The NodeProfile object to be checked.

    Returns:
        True if the profile is acceptable, otherwise False.
    """

    # If the node ID was not on the list of allowed ones (if the list exists), then stop it
    # notice that we do not get the node ID from the profile, but from outside (it comes from the token, so safe)
    if ((self.allowed_node_ids is not None and node_id not in self.allowed_node_ids) or
            (peer_id not in self.agents_to_interview)):
        # Fixed log message: removed stray literal 'f' before the placeholder
        self.out(f"Profile of {peer_id} not in the list of agents to interview or its node ID is not allowed")
        return False
    else:

        # Getting the parts of profile needed
        eval_static_profile = profile.get_static_profile()
        eval_dynamic_profile = profile.get_dynamic_profile()
        my_dynamic_profile = self.profile.get_dynamic_profile()

        # Checking if CV was altered (CV hash from the last token must match the profile's CV)
        cv_hash = self.conn.get_cv_hash_from_last_token(peer_id)
        sanity_ok, pairs_of_hashes = profile.verify_cv_hash(cv_hash)
        if not sanity_ok:
            self.out(f"The CV in the profile of {peer_id} failed the sanity check {pairs_of_hashes},"
                     f" {profile.get_cv()}")
            return False

        # Determining type of agent, checking the connection pools
        # (lowest role bit clear -> public/out-of-world peer; set -> world-related peer)
        role = self.conn.get_role(peer_id)

        if role & 1 == 0:

            # Ensuring that the interviewed agent is out of every world
            # (if it were in the same world in which we are, it would connect in a private manner) and
            # possibly fulfilling the optional constraint of accepting only certified agent,
            # then asking the hosted entity for additional custom evaluation
            if (eval_dynamic_profile['connections']['world_peer_id'] is None and
                    (not self.only_certified_agents or eval_static_profile['certified'] is True)):
                return self.hosted.evaluate_profile(role, profile)
            else:
                # Fixed log message: "or it is not certified" (was "of"), dropped stray 'f'
                self.out(f"Peer {peer_id} is already living in a world, or it is not certified "
                         f"and maybe I expect certified only")
                return False
        else:

            if self.node_type is Node.AGENT:

                # Ensuring that the interviewed agent is in the same world where we are and
                # possibly fulfilling the optional constraint of accepting only certified agent
                if (eval_dynamic_profile['connections']['world_peer_id'] is not None and
                        eval_dynamic_profile['connections']['world_peer_id'] ==
                        my_dynamic_profile['connections']['world_peer_id'] and
                        (not self.only_certified_agents or 'certified' in eval_static_profile and
                         eval_static_profile['certified'] is True)):
                    return self.hosted.evaluate_profile(role, profile)
                else:
                    # Fixed log message: "or it is not certified" (was "of"), dropped stray 'f'
                    self.out(f"Peer {peer_id} is living in a different world, or it is not certified "
                             f"and maybe I expect certified only")
                    return False

            elif self.node_type is Node.WORLD:

                # If this node hosts a world, we do not expect to interview agents in the private world connection,
                # so something went wrong here, let's reject it
                self.out(f"Peer {peer_id} sent a profile in the private network, unexpected")
                return False
|
|
1815
|
+
|
|
1816
|
+
def __interview_clean(self):
    """Removes outdated or timed-out interview requests from the queue."""
    now = self.clock.get_time()

    # Collect the peers whose interview request has been pending for too long
    stale_peers = [pid for pid, (asked_at, _profile) in self.agents_to_interview.items()
                   if (now - asked_at) > self.interview_timeout]

    for pid in stale_peers:
        self.out("Removing (disconnecting) due to timeout in interview queue: " + pid)

    # Purging also drops the peer from the queue of peers to interview
    for pid in stale_peers:
        self.__purge(pid)
+
def __connected_without_ack_clean(self):
    """Removes connected peers from the queue if they haven't sent an acknowledgment within the timeout period."""
    now = self.clock.get_time()

    # Collect the peers whose acknowledgment did not arrive in time
    timed_out = [pid for pid, connected_at in self.agents_expected_to_send_ack.items()
                 if (now - connected_at) > self.connect_without_ack_timeout]

    for pid in timed_out:
        self.out("Removing (disconnecting) due to timeout in the connected-without-ack queue: " + pid)

    # Purging also drops the peer from the connected-without-ack queue itself
    for pid in timed_out:
        self.__purge(pid)
|
+
def __purge(self, peer_id: str):
    """Removes a peer from all relevant connection lists and queues.

    Args:
        peer_id: The peer ID of the node to purge.
    """
    self.hosted.remove_agent(peer_id)
    self.conn.remove(peer_id)

    # Drop the pending interview entry, if any (no-op when absent)
    self.agents_to_interview.pop(peer_id, None)

    # Drop the pending-acknowledgment entry, if any (no-op when absent)
    self.agents_expected_to_send_ack.pop(peer_id, None)
|
+
def __root(self, api: str, payload: dict):
    """Sends a POST request to the root server's API endpoint.

    Args:
        api: The API endpoint to send the request to (joined to the root endpoint).
        payload: The data to be sent in the request body (not mutated; a copy
            augmented with the node token is sent instead).

    Returns:
        The 'data' field from the server's JSON response.

    Raises:
        GenException: If the request fails, the response is malformed, or the
            server reports a non-"ok" state code.
    """
    # Keys that must be present in every well-formed server response
    response_fields = ("state", "flags", "data")

    try:
        # Join root endpoint and API path, inserting a slash only when neither side provides one
        api = self.root_endpoint + ("/" if self.root_endpoint[-1] != "/" and api[0] != "/" else "") + api

        # Copy before adding the token so the caller's dict is left untouched
        payload = dict(payload)
        payload["node_token"] = self.node_token  # Adding token to let the server verify

        # A finite timeout prevents this call from hanging forever on an unresponsive server
        response = requests.post(api,
                                 json=payload,
                                 headers={"Content-Type": "application/json"},
                                 timeout=60)

        if response.status_code == 200:
            ret = response.json()
            for field in response_fields:
                if field not in ret:
                    raise GenException(f"Missing key '{field}' in the response to {api}: {ret}")
        else:
            raise GenException(f"Request {api} failed with status code {response.status_code}")
    except Exception as e:
        # NOTE: GenExceptions raised above are also re-wrapped here, preserving the original behavior
        self.err(f"An error occurred while making the POST request: {e}")
        raise GenException(f"An error occurred while making the POST request: {e}") from e

    if ret['state']['code'] != "ok":
        raise GenException("[" + api + "] " + ret['state']['message'])

    return ret['data']
|
+
@staticmethod
def __analyze_code(file_in_memory):
    """Analyzes a string of Python code for dangerous or unsafe functions and modules.

    Args:
        file_in_memory: The string of Python code to analyze.

    Returns:
        True if the code is considered safe, otherwise False (also False on syntax errors).
    """
    unsafe_calls = {"eval", "exec", "compile", "system", "__import__", "input"}
    unsafe_modules = {"subprocess"}

    def flags_danger(node):
        """Return True when an AST node is a dangerous call or a dangerous import."""
        if isinstance(node, ast.Call):
            target = node.func
            if isinstance(target, ast.Name):
                return target.id in unsafe_calls
            if isinstance(target, ast.Attribute):
                return target.attr in unsafe_calls
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            # Only the top-level package name matters (e.g. "subprocess.run" -> "subprocess")
            return any(alias.name.split('.')[0] in unsafe_modules for alias in node.names)
        return False

    try:
        tree = ast.parse(file_in_memory)
    except SyntaxError:
        return False

    # Safe only if no node in the whole tree is flagged
    return not any(flags_danger(n) for n in ast.walk(tree))
|
+
def __handle_inspector_command(self, cmd: str, arg):
|
|
1941
|
+
"""Handles commands received from an inspector node.
|
|
1942
|
+
|
|
1943
|
+
Args:
|
|
1944
|
+
cmd: The command string.
|
|
1945
|
+
arg: The argument for the command.
|
|
1946
|
+
"""
|
|
1947
|
+
self.out(f"Handling inspector message {cmd}, with arg {arg}")
|
|
1948
|
+
print(f"Handling inspector message {cmd}, with arg {arg}")
|
|
1949
|
+
|
|
1950
|
+
if arg is not None and not isinstance(arg, str):
|
|
1951
|
+
self.err(f"Expecting a string argument from the inspector!")
|
|
1952
|
+
else:
|
|
1953
|
+
if cmd == "ask_to_join_world":
|
|
1954
|
+
print(f"Inspector asked to join world: {arg}")
|
|
1955
|
+
self.ask_to_join_world(node_name=arg)
|
|
1956
|
+
elif cmd == "ask_to_get_in_touch":
|
|
1957
|
+
print(f"Inspector asked to get in touch with an agent: {arg}")
|
|
1958
|
+
self.ask_to_get_in_touch(node_name=arg, public=True)
|
|
1959
|
+
elif cmd == "leave":
|
|
1960
|
+
print(f"Inspector asked to leave an agent: {arg}")
|
|
1961
|
+
self.leave(arg)
|
|
1962
|
+
elif cmd == "leave_world":
|
|
1963
|
+
print(f"Inspector asked to leave the current world")
|
|
1964
|
+
self.leave_world()
|
|
1965
|
+
elif cmd == "pause":
|
|
1966
|
+
print("Inspector asked to pause")
|
|
1967
|
+
self.__inspector_told_to_pause = True
|
|
1968
|
+
elif cmd == "play":
|
|
1969
|
+
print("Inspector asked to play")
|
|
1970
|
+
self.__inspector_told_to_pause = False
|
|
1971
|
+
elif cmd == "save":
|
|
1972
|
+
print("Inspector asked to save")
|
|
1973
|
+
self.hosted.save(arg)
|
|
1974
|
+
else:
|
|
1975
|
+
self.err("Unknown inspector command")
|
|
1976
|
+
|
|
1977
|
+
def __send_to_inspector(self):
    """Sends status updates and data to the connected inspector node.

    Collects the console backlog (stored in a circular buffer), the HSM
    (behavior) and its status, known agent profiles and known stream
    properties (the latter two only when their cached counts changed),
    sends them in one message, and then sends the latest sample of each
    known stream in a separate message per stream group.
    """

    # Collecting console: [f, t] is the unread window inside the circular buffer
    f = self._output_messages_last_pos - self._output_messages_count + 1  # Included
    t = self._output_messages_last_pos  # Included
    ff = -1
    tt = -1
    if t >= 0 > f:  # If there is something, and we incurred in the circular organization (t: valid; f: negative)
        # Wrapped case: read the tail [ff, tt] at the end of the buffer, then the head [0, t]
        ff = len(self._output_messages) + f  # Included
        tt = len(self._output_messages) - 1  # Included
        f = 0
    elif t < 0:  # If there are no messages at all (t: -1; f: 0 - due to the way we initialized class attributes)
        # Force both slices below to be empty
        f = -1
        t = -1
    # Concatenation of the (possibly empty) wrapped tail and the head window
    console = {'output_messages': self._output_messages[ff:tt+1] + self._output_messages[f:t+1]}

    # Collecting the HSM: the stringified behavior is sent only when the cached
    # reference compares unequal to the current one (otherwise None is sent)
    if self.__inspector_cache['behav'] != self.hosted.behav:
        self.__inspector_cache['behav'] = self.hosted.behav
        behav = str(self.hosted.behav)
    else:
        behav = None

    # Collecting status of the HSM (current state/action ids, if any)
    if self.hosted.behav is not None:
        _behav = self.hosted.behav
        state = _behav.get_state().id if _behav.get_state() is not None else None
        action = _behav.get_action().id if _behav.get_action() is not None else None
        behav_status = {'state': state, 'action': action,
                        'state_with_action': _behav.get_state().has_action()
                        if (state is not None) else False}
    else:
        behav_status = None

    # Collecting known agents
    # NOTE(review): the cache key is only the COUNT of agents, so a change that
    # replaces one agent with another (same count) is not re-sent — confirm intended
    if self.__inspector_cache['all_agents_count'] != len(self.hosted.all_agents):
        self.__inspector_cache['all_agents_count'] = len(self.hosted.all_agents)
        all_agents_profiles = {k: v.get_all_profile() for k, v in self.hosted.all_agents.items()}

        # Inspector expects also to have access to the profile of the world,
        # so we patch this thing by adding it here
        if self.hosted.in_world() and self.conn.world_node_peer_id is not None:
            all_agents_profiles[self.conn.world_node_peer_id] = self.hosted.world_profile.get_all_profile()
    else:
        all_agents_profiles = None

    # Collecting known streams info (same count-based caching as for agents)
    if self.__inspector_cache['known_streams_count'] != len(self.hosted.known_streams):
        self.__inspector_cache['known_streams_count'] = len(self.hosted.known_streams)
        # Keys are "<net_hash>-<stream_name>", values are the stream properties as dicts
        known_streams_props = {(k + "-" + name): v.get_props().to_dict() for k, stream_dict in
                               self.hosted.known_streams.items() for name, v in stream_dict.items()}
    else:
        known_streams_props = None

    # Packing console, HSM status, and possibly HSM
    console_behav_status_and_behav = {'console': console,
                                      'behav': behav,
                                      'behav_status': behav_status,
                                      'all_agents_profiles': all_agents_profiles,
                                      'known_streams_props': known_streams_props}

    # Sending console, HSM status, and possibly HSM to the inspector
    if not self.conn.send(self.inspector_peer_id, channel_trail=None,
                          content_type=Msg.CONSOLE_AND_BEHAV_STATUS,
                          content=console_behav_status_and_behav):
        self.err("Failed to send data to the inspector")

    # Sending stream data (not pubsub) to the inspector, one message per stream group
    my_peer_ids = (self.get_public_peer_id(), self.get_world_peer_id())
    for net_hash, streams_dict in self.hosted.known_streams.items():
        peer_id = DataProps.peer_id_from_net_hash(net_hash)

        # Preparing sample dict (one entry per stream name; empty dict when no sample)
        something_to_send = False
        content = {name: {} for name in streams_dict.keys()}
        for name, stream in streams_dict.items():
            data = stream.get(requested_by="__send_to_inspector")

            if data is not None:
                something_to_send = True

                self.hosted.deb(f"[__send_to_inspector] Preparing to send stream samples from {net_hash}, {name}")
                # Streams owned by this node are keyed by plain name; foreign ones by "<peer_id>|<name>"
                content[(peer_id + "|" + name) if peer_id not in my_peer_ids else name] = \
                    {'data': data, 'data_tag': stream.get_tag(), 'data_uuid': stream.get_uuid()}

        # Checking if there is something valid in this group of streams to send to inspector
        if not something_to_send:
            self.hosted.deb(f"[__send_to_inspector] No stream samples to send to inspector for {net_hash}, "
                            f"all internal streams returned None")
            continue

        self.hosted.deb(f"[__send_to_inspector] Sending samples of {net_hash} by direct message, to inspector")
        name_or_group = DataProps.name_or_group_from_net_hash(net_hash)
        if not self.conn.send(self.inspector_peer_id, channel_trail=name_or_group,
                              content_type=Msg.STREAM_SAMPLE, content=content):
            self.err(f"Failed to send stream sample data to the inspector (hash: {net_hash})")
|
+
class NodeSynchronizer:
|
|
2077
|
+
DEBUG = True
|
|
2078
|
+
|
|
2079
|
+
def __init__(self):
    """Initializes a new instance of the NodeSynchronizer class."""
    self.nodes = []  # All nodes driven by this synchronizer (agents and world)
    self.agent_nodes = {}  # Agent name -> node
    self.world_node = None  # Added to allow get_console() to access the world node from server.py (synch only)
    self.streams = {}
    self.world = None
    self.world_masters = set()  # Names of the agents that are world masters
    self.world_masters_node_ids = None  # Filled when the world node is added
    self.agent_name_to_profile = {}  # Agent name -> profile
    self.clock = Clock()
    self.synch_cycle = -1  # Current cycle index (set by run())
    self.synch_cycles = -1  # Total cycles to run (set by run())

    # Visualization-related attributes
    self.using_server = False
    # NOTE: the original assigned server_checkpoints twice; the duplicate was removed
    self.server_checkpoints = None
    self.skip_clear_for = 0
    self.step_event = None  # Event that triggers a new step (manipulated by the server)
    self.wait_event = None  # Event that triggers a new "wait-for-step-event" case (manipulated by the server)
    self.next_checkpoint = 0
    self.gap = 0.  # Seconds
|
+
def add_node(self, node: Node):
    """Adds a new node to the synchronizer.

    Agent nodes are indexed by name and checked against the world-master
    ids (if already known); the world node records the master ids and
    re-scans previously added nodes for masters.

    Args:
        node: The node to add.
    """
    self.nodes.append(node)

    if node.node_type == Node.AGENT:
        self.agent_nodes[node.agent.get_name()] = node
        # Master ids are only known once the world node has been added
        if self.world_masters_node_ids is not None:
            if node.node_id in self.world_masters_node_ids:
                self.world_masters.add(node.agent.get_name())
        self.agent_name_to_profile[node.agent.get_name()] = node.agent.get_profile()
    elif node.node_type == Node.WORLD:
        self.world_node = node
        self.world = node.world
        self.world_masters_node_ids = node.world_masters_node_ids
        if self.world_masters_node_ids is None:
            self.world_masters_node_ids = set()
        # NOTE(review): this loop variable shadows the 'node' parameter — after the
        # loop, 'node' refers to the last element of self.nodes, not the added node.
        # Confirm whether the shadowing (and the per-node flag below) is intended.
        for node in self.nodes:
            if node.node_id in self.world_masters_node_ids:
                self.world_masters.add(node.agent.get_name())
            node.debug_server_running = True  # assumes the flag is set on every node — TODO confirm placement
|
+
def run(self, synch_cycles: int | None = None):
|
|
2129
|
+
"""Starts the main execution loop for the node.
|
|
2130
|
+
|
|
2131
|
+
Args:
|
|
2132
|
+
synch_cycles: The number of clock cycles to run the loop for. If None, runs indefinitely.
|
|
2133
|
+
"""
|
|
2134
|
+
if self.world is None:
|
|
2135
|
+
raise GenException("Missing world node")
|
|
2136
|
+
|
|
2137
|
+
# External events
|
|
2138
|
+
if self.using_server:
|
|
2139
|
+
self.step_event = threading.Event()
|
|
2140
|
+
self.wait_event = threading.Event()
|
|
2141
|
+
|
|
2142
|
+
# Main loop
|
|
2143
|
+
self.synch_cycles = synch_cycles
|
|
2144
|
+
self.synch_cycle = 0
|
|
2145
|
+
|
|
2146
|
+
try:
|
|
2147
|
+
while True:
|
|
2148
|
+
|
|
2149
|
+
# In server mode, we wait for an external event to go ahead (step_event.set())
|
|
2150
|
+
if self.using_server:
|
|
2151
|
+
self.wait_event.set()
|
|
2152
|
+
self.step_event.wait()
|
|
2153
|
+
self.wait_event.clear()
|
|
2154
|
+
|
|
2155
|
+
state_changed = False
|
|
2156
|
+
world_node = None
|
|
2157
|
+
for node in self.nodes:
|
|
2158
|
+
if node.node_type == Node.AGENT:
|
|
2159
|
+
node.run(cycles=1)
|
|
2160
|
+
if self.gap > 0.:
|
|
2161
|
+
time.sleep(self.gap)
|
|
2162
|
+
state_changed = state_changed or node.agent.behav.get_state_changed()
|
|
2163
|
+
else:
|
|
2164
|
+
world_node = node
|
|
2165
|
+
if world_node is not None:
|
|
2166
|
+
world_node.run(cycles=1)
|
|
2167
|
+
if self.gap > 0.:
|
|
2168
|
+
time.sleep(self.gap)
|
|
2169
|
+
|
|
2170
|
+
if NodeSynchronizer.DEBUG and state_changed:
|
|
2171
|
+
for node in self.nodes:
|
|
2172
|
+
if node.node_type == Node.AGENT:
|
|
2173
|
+
print(f"[DEBUG NODE SYNCHRONIZER] {node.agent.get_name()} "
|
|
2174
|
+
f"state: {node.agent.behav.get_state_name()}")
|
|
2175
|
+
|
|
2176
|
+
# Matching checkpoints
|
|
2177
|
+
if self.server_checkpoints is not None and self.server_checkpoints["current"] >= 0:
|
|
2178
|
+
self.server_checkpoints["matched"] = -1
|
|
2179
|
+
checkpoint = self.server_checkpoints["checkpoints"][self.server_checkpoints["current"]]
|
|
2180
|
+
agent = checkpoint["agent"]
|
|
2181
|
+
state = checkpoint["state"] if "state" in checkpoint else None
|
|
2182
|
+
|
|
2183
|
+
if agent not in self.nodes:
|
|
2184
|
+
raise GenException(f"Unknown agent in the checkpoint list: {agent}")
|
|
2185
|
+
behav = self.nodes[agent].agent.behav
|
|
2186
|
+
if not (state is None or state in behav.states):
|
|
2187
|
+
raise GenException(f"Unknown state in the checkpoint list: {state}")
|
|
2188
|
+
|
|
2189
|
+
if state is None or behav.state == state:
|
|
2190
|
+
if "skip" not in checkpoint:
|
|
2191
|
+
self.server_checkpoints["matched"] = self.server_checkpoints["current"]
|
|
2192
|
+
self.server_checkpoints["current"] += 1
|
|
2193
|
+
if self.server_checkpoints["current"] >= len(self.server_checkpoints["checkpoints"]):
|
|
2194
|
+
self.server_checkpoints["current"] = -1 # This means: no more checkpoints
|
|
2195
|
+
else:
|
|
2196
|
+
checkpoint["skip"] -= 1
|
|
2197
|
+
if checkpoint["skip"] <= 0:
|
|
2198
|
+
self.server_checkpoints["current"] += 1
|
|
2199
|
+
if self.server_checkpoints["current"] >= len(self.server_checkpoints["checkpoints"]):
|
|
2200
|
+
self.server_checkpoints["current"] = -1 # This means: no more checkpoints
|
|
2201
|
+
|
|
2202
|
+
# In step mode, we clear the external event to be able to wait for a new one
|
|
2203
|
+
if self.using_server:
|
|
2204
|
+
if self.skip_clear_for == 0:
|
|
2205
|
+
self.step_event.clear()
|
|
2206
|
+
elif self.skip_clear_for == -2: # Infinite play
|
|
2207
|
+
pass
|
|
2208
|
+
elif self.skip_clear_for == -1: # Play until next state
|
|
2209
|
+
if state_changed:
|
|
2210
|
+
self.step_event.clear()
|
|
2211
|
+
elif self.skip_clear_for == -3: # Play until next checkpoint:
|
|
2212
|
+
if self.server_checkpoints["matched"] >= 0:
|
|
2213
|
+
self.step_event.clear()
|
|
2214
|
+
else:
|
|
2215
|
+
self.skip_clear_for -= 1
|
|
2216
|
+
|
|
2217
|
+
self.synch_cycle += 1
|
|
2218
|
+
|
|
2219
|
+
# Stop condition on the number of cycles
|
|
2220
|
+
if self.synch_cycles is not None and self.synch_cycle == self.synch_cycles:
|
|
2221
|
+
break
|
|
2222
|
+
except KeyboardInterrupt:
|
|
2223
|
+
print("\nDetected Ctrl+C! Exiting gracefully...")
|