unaiverse 0.1.6__cp314-cp314-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of unaiverse might be problematic.
- unaiverse/__init__.py +19 -0
- unaiverse/agent.py +2008 -0
- unaiverse/agent_basics.py +1846 -0
- unaiverse/clock.py +191 -0
- unaiverse/dataprops.py +1209 -0
- unaiverse/hsm.py +1880 -0
- unaiverse/modules/__init__.py +18 -0
- unaiverse/modules/cnu/__init__.py +17 -0
- unaiverse/modules/cnu/cnus.py +536 -0
- unaiverse/modules/cnu/layers.py +261 -0
- unaiverse/modules/cnu/psi.py +60 -0
- unaiverse/modules/hl/__init__.py +15 -0
- unaiverse/modules/hl/hl_utils.py +411 -0
- unaiverse/modules/networks.py +1509 -0
- unaiverse/modules/utils.py +680 -0
- unaiverse/networking/__init__.py +16 -0
- unaiverse/networking/node/__init__.py +18 -0
- unaiverse/networking/node/connpool.py +1261 -0
- unaiverse/networking/node/node.py +2223 -0
- unaiverse/networking/node/profile.py +446 -0
- unaiverse/networking/node/tokens.py +79 -0
- unaiverse/networking/p2p/__init__.py +198 -0
- unaiverse/networking/p2p/go.mod +127 -0
- unaiverse/networking/p2p/go.sum +548 -0
- unaiverse/networking/p2p/golibp2p.py +18 -0
- unaiverse/networking/p2p/golibp2p.pyi +135 -0
- unaiverse/networking/p2p/lib.go +2714 -0
- unaiverse/networking/p2p/lib.go.sha256 +1 -0
- unaiverse/networking/p2p/lib_types.py +312 -0
- unaiverse/networking/p2p/message_pb2.py +63 -0
- unaiverse/networking/p2p/messages.py +265 -0
- unaiverse/networking/p2p/mylogger.py +77 -0
- unaiverse/networking/p2p/p2p.py +929 -0
- unaiverse/networking/p2p/proto-go/message.pb.go +616 -0
- unaiverse/networking/p2p/unailib.cpython-314-darwin.so +0 -0
- unaiverse/streamlib/__init__.py +15 -0
- unaiverse/streamlib/streamlib.py +210 -0
- unaiverse/streams.py +770 -0
- unaiverse/utils/__init__.py +16 -0
- unaiverse/utils/ask_lone_wolf.json +27 -0
- unaiverse/utils/lone_wolf.json +19 -0
- unaiverse/utils/misc.py +305 -0
- unaiverse/utils/sandbox.py +293 -0
- unaiverse/utils/server.py +435 -0
- unaiverse/world.py +175 -0
- unaiverse-0.1.6.dist-info/METADATA +365 -0
- unaiverse-0.1.6.dist-info/RECORD +50 -0
- unaiverse-0.1.6.dist-info/WHEEL +6 -0
- unaiverse-0.1.6.dist-info/licenses/LICENSE +43 -0
- unaiverse-0.1.6.dist-info/top_level.txt +1 -0
unaiverse/agent.py
ADDED
@@ -0,0 +1,2008 @@
"""
█████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░

A Collectionless AI Project (https://collectionless.ai)
Registration/Login: https://unaiverse.io
Code Repositories: https://github.com/collectionlessai/
Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
"""
import copy
import json
import uuid
import torch
from unaiverse.dataprops import DataProps
from unaiverse.agent_basics import AgentBasics
from unaiverse.streams import BufferedDataStream
from unaiverse.networking.p2p.messages import Msg


class Agent(AgentBasics):
    """This class contains the basic actions that can be performed by every agent."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Status variables (assumed to start with "_"): Agent exchanges
        self._available = True  # It will be automatically set/changed during the agent's life
        self._found_agents = set()  # Peer IDs discovered
        self._valid_cmp_agents = set()  # Agents for which the last evaluation was positive
        self._engaged_agents = set()
        self._agents_who_completed_what_they_were_asked = set()
        self._agents_who_were_asked = set()
        self._eval_results = {}

        # Status variables (assumed to start with "_"): Recordings
        self._last_recorded_stream_num = 1
        self._last_recorded_stream_dict = None
        self._last_recording_stream_dict = None

        # Status variables (assumed to start with "_"): Playlist
        self._preferred_streams = []  # List of preferred streams
        self._cur_preferred_stream = 0  # ID of the current preferred stream from the list
        self._repeat = 1  # Number of repetitions of the playlist

    def remove_peer_from_agent_status_attrs(self, peer_id):
        super().remove_peer_from_agent_status_attrs(peer_id)
        self._available = len(self._engaged_agents) == 0

    def reset_agent_status_attrs(self):
        super().reset_agent_status_attrs()  # This sets status vars to [], {}, 0, 0., False, depending on their type
        self._available = True
        self._repeat = 1
        self._last_recorded_stream_num = 1

    def set_next_action(self, agent: str | None, action: str, args: dict | None = None, ref_uuid: str | None = None):
        """Try to tell another agent which action it should run next.

        Args:
            agent: The ID of the agent to send the action to, or a valid wildcard like "<valid_cmp>" for a set of
                agents (if None, the agents in self._engaged_agents will be considered).
            action: The name of the action to be executed by the agent.
            args: A dictionary of arguments for the action. Defaults to None.
            ref_uuid: An optional UUID for referencing the action. Defaults to None.

        Returns:
            True if the action was successfully sent to the target agent or to at least one of the
            involved agents (wildcard case).
        """

        # - if "agent" is a peer ID, the involved agents will be a list with one element.
        # - if "agent" is a known wildcard, such as "<valid_cmp>", the involved agents will be self._valid_cmp_agents
        # - if "agent" is None, the agents in self._engaged_agents will be considered
        involved_agents = self.__involved_agents(agent)
        if len(involved_agents) == 0:
            return False

        at_least_one_completed = False
        _, private_peer_id = self.get_peer_ids()
        for _peer_id in involved_agents:
            ret = self._node_conn.send(_peer_id, channel_trail=None,
                                       content={"action_name": action, "args": args, "uuid": ref_uuid},
                                       content_type=Msg.ACTION_REQUEST)
            at_least_one_completed = at_least_one_completed or ret
            self.deb(f"[set_next_action] {self._node_name} sent action: {action}, with args: {args}, "
                     f"and result of sending is {ret}")
        return at_least_one_completed

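    # Usage sketch (hypothetical peer ID; the role string is read the same way
    # send_engagement() below reads it):
    #
    #   >>> role = agent._node_profile.get_dynamic_profile()['connections']['role']
    #   >>> agent.set_next_action("QmPeer...", action="get_engagement",
    #   ...                       args={"sender_role": role})
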
    def send_engagement(self):
        """Offer engagement to the agents whose identifiers are in self._found_agents.

        Returns:
            True if engagement requests were successfully sent to at least one found agent, False otherwise.
        """
        at_least_one_sent = False

        if len(self._found_agents) > 0:
            self.out(f"Sending engagement request to {', '.join([x for x in self._found_agents])}")
            my_role_str = self._node_profile.get_dynamic_profile()['connections']['role']
            for found_agent in self._found_agents:
                if self.set_next_action(found_agent, action="get_engagement",
                                        args={"sender_role": my_role_str}):
                    at_least_one_sent = True
                else:
                    self.err(f"Unable to send engagement to {found_agent}")

        return at_least_one_sent

    def get_engagement(self, acceptable_role: str | None = None, sender_role: str | None = None,
                       _requester: str | None = None):
        """Receive engagement from another agent whose authority is in the specified range.

        Args:
            acceptable_role: The role that the sender must have for engagement to be accepted. Defaults to None.
            sender_role: The role of the agent sending the engagement request. Defaults to None.
            _requester: The ID of the agent requesting engagement (automatically set by the action calling routine).

        Returns:
            True if the engagement was successfully received and confirmed, False otherwise.
        """
        self.out(f"Getting engagement from {_requester}, whose role is {sender_role} (looking for {acceptable_role})")
        if _requester not in self.world_agents and _requester not in self.world_masters:
            self.err(f"Unknown agent: {_requester}")
            return False

        if sender_role is None:
            self.err(f"Unknown role of {_requester}")
            return False

        # Confirming
        if self._available:
            acceptable_role_int = self.ROLE_STR_TO_BITS[acceptable_role]
            if "~" not in acceptable_role:
                sender_role_int = (self.ROLE_STR_TO_BITS[sender_role] >> 2) << 2
            else:
                sender_role_int = self.ROLE_STR_TO_BITS[sender_role]

            if acceptable_role_int == sender_role_int:
                if self.set_next_action(_requester, "got_engagement"):
                    self._engaged_agents.add(_requester)

                    # Marking this agent as not available since it engaged with another one
                    self._available = False
                    return True
                else:
                    self.err(f"Unable to confirm engagement to {_requester}")
                    return False
            else:
                self.err(f"Cannot engage with {_requester}: role mismatch")
                return False
        else:
            self.err(f"Cannot engage with {_requester}: this agent is not available")
            return False

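    # The role comparison above masks out the two least-significant bits of the
    # sender's role (via `(bits >> 2) << 2`) whenever the acceptable role string
    # contains no "~". A small illustration with hypothetical bit values:
    #
    #   >>> sender_bits = 0b10110
    #   >>> (sender_bits >> 2) << 2  # 0b10100: low bits dropped before comparing
    #   20
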
    def got_engagement(self, _requester: str | None = None):
        """Confirm an engagement.

        Args:
            _requester: The ID of the agent confirming the engagement (automatically set by the action calling
                routine).

        Returns:
            True if the engagement was successfully confirmed, False otherwise.
        """
        self.out(f"Confirming engagement with {_requester}")
        if _requester in self._found_agents:
            self._engaged_agents.add(_requester)

            # Marking this agent as not available since it engaged with another one
            self._available = False

            # Removing the engaged agent from the list of found agents, to avoid sending it another engagement request
            self._found_agents.discard(_requester)
            return True
        else:
            self.err(f"Unable to confirm engagement with {_requester}")
            return False

    def send_disengagement(self, send_disconnection_too: bool = False):
        """Ask for disengagement.

        Args:
            send_disconnection_too: Whether to send a disconnect-suggestion together with the disengagement.

        Returns:
            True if disengagement requests were successfully sent to at least one engaged agent, False otherwise.
        """
        at_least_one_sent = False

        if len(self._engaged_agents) > 0:
            self.out(f"Sending disengagement request to {', '.join([x for x in self._engaged_agents])}")
            for agent in self._engaged_agents:
                if self.set_next_action(agent, action="get_disengagement",
                                        args={"disconnect_too": send_disconnection_too}):
                    at_least_one_sent = True
                else:
                    self.err(f"Unable to send disengagement to {agent}")

        return at_least_one_sent

    def get_disengagement(self, disconnect_too: bool = False, _requester: str | None = None):
        """Get a disengagement request from an agent.

        Args:
            disconnect_too: Whether to disconnect the agent who sent the disengagement.
            _requester: The ID of the agent requesting disengagement. Defaults to None.

        Returns:
            True if the disengagement request was successfully processed, False otherwise.
        """
        self.out(f"Getting a disengagement request from {_requester}")
        if _requester not in self.world_agents and _requester not in self.world_masters:
            self.err(f"Unknown agent: {_requester}")
            return False

        if _requester not in self._engaged_agents:
            self.err(f"Not previously engaged to {_requester}")
            return False

        if disconnect_too:
            self._node_purge_fcn(_requester)

        self._engaged_agents.discard(_requester)  # Remove if present

        # Marking this agent as available if not engaged to any agent
        self._available = len(self._engaged_agents) == 0
        return True

    def disengage_all(self):
        """Disengage all the previously engaged agents.

        Returns:
            True if the disengagement procedure was successfully executed, False otherwise.
        """
        self.out("Disengaging all agents")
        self._engaged_agents = set()

        # Marking this agent as available
        self._available = True
        return True

    def disconnect_by_role(self, role: str | list[str]):
        """Disconnects from all agents that match a specified role.
        It finds the agents and calls the node's purge function on each.

        Args:
            role: A string or list of strings representing the role(s) of agents to disconnect from.

        Returns:
            Always True.
        """
        self.out(f"Disconnecting agents with role: {role}")
        if self.find_agents(role):
            found_agents = copy.deepcopy(self._found_agents)
            for agent in found_agents:
                self._node_purge_fcn(agent)  # This will also call remove_agent, which will call remove_streams
        return True

    def disconnected(self, agent: str | None = None, delay: float = -1.):
        """Checks if a specific set of agents (by ID or wildcard) are no longer connected to this agent.
        It returns False if any of the specified agents are still connected.

        Args:
            agent: The ID of the agent or a wildcard to check.
            delay: The time (seconds) to be spent in the current state before actually considering this action.

        Returns:
            True if all involved agents are disconnected, False otherwise.
        """
        assert delay is not None, "Missing basic action information"

        # - if "agent" is a peer ID, the involved agents will be a list with one element.
        # - if "agent" is a known wildcard, such as "<valid_cmp>", the involved agents will be self._valid_cmp_agents
        # - if "agent" is None, the agents in self._engaged_agents will be considered
        involved_agents = self.__involved_agents(agent)
        if len(involved_agents) == 0:
            return False

        self.out(f"Checking if all these agents are not connected to me anymore: {involved_agents}")
        all_disconnected = True
        for agent in involved_agents:
            if agent in self.world_agents or agent in self.public_agents or agent in self._node_agents_waiting \
                    or self._node_conn.is_connected(agent):
                all_disconnected = False
                break
        return all_disconnected

    def received_some_asked_data(self, processing_fcn: str | None = None):
        """Checks if any of the agents that were previously asked for data (e.g., via `ask_gen`) have sent a stream
        sample back. Optionally, it can process the received data with a specified function.

        Args:
            processing_fcn: The name of a function to process the received data.

        Returns:
            True if at least one data sample was received, False otherwise.
        """
        _processing_fcn = None
        if processing_fcn is not None:
            if hasattr(self, processing_fcn):
                _processing_fcn = getattr(self, processing_fcn)
                if not callable(_processing_fcn):
                    _processing_fcn = None
            if _processing_fcn is None:
                self.err(f"Processing function not found: {processing_fcn}")

        got_something = False
        for agent in self._agents_who_were_asked:
            net_hash_to_stream_dict = self.find_streams(agent, "processor")
            for stream_dict in net_hash_to_stream_dict.values():
                for stream_obj in stream_dict.values():
                    if not stream_obj.props.is_public():
                        data = stream_obj.get("received_some_asked_data")
                        data_tag = stream_obj.get_tag()

                        if data is not None:
                            if _processing_fcn is None:
                                return True
                            else:
                                got_something = True
                                _processing_fcn(agent, stream_obj.props, data, data_tag)
        return got_something

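    # Sketch of a processing callback usable with received_some_asked_data()
    # (hypothetical method name; the four-argument signature mirrors the
    # `_processing_fcn(agent, stream_obj.props, data, data_tag)` call above):
    #
    #   >>> class MyAgent(Agent):
    #   ...     def collect_sample(self, agent_id, props, data, data_tag):
    #   ...         self.out(f"Sample tagged {data_tag} from {agent_id}")
    #
    #   >>> my_agent.received_some_asked_data(processing_fcn="collect_sample")
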
    def nop(self, message: str | None = None, delay: float = -1.):
        """Do nothing.

        Args:
            message: An optional message to print. Defaults to None.
            delay: The time (seconds) to be spent in the current state before actually considering this action.

        Returns:
            Always True.
        """
        assert delay is not None, "Missing basic action information"
        if message is not None:
            self.out(message)
        return True

    def wait_for_actions(self, agent: str, from_state: str, to_state: str, wait: bool):
        """Lock or unlock every action between a pair of states in the state machine of a target agent.

        Args:
            agent: The ID of the agent to send the action locking request to, or a valid wildcard like "<valid_cmp>"
                for a set of agents (if None, the agents in self._engaged_agents will be considered).
            from_state: The starting state of the actions to be locked/unlocked.
            to_state: The ending state of the actions to be locked/unlocked.
            wait: A boolean indicating whether to wait for the actions to complete (wait == not ready).

        Returns:
            True if the request was successfully sent to at least one involved agent, False otherwise.
        """

        # - if "agent" is a peer ID, the involved agents will be a list with one element.
        # - if "agent" is a known wildcard, such as "<valid_cmp>", the involved agents will be self._valid_cmp_agents
        # - if "agent" is None, the agents in self._engaged_agents will be considered
        involved_agents = self.__involved_agents(agent)
        if len(involved_agents) == 0:
            return False

        at_least_one_completed = False
        for _agent in involved_agents:
            self.out(f"Telling {_agent} to alter its HSM {from_state} -> {to_state} (wait: {wait}) "
                     f"by calling method 'wait_for_actions' on it")
            ret = self._node_conn.send(_agent, channel_trail=None,
                                       content={'method': 'wait_for_actions', 'args': (from_state, to_state, wait)},
                                       content_type=Msg.HSM)
            at_least_one_completed = at_least_one_completed or ret
        return at_least_one_completed

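    # Usage sketch (hypothetical state names; wait=True locks the transitions,
    # wait=False unlocks them on every engaged agent):
    #
    #   >>> agent.wait_for_actions(None, from_state="idle", to_state="serving", wait=True)
    #   >>> agent.wait_for_actions(None, from_state="idle", to_state="serving", wait=False)
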
    def ask_gen(self, agent: str | None = None, u_hashes: list[str] | None = None,
                samples: int = 100, time: float = -1., timeout: float = -1., ask_uuid: str | None = None,
                ignore_uuid: bool = False):
        """Asking for generation.

        Args:
            agent: The ID of the agent to ask for generation, or a valid wildcard like "<valid_cmp>"
                for a set of agents (if None, the agents in self._engaged_agents will be considered).
            u_hashes: A list of input stream hashes for generation. Defaults to None.
            samples: The number of samples to generate. Defaults to 100.
            time: The time duration for generation. Defaults to -1.
            timeout: The timeout for the generation request. Defaults to -1.
            ask_uuid: Specify the UUID of the action (if None - default - it is randomly generated).
            ignore_uuid: Force a None UUID instead of generating a random one.

        Returns:
            True if the generation request was successfully sent to at least one involved agent, False otherwise.
        """
        assert samples is not None and time is not None and timeout is not None, "Missing basic action information"

        # - if "agent" is a peer ID, the involved agents will be a list with one element.
        # - if "agent" is a known wildcard, such as "<valid_cmp>", the involved agents will be self._valid_cmp_agents
        # - if "agent" is None, the agents in self._engaged_agents will be considered
        involved_agents = self.__involved_agents(agent)
        self.deb(f"[ask_gen] Involved agents: {involved_agents}")

        if len(involved_agents) == 0:
            self.deb(f"[ask_gen] No involved agents, action ask_gen returns False")
            return False

        # Create a copy of the input hashes, normalizing them in the appropriate way
        u_hashes_copy: list[str | None] = [None] * len(u_hashes)
        for i in range(len(u_hashes_copy)):
            if u_hashes[i] == "<playlist>":

                # From <playlist> to the current element of the playlist
                u_hashes_copy[i] = self._preferred_streams[self._cur_preferred_stream]
            else:

                # From a user specified hash to a net hash (e.g., peer_id:name_or_group to peer_id::ps:name_or_group)
                u_hashes_copy[i] = self.user_stream_hash_to_net_hash(u_hashes[i])

        # Generate a new UUID for this request
        ref_uuid = uuid.uuid4().hex[0:8] if ask_uuid is None else ask_uuid
        if ignore_uuid:
            ref_uuid = None

        # If the input streams are not all owned by this agent, discard the UUID
        all_owned = True
        for i in range(len(u_hashes_copy)):
            if u_hashes_copy[i] not in self.owned_streams:
                all_owned = False
                break
        if not all_owned:
            ref_uuid = None

        for i in range(len(u_hashes_copy)):

            # If our own streams are involved, and they are buffered, let's plan to restart them when we
            # start sending them through the net; moreover, let's set the local stream UUID to
            # the generated UUID
            if u_hashes_copy[i] in self.owned_streams:
                stream_dict = self.known_streams[u_hashes_copy[i]]
                for stream_name, stream_obj in stream_dict.items():

                    # Plan to restart buffered streams
                    if isinstance(stream_obj, BufferedDataStream):
                        stream_obj.plan_restart_before_next_get(requested_by="send_stream_samples")

                    # Activate the stream (if it was off)
                    stream_obj.enable()

                    # Set UUID to the generated one
                    stream_obj.set_uuid(ref_uuid=ref_uuid, expected=False)
                    stream_obj.set_uuid(ref_uuid=None, expected=True)

        self.deb(f"[ask_gen] Input streams u_hashes: {u_hashes_copy}")

        self.out(f"Asking {', '.join(involved_agents)} to generate signal given {u_hashes_copy} (ref_uuid: {ref_uuid})")
        self._agents_who_completed_what_they_were_asked = set()
        self._agents_who_were_asked = set()
        correctly_asked = []
        for peer_id in involved_agents:
            ret = self.__ask_gen_or_learn(for_what="gen", agent=peer_id,
                                          u_hashes=u_hashes_copy,
                                          yhat_hashes=None,
                                          samples=samples, time=time, timeout=timeout, ref_uuid=ref_uuid)
            self.deb(f"[ask_gen] Asking {peer_id} returned {ret}")
            if ret:
                correctly_asked.append(peer_id)

        # Preparing the buffered stream where to store data, if needed
        if len(correctly_asked) > 0:

            # Saving
            self.last_ref_uuid = ref_uuid

            # For each agent that we involve in this request...
            for peer_id in correctly_asked:

                # Finding the streams generated by the processor of the agent we asked to generate
                processor_streams = self.find_streams(peer_id, name_or_group="processor")

                # For each stream generated by the processor of the agent we asked to generate...
                for net_hash, stream_dict in processor_streams.items():

                    # Set the appropriate UUID to the one we created in this method
                    for stream in stream_dict.values():
                        stream.set_uuid(None, expected=False)
                        stream.set_uuid(ref_uuid, expected=True)  # Setting the "expected" one

        self.deb(f"[ask_gen] Overall, the action ask_gen will return {len(correctly_asked) > 0}")
        return len(correctly_asked) > 0

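    # Typical cycle (hypothetical user-form stream hash "peer_id:stream_name"):
    # ask_gen() sends the request, each remote agent runs do_gen() step by step,
    # and a done_gen() confirmation flows back (see the two methods below):
    #
    #   >>> agent.ask_gen(agent=None,                     # all engaged agents
    #   ...               u_hashes=["QmPeer...:camera"],  # normalized internally
    #   ...               samples=50, timeout=10.)
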
    def do_gen(self, u_hashes: list[str] | None = None,
               samples: int = 100, time: float = -1., timeout: float = -1.,
               _requester: str | list | None = None, _request_time: float = -1., _request_uuid: str | None = None,
               _completed: bool = False) -> bool:
        """Generate a signal.

        Args:
            u_hashes: A list of input stream hashes for generation. Defaults to None.
            samples: The number of samples to generate. Defaults to 100.
            time: The max time duration for the whole generation process. Defaults to -1.
            timeout: The timeout for generation attempts: if calling the generate action fails for more than
                "timeout" seconds, it is declared as complete. Defaults to -1.
            _requester: The ID of the agent who requested generation (automatically set by the action calling
                routine).
            _request_time: The time the generation was requested (automatically set by the action calling routine).
            _request_uuid: The UUID of the generation request (automatically set by the action calling routine).
            _completed: A boolean indicating if the generation is already completed (automatically set by the action
                calling routine). This will tell that it is time to run a final procedure.

        Returns:
            True if the signal generation was successful, False otherwise.
        """
        assert samples is not None and time is not None and timeout is not None, "Missing basic action information"

        self.deb(f"[do_gen] Samples: {samples}, time: {time}, timeout: {timeout}, "
                 f"requester: {_requester}, request_time: {_request_time}, request_uuid: {_request_uuid}, "
                 f"completed: {_completed}")

        if _requester is not None:
            if isinstance(_requester, list):
                for _r in _requester:
                    if self.behaving_in_world():
                        if _r not in self.world_agents and _r not in self.world_masters:
                            self.err(f"Unknown agent: {_r} in list {_requester} (fully skipping generation)")
                            return False
                    else:
                        if _r not in self.public_agents:
                            self.err(f"Unknown agent: {_r} in list {_requester} (fully skipping generation)")
                            return False
            else:
                if self.behaving_in_world():
                    if _requester not in self.world_agents and _requester not in self.world_masters:
                        self.err(f"Unknown agent: {_requester} (fully skipping generation)")
                        return False
                else:
                    if _requester not in self.public_agents:
                        self.err(f"Unknown agent: {_requester} (fully skipping generation)")
                        return False

        # Check what is the step ID of the multistep action
        k = self.get_action_step()

        # In the first step of this action, we change the UUID of the local streams associated to the input data we
        # will use to handle this action, setting expectations to avoid handling tags of old data
        if k == 0:

            # Warning: we are not normalizing the hashes; we should do it if this action is called directly
            if u_hashes is not None:
                for net_hash in u_hashes:
                    if net_hash in self.known_streams:
                        for stream_name, stream_obj in self.known_streams[net_hash].items():

                            # If the data arrived before this action, then the UUID is already set, and there is
                            # no need to do anything here; if the data has not yet arrived (common case) ...
                            if stream_obj.get_uuid(expected=False) != _request_uuid:
                                stream_obj.set_uuid(None, expected=False)  # Clearing UUID
                                stream_obj.set_uuid(_request_uuid, expected=True)  # Setting expectations
                    else:
                        self.out(f"Unknown stream mentioned in u_hashes: {net_hash}")
                        return False

        if not _completed:
            self.out(f"Generating signal")
            ret = self.__process_streams(u_hashes=u_hashes, yhat_hashes=None, learn=False,
                                         recipient=_requester, ref_uuid=_request_uuid)
            if not ret:
                self.out(f"Generating signal failed")
            else:
                if not self.is_multi_steps_action():
                    self.out(f"Completing signal generation (degenerate single-step case of a multi-step action)")
                    ret = self.__complete_do(do_what="gen", peer_id_who_asked=_requester, all_hashes=u_hashes,
                                             send_back_confirmation=False)
                    if not ret:
                        self.out(f"Completing signal generation failed")
            return ret
        else:
            self.out(f"Completing signal generation")
            ret = self.__complete_do(do_what="gen", peer_id_who_asked=_requester, all_hashes=u_hashes)
            if not ret:
                self.out(f"Completing signal generation failed")
            return ret

    def done_gen(self, _requester: str | None = None):
        """This is a way to get back the confirmation of a completed generation.

        Args:
            _requester: The ID of the agent who completed the generation. Defaults to None.

        Returns:
            True if the generation confirmation was successfully handled by this agent, False if something went
            wrong.
        """
        self.out(f"Agent {_requester} finished generation")

        # Searching for the processor-streams of the agent who generated data
        processor_streams = self.find_streams(_requester, name_or_group="processor")
        if processor_streams is None or len(processor_streams) == 0:
            self.err("Unexpected confirmation of finished generation")
            return False

        # Remembering that the agent that invoked this action is the one who generated the data; what it generated
        # could be used in future actions (for example, in evaluation processes)
        self._agents_who_completed_what_they_were_asked.add(_requester)

        # Clearing the UUID of the local streams associated to the agent who generated
        for net_hash, stream_dict in processor_streams.items():
            for stream_obj in stream_dict.values():
                stream_obj.set_uuid(None, expected=False)
                stream_obj.set_uuid(None, expected=True)

        # If one or more of my streams were used as arguments of the generation request I made (ask_gen), then their
        # UUIDs must be cleared... we clear them all
        for net_hash, stream_dict in self.owned_streams.items():
            for stream_obj in stream_dict.values():
                if stream_obj.props.is_public() != self.behaving_in_world():
                    stream_obj.set_uuid(None, expected=False)
                    stream_obj.set_uuid(None, expected=True)
        return True

    def ask_learn(self, agent: str | None = None,
                  u_hashes: list[str] | None = None, yhat_hashes: list[str] | None = None,
                  samples: int = 100, time: float = -1., timeout: float = -1., ask_uuid: str | None = None,
                  ignore_uuid: bool = False):
        """Asking for learning to generate.

        Args:
            agent: The ID of the agent to ask for learning, or a valid wildcard like "<valid_cmp>"
                for a set of agents (if None, the agents in self._engaged_agents will be considered).
            u_hashes: A list of input stream hashes for inference. Defaults to None.
            yhat_hashes: A list of target stream hashes to be used for loss computation. Defaults to None.
            samples: The number of samples to learn from. Defaults to 100.
            time: The time duration for learning. Defaults to -1.
            timeout: The timeout for the learning request. Defaults to -1.
            ask_uuid: Specify the action UUID (default = None, i.e., it is automatically generated).
            ignore_uuid: If True, the UUID is fully ignored (i.e., forced to None).

        Returns:
            True if the learning request was successfully sent to at least one involved agent, False otherwise.
        """
        assert samples is not None and time is not None and timeout is not None, "Missing basic action information"

        # - if "agent" is a peer ID, the involved agents will be a list with one element.
        # - if "agent" is a known wildcard, such as "<valid_cmp>", the involved agents will be self._valid_cmp_agents
        # - if "agent" is None, the agents in self._engaged_agents will be considered
        involved_agents = self.__involved_agents(agent)
        self.deb(f"[ask_learn] Involved agents: {involved_agents}")

        if len(involved_agents) == 0:
            self.deb(f"[ask_learn] No involved agents, action will return False")
            return False

        # Create a copy of the input hashes, normalizing them in the appropriate way
        u_hashes_copy = [x for x in u_hashes]
        for i in range(len(u_hashes_copy)):
            if u_hashes_copy[i] == "<playlist>":

                # From <playlist> to the current element of the playlist
                u_hashes_copy[i] = self._preferred_streams[self._cur_preferred_stream]
            else:

                # From a user specified hash to a net hash (e.g., peer_id:name_or_group to peer_id::ps:name_or_group)
                u_hashes_copy[i] = self.user_stream_hash_to_net_hash(u_hashes_copy[i])

        # Create a copy of the target hashes, normalizing them in the appropriate way
        yhat_hashes_copy = [x for x in yhat_hashes]
        for i in range(len(yhat_hashes_copy)):
            if yhat_hashes_copy[i] == "<playlist>":

                # From <playlist> to the current element of the playlist
                yhat_hashes_copy[i] = self._preferred_streams[self._cur_preferred_stream]
            else:

                # From a user specified hash to a net hash (e.g., peer_id:name_or_group to peer_id::ps:name_or_group)
                yhat_hashes_copy[i] = self.user_stream_hash_to_net_hash(yhat_hashes_copy[i])

        # Generate a new UUID for this request
        ref_uuid = uuid.uuid4().hex[0:8] if ask_uuid is None else ask_uuid
        if ignore_uuid:
            ref_uuid = None

        # If the input/target streams are not all owned by this agent, discard the UUID
        all_owned = True
        for i in range(len(u_hashes_copy)):
            if u_hashes_copy[i] not in self.owned_streams:
                all_owned = False
                break
        if all_owned:
            for i in range(len(yhat_hashes_copy)):
                if yhat_hashes_copy[i] not in self.owned_streams:
                    all_owned = False
                    break
        if not all_owned:
            ref_uuid = None

        for i in range(len(u_hashes_copy)):

            # If our own streams are involved, and they are buffered, let's plan to restart them when we
            # start sending them through the net; moreover, let's set the local stream UUID to
            # the generated UUID
            if u_hashes_copy[i] in self.owned_streams:
                stream_dict = self.known_streams[u_hashes_copy[i]]
                for stream_name, stream_obj in stream_dict.items():

                    # Plan to restart buffered streams
                    if isinstance(stream_obj, BufferedDataStream):
                        stream_obj.plan_restart_before_next_get(requested_by="send_stream_samples")

                    # Activate the stream (if it was off)
                    stream_obj.enable()

                    # Set UUID to the generated one
                    stream_obj.set_uuid(ref_uuid=ref_uuid, expected=False)
                    stream_obj.set_uuid(ref_uuid=None, expected=True)

        for i in range(len(yhat_hashes_copy)):

            # Same as above, for the target streams
            if yhat_hashes_copy[i] in self.owned_streams:
                stream_dict = self.known_streams[yhat_hashes_copy[i]]
                for stream_name, stream_obj in stream_dict.items():

                    # Plan to restart buffered streams
                    if isinstance(stream_obj, BufferedDataStream):
                        stream_obj.plan_restart_before_next_get(requested_by="send_stream_samples")

                    # Activate the stream (if it was off)
                    stream_obj.enable()

                    # Set UUID to the generated one
                    stream_obj.set_uuid(ref_uuid=ref_uuid, expected=False)
                    stream_obj.set_uuid(ref_uuid=None, expected=True)

        self.out(f"Asking {', '.join(involved_agents)} to learn to generate signal {yhat_hashes_copy}, "
                 f"given {u_hashes_copy} (ref_uuid: {ref_uuid})")
        self._agents_who_completed_what_they_were_asked = set()
        self._agents_who_were_asked = set()
        correctly_asked = []
        for peer_id in involved_agents:
            ret = self.__ask_gen_or_learn(for_what="learn", agent=peer_id,
                                          u_hashes=u_hashes_copy,
                                          yhat_hashes=yhat_hashes_copy,
                                          samples=samples, time=time, timeout=timeout, ref_uuid=ref_uuid)
            self.deb(f"[ask_learn] Asking {peer_id} returned {ret}")
            if ret:
                correctly_asked.append(peer_id)

        # Preparing the buffered stream where to store data, if needed
        if len(correctly_asked) > 0:

            # Saving
            self.last_ref_uuid = ref_uuid

            # For each agent that we involve in this request...
            for peer_id in correctly_asked:

                # Finding the streams generated by the processor of the agent we asked to learn
                processor_streams = self.find_streams(peer_id, name_or_group="processor")

                # For each stream generated by the processor of the agent we asked to learn...
                for net_hash, stream_dict in processor_streams.items():

                    # Set the appropriate UUID to the one we created in this method
                    for stream in stream_dict.values():
                        stream.set_uuid(None, expected=False)
                        stream.set_uuid(ref_uuid, expected=True)  # Setting the "expected" one

        self.deb(f"[ask_learn] Overall, the action ask_learn will return {len(correctly_asked) > 0}")
        return len(correctly_asked) > 0

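    # Usage sketch (hypothetical stream hashes): inputs go in u_hashes, targets
    # for the loss in yhat_hashes; confirmations arrive through done_learn():
    #
    #   >>> agent.ask_learn(agent=None,
    #   ...                 u_hashes=["QmPeer...:camera"],
    #   ...                 yhat_hashes=["QmPeer...:labels"],
    #   ...                 samples=200, timeout=15.)
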
    def do_learn(self, yhat_hashes: list[str] | None = None, u_hashes: list[str] | None = None,
                 samples: int = 100, time: float = -1., timeout: float = -1.,
                 _requester: str | None = None, _request_time: float = -1., _request_uuid: str | None = None,
                 _completed: bool = False) -> bool:
        """Learn to generate a signal.

        Args:
            yhat_hashes: A list of target stream hashes to be used for loss computation. Defaults to None.
            u_hashes: A list of input stream hashes for inference. Defaults to None.
            samples: The number of samples to learn from. Defaults to 100.
            time: The max time duration of the learning procedure. Defaults to -1.
            timeout: The timeout for learning attempts: if calling the learning action fails for more than "timeout"
                seconds, it is declared as complete. Defaults to -1.
            _requester: The ID of the agent who requested learning (automatically set by the action calling routine).
            _request_time: The time learning was requested (automatically set by the action calling routine).
            _request_uuid: The UUID of the learning request (automatically set by the action calling routine).
            _completed: A boolean indicating if the learning is already completed (automatically set by the action
                calling routine). This will tell that it is time to run a final procedure.

        Returns:
            True if learning was successful, False otherwise.
        """
        assert samples is not None and time is not None and timeout is not None, "Missing basic action information"

        self.deb(f"[do_learn] samples: {samples}, time: {time}, timeout: {timeout}, "
                 f"requester: {_requester}, request_time: {_request_time}, request_uuid: {_request_uuid}, "
                 f"completed: {_completed}")

        if _requester not in self.world_agents and _requester not in self.world_masters:
            self.err(f"Unknown agent: {_requester}")
            return False

        # Check what is the step ID of the multistep action
        k = self.get_action_step()

        # In the first step of this action, we change the UUID of the local streams associated to the input data we
        # will use to handle this action, setting expectations to avoid handling tags of old data
        if k == 0:

            # Warning: we are not normalizing the hashes; we should do it if this action is called directly
            if u_hashes is not None:
                for net_hash in u_hashes:
                    if net_hash in self.known_streams:
                        for stream_obj in self.known_streams[net_hash].values():

                            # If the data arrived before this action, then the UUID is already set, and there is
                            # no need to do anything here; if the data has not yet arrived (common case) ...
                            if stream_obj.get_uuid(expected=False) != _request_uuid:
                                stream_obj.set_uuid(None, expected=False)  # Clearing UUID
                                stream_obj.set_uuid(_request_uuid, expected=True)  # Setting expectations

            # Warning: we are not normalizing the hashes; we should do it if this action is called directly
            if yhat_hashes is not None:
                for net_hash in yhat_hashes:
                    if net_hash in self.known_streams:
                        for stream_obj in self.known_streams[net_hash].values():
                            if stream_obj.get_uuid(expected=False) != _request_uuid:
                                stream_obj.set_uuid(None, expected=False)  # Clearing UUID
                                stream_obj.set_uuid(_request_uuid, expected=True)  # Setting expectations

        if not _completed:
            self.out(f"Learning to generate signal {yhat_hashes}")
            ret = self.__process_streams(u_hashes=u_hashes, yhat_hashes=yhat_hashes, learn=True,
                                         recipient=_requester, ref_uuid=_request_uuid)
            if not ret:
                self.out(f"Learning to generate signal {yhat_hashes} failed")
            return ret
        else:
            self.out(f"Completing learning to generate signal {yhat_hashes}")
            all_hashes = (u_hashes if u_hashes is not None else []) + (yhat_hashes if yhat_hashes is not None else [])
            ret = self.__complete_do(do_what="learn", peer_id_who_asked=_requester, all_hashes=all_hashes)
            if not ret:
                self.out(f"Completing learning to generate signal {yhat_hashes} failed")
            return ret

    def done_learn(self, _requester: str | None = None):
        """This is a way to get back the confirmation of a completed learning procedure.

        Args:
            _requester: The ID of the agent who completed the learning procedure. Defaults to None.

        Returns:
            True if the learning-complete confirmation was successfully handled by this agent, False otherwise.
        """
        self.out(f"Agent {_requester} finished learning")
        self._agents_who_completed_what_they_were_asked.add(_requester)

        # Searching for the processor-streams of the agent who generated the (inference) data
        processor_streams = self.find_streams(_requester, name_or_group="processor")
        if processor_streams is None or len(processor_streams) == 0:
            self.err("Unexpected confirmation of finished learning")
            return False

        # Warning: differently from the case of done_gen, we are not considering the streams generated by the
        # learning agents as something we could use for evaluation (this might be changed in the future)

        # Clearing the UUID of the local streams associated to the agent who learned
        for net_hash, stream_dict in processor_streams.items():
            for stream_obj in stream_dict.values():
                stream_obj.set_uuid(None, expected=False)
                stream_obj.set_uuid(None, expected=True)

        # If one or more of my streams were used as arguments of the learning request I made (ask_learn), then their
        # UUIDs must be cleared... we clear them all
        for net_hash, stream_dict in self.owned_streams.items():
            for stream_obj in stream_dict.values():
                if stream_obj.props.is_public() != self.behaving_in_world():
                    stream_obj.set_uuid(None, expected=False)
                    stream_obj.set_uuid(None, expected=True)
        return True

    def all_asked_finished(self):
        """Checks if all agents that were previously asked to perform a task (e.g., generate or learn) have sent a
        completion confirmation. It compares the set of agents asked with the set of agents that have completed
        the task.

        Returns:
            True if all agents are done, False otherwise.
        """
        return self._agents_who_were_asked == self._agents_who_completed_what_they_were_asked

    def all_engagements_completed(self):
        """Checks if all engagement requests that were sent have been confirmed. It returns True if there are no
        agents remaining in the `_found_agents` list, implying all have been engaged with or discarded.

        Returns:
            True if all engagements are complete, False otherwise.
        """
        return len(self._found_agents) == 0

    def agents_are_waiting(self):
        """Checks if there are any agents who have connected but have not yet been fully processed or added to the
        agent's known lists. This indicates that new agents are waiting to be managed.

        Returns:
            True if there are waiting agents, False otherwise.
        """
        self.out(f"Current set of {len(self._node_agents_waiting)} connected peer IDs not managed yet: "
                 f"{self._node_agents_waiting}")
        for found_agent in self._found_agents:
            if found_agent in self._node_agents_waiting:
                return True
        return False

    def ask_subscribe(self, agent: str | None = None,
                      stream_hashes: list[str] | None = None, unsubscribe: bool = False):
        """Requests a remote agent or a group of agents to subscribe to or unsubscribe from a list of specified
        PubSub streams. It normalizes the stream hashes and sends an action request containing the stream properties.

        Args:
            agent: The target agent's ID or a wildcard.
            stream_hashes: A list of streams to subscribe to or unsubscribe from.
            unsubscribe: A boolean to indicate if it's an unsubscription request.

        Returns:
            True if the request was sent to at least one agent, False otherwise.
        """

        # - if "agent" is a peer ID, the involved agents will be a list with one element.
        # - if "agent" is a known wildcard, such as "<valid_cmp>", the involved agents will be self._valid_cmp_agents
        # - if "agent" is None, the agents in self._engaged_agents will be considered
        involved_agents = self.__involved_agents(agent)
        self.deb(f"[ask_subscribe] Involved agents: {involved_agents}")

        if len(involved_agents) == 0:
            self.deb(f"[ask_subscribe] No involved agents, action ask_subscribe returns False")
            return False

        # Create a copy of the stream hashes, normalizing them in the appropriate way
        stream_hashes_copy: list[str | None] = [None] * len(stream_hashes)
        for i in range(len(stream_hashes_copy)):
            if stream_hashes[i] == "<playlist>":

                # From <playlist> to the current element of the playlist
                stream_hashes_copy[i] = self._preferred_streams[self._cur_preferred_stream]
            else:

                # From a user specified hash to a net hash (e.g., peer_id:name_or_group to peer_id::ps:name_or_group)
                stream_hashes_copy[i] = self.user_stream_hash_to_net_hash(stream_hashes[i])

        # Getting properties
        stream_owners = []
        stream_props = []
        for i in range(len(stream_hashes_copy)):
            stream_dict = self.known_streams[stream_hashes_copy[i]]
            peer_id = DataProps.peer_id_from_net_hash(stream_hashes_copy[i])
            for name, stream_obj in stream_dict.items():
                stream_owners.append(peer_id)
                stream_props.append(json.dumps(stream_obj.props.to_dict()))

        what = "subscribe to" if not unsubscribe else "unsubscribe from"
        self.out(f"Asking {', '.join(involved_agents)} to {what} {stream_hashes}")
        self._agents_who_completed_what_they_were_asked = set()
        self._agents_who_were_asked = set()
        correctly_asked = []
        for agent in involved_agents:
            if self.set_next_action(agent, action="do_subscribe", args={"stream_owners": stream_owners,
                                                                        "stream_props": stream_props,
                                                                        "unsubscribe": unsubscribe}):
                self._agents_who_were_asked.add(agent)
                ret = True
            else:
                what = "subscribe" if not unsubscribe else "unsubscribe"
                self.err(f"Unable to ask {agent} to {what}")
                ret = False
            self.deb(f"[ask_subscribe] Asking {agent} returned {ret}")
            if ret:
                correctly_asked.append(agent)

        self.deb(f"[ask_subscribe] Overall, the action ask_subscribe (unsubscribe: {unsubscribe})"
                 f" will return {len(correctly_asked) > 0}")
        return len(correctly_asked) > 0

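    # Usage sketch (hypothetical peer and stream): the request ships the
    # JSON-serialized stream properties, and the remote agent answers by
    # running do_subscribe() below:
    #
    #   >>> agent.ask_subscribe(agent="QmPeer...",
    #   ...                     stream_hashes=["QmOwner...:audio"],
    #   ...                     unsubscribe=False)
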
|
980
|
+
    def do_subscribe(self, stream_owners: list[str] | None = None, stream_props: list[str] | None = None,
                     unsubscribe: bool = False,
                     _requester: str | list | None = None, _request_time: float = -1.):
        """Executes a subscription or unsubscription request received from another agent. It processes the stream
        properties, adds or removes the streams from the agent's known streams, and handles the underlying PubSub
        topic subscriptions.

        Args:
            stream_owners: A list of peer IDs who own the streams.
            stream_props: A list of JSON-serialized stream properties.
            unsubscribe: A boolean to indicate unsubscription.
            _requester: The ID of the requesting agent.
            _request_time: The time the request was made.

        Returns:
            True if the action is successful, False otherwise.
        """
        self.deb(f"[do_subscribe] unsubscribe: {unsubscribe}, "
                 f"stream_owners: {stream_owners}, stream_props: ... ({len(stream_props)} props)")

        if _requester is not None:
            if isinstance(_requester, list):
                for _r in _requester:
                    if self.behaving_in_world():
                        if _r not in self.world_agents and _r not in self.world_masters:
                            self.err(f"Unknown agent: {_r} in list {_requester} (fully skipping do_subscribe)")
                            return False
                    else:
                        if _r not in self.public_agents:
                            self.err(f"Unknown agent: {_r} in list {_requester} (fully skipping do_subscribe)")
                            return False
            else:
                if self.behaving_in_world():
                    if _requester not in self.world_agents and _requester not in self.world_masters:
                        self.err(f"Unknown agent: {_requester} (fully skipping do_subscribe)")
                        return False
                else:
                    if _requester not in self.public_agents:
                        self.err(f"Unknown agent: {_requester} (fully skipping do_subscribe)")
                        return False
        else:
            self.err("Unknown requester (None)")
            return False

        # Building properties
        props_dicts = []
        props_objs = []
        for i in range(len(stream_props)):
            p_dict = json.loads(stream_props[i])
            props = DataProps.from_dict(p_dict)
            if props.is_pubsub():
                props_dicts.append(p_dict)
                props_objs.append(props)
            else:
                self.err(f"Expecting a pubsub stream, got a stream named {props.get_name()} "
                         f"(group is {props.get_group()}), which is not pubsub")
                return False

        # Adding new streams and subscribing (if compatible with our processor)
        for stream_owner, prop_dict, prop_obj in zip(stream_owners, props_dicts, props_objs):
            if not unsubscribe:
                if not self.add_compatible_streams(peer_id=stream_owner, streams_in_profile=[prop_dict],
                                                   buffered=False, public=False):
                    self.out(f"Unable to add a pubsub stream ({prop_obj.get_name()}) from agent {stream_owner}: "
                             f"no compatible streams were found")
            else:
                if not self.remove_streams(peer_id=stream_owner, name=prop_obj.get_name()):
                    self.out(f"Unable to unsubscribe from pubsub stream ({prop_obj.get_name()}) "
                             f"of agent {stream_owner}")
        return True

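    # Illustrative sketch (not part of the original source): the `stream_props` argument of
    # `do_subscribe` carries JSON-serialized DataProps entries, one per owner, e.g.:
    #
    #     stream_owners = ["QmPeerA..."]                    # hypothetical peer ID
    #     stream_props = [json.dumps(props.to_dict())]      # assuming DataProps exposes a to_dict()
    #     agent.do_subscribe(stream_owners, stream_props, unsubscribe=False, _requester="QmPeerB...")
    #
    # Whether DataProps offers `to_dict()` is an assumption here; the method above only relies on
    # the symmetric `DataProps.from_dict(json.loads(...))` call that it performs itself.
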
    def done_subscribe(self, unsubscribe: bool = False, _requester: str | None = None):
        """Handles the confirmation that a subscription or unsubscription request has been completed by another agent.
        It adds the requester to the set of agents that have completed their asked tasks.

        Args:
            unsubscribe: A boolean indicating if it was an unsubscription.
            _requester: The ID of the agent who completed the task.

        Returns:
            Always True.
        """
        what = "unsubscribing" if unsubscribe else "subscribing"
        self.out(f"Agent {_requester} finished {what}")

        # Remembering that the agent that invoked this action is the one who actually subscribed
        self._agents_who_completed_what_they_were_asked.add(_requester)
        return True

    def record(self, net_hash: str, samples: int = 100, time: float = -1., timeout: float = -1.):
        """Records data from a specified stream into a new, owned `BufferedDataStream`. This is a multistep action
        that captures a sequence of samples over time and then adds the new recorded stream to the agent's profile.

        Args:
            net_hash: The hash of the stream to record.
            samples: The number of samples to record.
            time: The time duration for recording.
            timeout: The timeout for each recording attempt.

        Returns:
            True if a sample was successfully recorded, False otherwise.
        """
        assert samples is not None and time is not None and timeout is not None, "Missing basic action information"

        k = self.get_action_step()

        self.out(f"Recording stream {net_hash}")

        if k == 0:

            # Getting stream(s)
            _net_hash = self.user_stream_hash_to_net_hash(net_hash)  # In case of ambiguity, it yields the first one
            if _net_hash is None:
                self.err(f"Unknown stream {net_hash}")
                return False
            else:
                net_hash = _net_hash

            stream_src_dict = self.known_streams[net_hash]

            # Creating the new recorded stream (same props of the recorded one, just owned now)
            stream_dest_dict = {}
            for name, stream_obj in stream_src_dict.items():
                props = stream_obj.props.clone()
                props.set_group("recorded" + str(self._last_recorded_stream_num))
                stream_dest_dict[name] = BufferedDataStream(props=props, clock=self._node_clock)
            self._last_recorded_stream_dict = stream_dest_dict
            self._last_recording_stream_dict = stream_src_dict

        else:

            # Retrieving the stream(s)
            stream_dest_dict = self._last_recorded_stream_dict
            stream_src_dict = self._last_recording_stream_dict

        # Recording
        for name, stream_obj in stream_src_dict.items():
            x = stream_obj.get(requested_by="record")
            if x is None:
                self.deb("[record] data sample missing, returning False")
                return False
            else:
                self.deb(f"[record] data_tag: {stream_obj.get_tag()}, data_uuid: {stream_obj.get_uuid()}")
                stream_dest_dict[name].set(x, k)  # Saving specific data tags 0, 1, 2, ..., #record_steps - 1

        # Updating profile
        if self.is_last_action_step():
            self.deb("[record] last action step detected, finishing")

            # Dummy get to ensure that the next get will return None (i.e., we only PubSub if somebody restarts this)
            for stream_obj in stream_dest_dict.values():
                stream_obj.get(requested_by="send_stream_samples")

            self.add_streams(list(stream_dest_dict.values()), owned=True)
            self.update_streams_in_profile()
            self.subscribe_to_pubsub_owned_streams()
            self.send_profile_to_all()

            # New recorded stream
            self._last_recorded_stream_num += 1

        return True

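    # Minimal usage sketch (illustrative, not part of the original source): `record` is a
    # multistep action, meant to be driven by the agent's state machine, which calls it once per
    # step until the configured number of samples has been captured, e.g.:
    #
    #     agent.record(net_hash="QmPeerA...:camera", samples=100, timeout=5.)
    #
    # Both the stream hash and the timeout above are hypothetical placeholders.
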
    def connect_by_role(self, role: str | list[str], filter_fcn: str | None = None,
                        time: float = -1., timeout: float = -1.):
        """Finds and attempts to connect with agents whose profiles match a specific role. It can be optionally
        filtered by a custom function. It returns True if at least one valid agent is found.

        Args:
            role: The role or list of roles to search for.
            filter_fcn: The name of an optional filter function.
            time: The time duration for the action.
            timeout: The action timeout.

        Returns:
            True if at least one agent is found and a connection request is made, False otherwise.
        """
        self.out(f"Asking to get in touch with all agents whose role is {role}")
        assert time is not None and timeout is not None, "Missing basic action information"

        if self.get_action_step() == 0:
            role_list = role if isinstance(role, list) else [role]
            self._found_agents = set()
            at_least_one_is_valid = False

            for role in role_list:
                role = self.ROLE_STR_TO_BITS[role]

                found_addresses1, found_peer_ids1 = self._node_conn.find_addrs_by_role(Agent.ROLE_WORLD_MASTER | role,
                                                                                       return_peer_ids_too=True)
                found_addresses2, found_peer_ids2 = self._node_conn.find_addrs_by_role(Agent.ROLE_WORLD_AGENT | role,
                                                                                       return_peer_ids_too=True)
                found_addresses = found_addresses1 + found_addresses2
                found_peer_ids = found_peer_ids1 + found_peer_ids2

                if filter_fcn is not None:
                    if hasattr(self, filter_fcn):
                        filter_fcn = getattr(self, filter_fcn)
                    if callable(filter_fcn):
                        found_addresses, found_peer_ids = filter_fcn(found_addresses, found_peer_ids)
                    else:
                        self.err(f"Filter function not found: {filter_fcn}")

                self.out(f"Found addresses ({len(found_addresses)}) with role: {role}")
                for f_addr, f_peer_id in zip(found_addresses, found_peer_ids):
                    if not self._node_conn.is_connected(f_peer_id):
                        self.out(f"Asking to get in touch with {f_addr}...")
                        peer_id = self._node_ask_to_get_in_touch_fcn(addresses=f_addr, public=False)
                    else:
                        self.out(f"Not-asking to get in touch with {f_addr}, "
                                 f"since I am already connected to the corresponding peer...")
                        peer_id = f_peer_id
                    if peer_id is not None:
                        at_least_one_is_valid = True
                        self._found_agents.add(peer_id)
                        self.out(f"...returned {peer_id}")
            return at_least_one_is_valid
        else:
            return True

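    # Illustrative, assumption-heavy sketch (not from the original source): `filter_fcn` is the
    # *name* of a method on the agent; based on how it is invoked above, a matching filter could
    # look like the hypothetical method below, which keeps only loopback addresses:
    #
    #     def only_local(self, addresses, peer_ids):
    #         kept = [(a, p) for a, p in zip(addresses, peer_ids) if "127.0.0.1" in a]
    #         return [a for a, _ in kept], [p for _, p in kept]
    #
    #     agent.connect_by_role(role="artist", filter_fcn="only_local")  # "artist" is a placeholder role
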
    def find_agents(self, role: str | list[str], engage: bool = False):
        """Locally searches through the agent's known peers (world and public agents) to find agents with a specific
        role. It populates the `_found_agents` set with the peer IDs of matching agents.

        Args:
            role: The role or list of roles to search for.
            engage: If True, the found agents also become the agents this agent is engaged with.

        Returns:
            True if at least one agent is found, False otherwise.
        """
        self.out(f"Finding an available agent whose role is {role}")
        role_list = role if isinstance(role, list) else [role]
        self._found_agents = set()

        for role_str in role_list:
            agents = self.all_agents
            role_int = self.ROLE_STR_TO_BITS[role_str]
            role_clean = (role_int >> 2) << 2  # Ignoring the two lowest bits when comparing roles
            for peer_id, profile in agents.items():
                _role_int = self.ROLE_STR_TO_BITS[profile.get_dynamic_profile()['connections']['role']]
                _role_clean = (_role_int >> 2) << 2
                if _role_clean == role_clean:
                    self._found_agents.add(peer_id)  # Peer IDs here

        self.deb(f"[find_agents] Found these agents: {self._found_agents}")
        if engage:
            self._engaged_agents = copy.deepcopy(self._found_agents)
        return len(self._found_agents) > 0

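    # Worked example of the role masking above (bit values are illustrative): if ROLE_STR_TO_BITS
    # mapped some role to 0b10100 and a world-agent variant of it to 0b10110, then
    # (0b10110 >> 2) << 2 == 0b10100, so the two compare as the same base role. The two lowest
    # bits (apparently the world-master/world-agent flags OR'd in by `connect_by_role` above,
    # an inference rather than a documented fact) are therefore ignored in the match.
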
    def next_pref_stream(self):
        """Moves the internal pointer to the next stream in the list of preferred streams, which is often used for
        playlist-like operations. It wraps around to the beginning if it reaches the end.

        Returns:
            True if the move is successful, False if the list is empty.
        """
        if len(self._preferred_streams) == 0:
            self.err("Cannot move to the next stream because the list of preferred streams is empty")
            return False

        self._cur_preferred_stream = (self._cur_preferred_stream + 1) % len(self._preferred_streams)
        suffix = ", warning: restarted" if self._cur_preferred_stream == 0 else ""
        self.out(f"Moving to the next preferred stream ({self._preferred_streams[self._cur_preferred_stream]}){suffix}")
        return True

    def first_pref_stream(self):
        """Resets the internal pointer to the first stream in the list of preferred streams. This is useful for
        restarting a playback or processing loop.

        Returns:
            True if the move is successful, False if the list is empty.
        """
        if len(self._preferred_streams) == 0:
            self.err("Cannot move to the first stream because the list of preferred streams is empty")
            return False

        self._cur_preferred_stream = 0
        self.out(f"Moving to the first preferred stream ({self._preferred_streams[self._cur_preferred_stream]})")
        return True

    def check_pref_stream(self, what: str = "last"):
        """Checks the position of the current preferred stream within the list. It can check if it's the first, last,
        or if it has completed a full round, among other checks.

        Args:
            what: A string specifying the type of check to perform (e.g., 'first', 'last', 'last_round').

        Returns:
            True if the condition is met, False otherwise.
        """
        valid = ['first', 'last', 'not_first', 'not_last', 'last_round', 'not_last_round', 'last_song', 'not_last_song']
        assert what in valid, f"The what argument can only be one of {valid}"

        self.out(f"Checking if the current preferred playlist item "
                 f"(id: {self._cur_preferred_stream}) is the '{what}' one")
        if what == "first":
            return self._cur_preferred_stream == 0
        elif what == "last":
            return self._cur_preferred_stream == len(self._preferred_streams) - 1
        elif what == "not_first":
            return self._cur_preferred_stream != 0
        elif what == "not_last":
            return self._cur_preferred_stream != len(self._preferred_streams) - 1
        elif what == "last_round":
            return (self._cur_preferred_stream + len(self._preferred_streams) // self._repeat >=
                    len(self._preferred_streams))
        elif what == "not_last_round":
            return (self._cur_preferred_stream + len(self._preferred_streams) // self._repeat <
                    len(self._preferred_streams))
        elif what == "last_song":
            num_streams_in_playlist = len(self._preferred_streams) // self._repeat
            return (self._cur_preferred_stream + 1) % num_streams_in_playlist == 0
        elif what == "not_last_song":
            num_streams_in_playlist = len(self._preferred_streams) // self._repeat
            return (self._cur_preferred_stream + 1) % num_streams_in_playlist != 0

    def set_pref_streams(self, net_hashes: list[str], repeat: int = 1):
        """Fills the agent's list of preferred streams (a playlist). It can repeat the playlist a specified number of
        times and resolves user-provided stream hashes to their full network hashes.

        Args:
            net_hashes: A list of stream hashes to add to the playlist.
            repeat: The number of times to repeat the playlist.

        Returns:
            Always True.
        """
        self.out(f"Setting up a list of {len(net_hashes)} preferred streams")
        self._cur_preferred_stream = 0
        self._preferred_streams = []
        self._repeat = repeat
        for i in range(0, self._repeat):
            for net_hash in net_hashes:

                # We are tolerating both peer_id:name_or_group and also peer_id::ps:name_or_group
                components = net_hash.split(":")
                peer_id = components[0]
                name_or_group = components[-1]
                net_hash_to_streams = self.find_streams(peer_id=peer_id, name_or_group=name_or_group)
                for _net_hash in net_hash_to_streams.keys():
                    self._preferred_streams.append(_net_hash)

        return True

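    # Playlist semantics as a small worked example (illustrative; hashes are placeholders, and we
    # assume each hash resolves to exactly one stream). With 2 streams repeated twice,
    # `_preferred_streams` holds 4 entries, and the *_song/*_round checks behave as follows:
    #
    #     agent.set_pref_streams(["QmPeerA...:song1", "QmPeerA...:song2"], repeat=2)
    #     agent.check_pref_stream("last_song")   # True at indices 1 and 3 (end of each pass)
    #     agent.check_pref_stream("last_round")  # True once the index enters the final pass (2 or 3)
    #     agent.next_pref_stream()               # advances the pointer, wrapping around at the end
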
    def evaluate(self, stream_hash: str, how: str, steps: int = 100, re_offset: bool = False):
        """Evaluates the performance of agents that have completed a generation task. It compares the generated data
        from each agent with a local stream (which can be a ground truth or reference stream) using a specified
        comparison method.

        Args:
            stream_hash: The hash of the local stream to use for comparison.
            how: The name of the comparison method to use.
            steps: The number of steps to perform the evaluation.
            re_offset: A boolean to indicate whether to re-offset the streams.

        Returns:
            True if the evaluation is successful, False otherwise.
        """
        if not self.buffer_generated_by_others:
            self.err("Cannot evaluate if not buffering data generated by others")
            return False

        if stream_hash == "<playlist>":
            net_hash = self._preferred_streams[self._cur_preferred_stream]
        else:
            net_hash = self.user_stream_hash_to_net_hash(stream_hash)

        self._eval_results = {}
        self.deb(f"[eval] Agents returning streams: {self._agents_who_completed_what_they_were_asked}")
        for peer_id in self._agents_who_completed_what_they_were_asked:
            received_net_hash = self.last_buffered_peer_id_to_info[peer_id]["net_hash"]
            self.out(f"Comparing {net_hash} with {received_net_hash}")
            eval_result, ret = self.__compare_streams(net_hash_a=net_hash,
                                                      net_hash_b=received_net_hash,
                                                      how=how, steps=steps, re_offset=re_offset)
            self.out(f"Result of the comparison: {eval_result}")
            if not ret:
                return False
            else:
                peer_id = DataProps.peer_id_from_net_hash(received_net_hash)
                self._eval_results[peer_id] = eval_result

        return True

    def compare_eval(self, cmp: str, thres: float, good_if_true: bool = True):
        """Compares the results of a previous evaluation to a given threshold or finds the best result among all
        agents. It can check for minimum, maximum, or simple threshold-based comparisons, and it populates a list of
        'valid' agents that passed the comparison.

        Args:
            cmp: The comparison operator (e.g., '<', '>', 'min').
            thres: The threshold value for comparison.
            good_if_true: A boolean to invert the pass/fail logic.

        Returns:
            True if at least one agent passed the comparison, False otherwise.
        """
        assert cmp in ["<", ">", ">=", "<=", "min", "max"], f"Invalid comparison operator: {cmp}"
        assert thres >= 0. or cmp in ["min", "max"], f"Invalid evaluation threshold: {thres} (it must be >= 0.)"

        self._valid_cmp_agents = set()
        msgs = []
        best_so_far = -1

        min_or_max = None
        leq_or_geq = None
        if cmp in ["min", "max"]:
            min_or_max = "minimum" if cmp == "min" else "maximum"
            leq_or_geq = "<=" if cmp == "min" else ">="

        for agent, eval_result in self._eval_results.items():
            if cmp not in ["min", "max"]:
                self.out(f"Checking if result {eval_result} {cmp} {thres}, for agent {agent}")
            else:
                if thres >= 0:
                    self.out(f"Checking if result {eval_result} is the {min_or_max} so far, "
                             f"only if {leq_or_geq} {thres}, for agent {agent}")
                else:
                    self.out(f"Checking if result {eval_result} is the {min_or_max} so far, for agent {agent}")

            if eval_result < 0.:
                self.err(f"Invalid evaluation result: {eval_result}")
                return False

            if cmp != "min" and cmp != "max":
                outcome = False
                if cmp == "<" and eval_result < thres:
                    outcome = True
                elif cmp == "<=" and eval_result <= thres:
                    outcome = True
                elif cmp == ">" and eval_result > thres:
                    outcome = True
                elif cmp == ">=" and eval_result >= thres:
                    outcome = True

                if cmp in ("<", "<="):
                    alias = 'error level' if good_if_true else 'mark'
                else:
                    alias = 'mark' if good_if_true else 'error level'

                if good_if_true:
                    if outcome:
                        msgs.append(f"Agent {agent} passed with {alias} {eval_result}/{thres}")
                        self._valid_cmp_agents.add(agent)
                    else:
                        msgs.append(f"Agent {agent} did not pass")
                else:
                    if outcome:
                        msgs.append(f"Agent {agent} did not pass")
                    else:
                        msgs.append(f"Agent {agent} passed with {alias} {eval_result}/{thres}")
                        self._valid_cmp_agents.add(agent)

                if len(msgs) > 1:
                    msgs[-1] = msgs[-1][0].lower() + msgs[-1][1:]
            else:
                if ((cmp == "min" and (thres < 0 or eval_result <= thres) and
                     (eval_result < best_so_far or best_so_far < 0)) or
                        (cmp == "max" and (thres < 0 or eval_result >= thres) and
                         (eval_result > best_so_far or best_so_far < 0))):
                    best_so_far = eval_result
                    self._valid_cmp_agents = {agent}
                    msgs = [f"The best agent is {agent}"]
                elif best_so_far < 0:
                    msgs = [f"No best agent found for the considered threshold ({thres})"]

        if len(self._valid_cmp_agents) == 0:

            # # cheating (hack):
            # self._valid_cmp_agents.append(agent)
            # self.out(", ".join(msgs))
            # return True
            self.err("The evaluation was not passed by any agents")
            return False
        else:
            self.out(", ".join(msgs))
            return True

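    # Workflow sketch (illustrative; the stream hash and threshold are placeholders): after a
    # generation round, an evaluator agent would typically chain the two methods above, e.g.:
    #
    #     if agent.evaluate(stream_hash="<playlist>", how="mse", steps=100):
    #         if agent.compare_eval(cmp="min", thres=-1.0):  # thres < 0: pick the best, unconditionally
    #             winners = agent._valid_cmp_agents          # set holding the single best-scoring peer
    #
    # With cmp="min"/"max" a negative threshold disables the threshold check; with "<", "<=", ">",
    # ">=" the threshold must be non-negative (see the asserts above).
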
    def suggest_role_to_world(self, agent: str | None, role: str):
        """Suggests a role change for one or more agents to the world master. It iterates through the involved agents,
        checks if their current role differs from the suggested one, and sends a role suggestion message to the
        world master.

        Args:
            agent: The ID of the agent or a wildcard to suggest the role for.
            role: The new role to suggest (as a string).

        Returns:
            True if the suggestion was sent successfully, False otherwise.
        """
        self.out("Suggesting role to world")

        agents = self.__involved_agents(agent)
        role_bits = (self.ROLE_STR_TO_BITS[role] >> 2) << 2  # Ignoring the two lowest bits

        content = []

        for _agent in agents:
            cur_role_bits = self.ROLE_STR_TO_BITS[self.all_agents[_agent].get_dynamic_profile()['connections']['role']]
            cur_role_bits = (cur_role_bits >> 2) << 2
            if cur_role_bits == role_bits:
                self.out(f"Not suggesting to change the role of {_agent} "
                         f"since it already has such a role")
            else:
                self.out(f"Suggesting to change the role of {_agent} to {self.ROLE_BITS_TO_STR[role_bits]}")
                content.append({'peer_id': _agent, 'role': role_bits})

        if len(content) > 0:
            world_peer_id = self._node_conn.get_world_peer_id()
            if not self._node_conn.send(world_peer_id, channel_trail=None,
                                        content=content,
                                        content_type=Msg.ROLE_SUGGESTION):
                self.err("Failed to send role suggestion to the world")
                return False
        return True

    def suggest_badges_to_world(self, agent: str | None = None,
                                score: float = -1.0, badge_type: str = "completed",
                                badge_description: str | None = None):
        """Suggests one or more badges to the world master for specific agents. This is typically used to reward agents
        for completing tasks, such as for a competition. It sends a message with the badge details, including the score
        and type, to the world master.

        Args:
            agent: The ID of the agent or a wildcard for which to suggest the badge.
            score: The score associated with the badge.
            badge_type: The type of badge (e.g., 'completed').
            badge_description: An optional description for the badge.

        Returns:
            True if the badge suggestion was sent successfully, False otherwise.
        """
        self.out("Suggesting one or more badges to world")

        if score < 0.:
            self.err("Invalid score (did you specify the 'score' argument? it must be positive)")
            return False

        agents = self.__involved_agents(agent)
        world_peer_id = self._node_conn.get_world_peer_id()

        if badge_type not in Agent.BADGE_TYPES:
            self.err(f"Unknown badge type: {badge_type}")
            return False

        list_of_badge_dictionaries = []
        for peer_id in agents:
            list_of_badge_dictionaries.append({'peer_id': peer_id,
                                               'score': score,
                                               'badge_type': badge_type,
                                               'badge_description': badge_description,
                                               'agent_token': self._node_conn.get_last_token(peer_id)})

        if not self._node_conn.send(world_peer_id, channel_trail=None,
                                    content=list_of_badge_dictionaries,
                                    content_type=Msg.BADGE_SUGGESTIONS):
            self.err("Failed to send badge suggestions to the world")
            return False
        else:
            return True

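    # Usage sketch (illustrative; score and description are placeholders): after `compare_eval`
    # has populated the set of valid agents, the "<valid_cmp>" wildcard resolved by
    # `__involved_agents` lets an evaluator reward exactly those agents, e.g.:
    #
    #     agent.suggest_badges_to_world(agent="<valid_cmp>", score=0.9,
    #                                   badge_type="completed", badge_description="won the round")
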
    def __ask_gen_or_learn(self, for_what: str, agent: str,
                           u_hashes: list[str] | None,
                           yhat_hashes: list[str] | None,
                           samples: int = 100, time: float = -1., timeout: float = -1., ref_uuid: str | None = None):
        """A private helper method that encapsulates the logic for sending a 'do_gen' or 'do_learn' action request to
        another agent. It handles the normalization of stream hashes, sets up recipients for direct messages, and adds
        the target agent to the list of agents asked.

        Args:
            for_what: A string indicating whether to ask for 'gen' or 'learn'.
            agent: The ID of the agent to send the request to.
            u_hashes: A list of input stream hashes.
            yhat_hashes: A list of target stream hashes (for learning).
            samples: The number of samples.
            time: The time duration.
            timeout: The request timeout.
            ref_uuid: The UUID for the request.

        Returns:
            True if the request was sent successfully, False otherwise.
        """
        if agent not in self.all_agents:
            self.err(f"Unknown agent: {agent}")
            return False

        assert for_what in ["gen", "learn"]

        if for_what == "learn":
            for yhat_hash in yhat_hashes:
                yhat_stream_dict = self.known_streams[yhat_hash]
                for yhat_stream in yhat_stream_dict.values():
                    if isinstance(yhat_stream, BufferedDataStream):
                        y_text = yhat_stream.to_text_snippet(length=200)
                        if y_text is not None and len(y_text) > 0:
                            self.out("Asking to learn: \"" + y_text + "\"")

        # Setting recipient in the case of direct messages
        # (differently, in case of pubsub, the agent is already sending messages to all)
        if u_hashes is not None:
            for u_hash in u_hashes:
                if not DataProps.is_pubsub_from_net_hash(u_hash):
                    self.recipients[u_hash] = agent
        if yhat_hashes is not None:
            for yhat_hash in yhat_hashes:
                if not DataProps.is_pubsub_from_net_hash(yhat_hash):
                    self.recipients[yhat_hash] = agent

        # Triggering
        if for_what == "gen":
            if self.set_next_action(agent, action="do_gen", args={"u_hashes": u_hashes,
                                                                  "samples": samples, "time": time,
                                                                  "timeout": timeout},
                                    ref_uuid=ref_uuid):
                self._agents_who_were_asked.add(agent)
                return True
            else:
                self.err(f"Unable to ask {agent} to generate")
                return False
        elif for_what == "learn":
            if self.set_next_action(agent, action="do_learn", args={"u_hashes": u_hashes, "yhat_hashes": yhat_hashes,
                                                                    "samples": samples, "time": time,
                                                                    "timeout": timeout},
                                    ref_uuid=ref_uuid):
                self._agents_who_were_asked.add(agent)
                return True
            else:
                self.err(f"Unable to ask {agent} to learn to generate")
                return False

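    # Protocol sketch (illustrative): asking a peer to generate follows an ask/do/done handshake.
    # Conceptually, a caller inside this class would invoke the private helper above as:
    #
    #     self.__ask_gen_or_learn("gen", agent="QmPeerA...",      # hypothetical peer ID
    #                             u_hashes=["QmSelf...:prompt"],  # hypothetical input stream
    #                             yhat_hashes=None, samples=50)
    #
    # The asked peer then runs its `do_gen` action for the requested steps and, presumably by
    # analogy with `done_subscribe` above, its `done_gen` confirmation ends up in
    # `_agents_who_completed_what_they_were_asked`, which `evaluate` later iterates over.
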
    def __process_streams(self,
                          u_hashes: list[str] | None,
                          yhat_hashes: list[str] | None,
                          learn: bool = False,
                          recipient: str | None = None,
                          ref_uuid: str | None = None):
        """A private helper method that contains the core logic for processing data streams, either for generation or
        learning. It reads input streams, passes them to the agent's processor, and handles the output streams.
        It's designed to be called repeatedly by multistep actions like `do_gen` and `do_learn`.

        Args:
            u_hashes: A list of input stream hashes.
            yhat_hashes: A list of target stream hashes (for learning).
            learn: A boolean to indicate if the task is a learning task.
            recipient: The ID of the agent to send data back to.
            ref_uuid: The UUID for the request.

        Returns:
            True if the stream processing is successful, False otherwise.
        """

        # Getting current step index
        k = self.get_action_step()

        # Checking data and creating new buffered streams
        if k == 0:
            self.deb("[__process_streams] First action step")

            # Checking data
            if u_hashes is not None:
                for u_hash in u_hashes:
                    if u_hash is not None and u_hash not in self.known_streams:
                        self.err(f"Unknown stream (u_hash): {u_hash}")
                        return False
            if yhat_hashes is not None:
                for yhat_hash in yhat_hashes:
                    if yhat_hash is not None and yhat_hash not in self.known_streams:
                        self.err(f"Unknown stream (yhat_hash): {yhat_hash}")
                        return False

        if self.is_last_action_step():
            self.deb("[__process_streams] Last action step detected")

        self.deb(f"[__process_streams] Generating data, step {k}")

        # Generate output
        outputs, data_tag_from_inputs = (
            self.generate(input_net_hashes=u_hashes, first=(k == 0), last=self.is_last_action_step(),
                          ref_uuid=ref_uuid))
        if outputs is None:
            return False
        self.deb(f"[__process_streams] data_tag_from_inputs: {data_tag_from_inputs}")
        if data_tag_from_inputs is None:
            data_tag_from_inputs = -1
            self.deb(f"[__process_streams] data_tag_from_inputs (forced): {data_tag_from_inputs}")

        # Learn
        if learn:
            self.deb(f"[__process_streams] learning, step {k}")
            loss_values, data_tags_from_targets = self.learn_generate(outputs=outputs, targets_net_hashes=yhat_hashes)
            self.deb(f"[__process_streams] data_tags_from_targets: {data_tags_from_targets}")

            if loss_values is None:
                return False
            else:
                self.out(f"Losses: {loss_values}")

            # Fusing data tags
            data_tags = [data_tag_from_inputs if _data_tag == -1 else _data_tag for _data_tag in data_tags_from_targets]
        else:
            data_tags = [data_tag_from_inputs] * len(outputs)
        self.deb(f"[__process_streams] data_tags (final): {data_tags}")

        # Set each data sample in "outputs" to the right stream
        i = 0
        for net_hash, stream_dict in self.proc_streams.items():

            # Setting the data sample
            for name, stream_obj in stream_dict.items():

                # Public output streams are only considered if the agent IS NOT acting in a world;
                # private output streams are only considered if the agent IS acting in a world
                if self.behaving_in_world() != stream_obj.props.is_public():

                    # Guessing recipient of the communication
                    if i == 0:
                        self.recipients[net_hash] = recipient \
                            if not DataProps.is_pubsub_from_net_hash(net_hash) else None

                    self.deb(f"[__process_streams] Setting the {i}-th network output to stream with "
                             f"net_hash: {net_hash}, name: {name}")

                    # Here we exploit the fact that streams were inserted in order
                    try:
                        stream_obj.set(stream_obj.props.check_and_postprocess(outputs[i]), data_tags[i])
                    except Exception as e:
                        self.err(f"Error while post-processing the processor output\nException: {e}")
                        return False

                    if k == 0:
                        stream_obj.set_uuid(ref_uuid, expected=False)
                        stream_obj.set_uuid(None, expected=True)
                    i += 1

        return True

    def __complete_do(self, do_what: str, peer_id_who_asked: str, all_hashes: list[str] | None,
                      send_back_confirmation: bool = True):
        """A private helper method to be called at the end of a `do_gen` or `do_learn` action. It performs cleanup
        tasks, such as clearing UUIDs on streams, and sends a confirmation message back to the requesting agent.

        Args:
            do_what: A string ('gen' or 'learn') indicating which task was completed.
            peer_id_who_asked: The ID of the agent who requested the task.
            all_hashes: A list of all stream hashes involved in the task.
            send_back_confirmation: A boolean to indicate if a confirmation message should be sent.

        Returns:
            True if the completion process is successful, False otherwise.
        """
        assert do_what in ["gen", "learn"]

        if do_what == "gen":
            for net_hash, stream_dict in self.proc_streams.items():
                for stream in stream_dict.values():
                    if isinstance(stream, BufferedDataStream):
                        y_text = stream.to_text_snippet(length=200)
                        if y_text is not None:
                            self.out("Generated: \"" + y_text + "\"")

        for stream_dict in self.proc_streams.values():
            for stream_obj in stream_dict.values():
                if stream_obj.props.is_public() != self.behaving_in_world():
                    stream_obj.mark_uuid_as_clearable()

        if all_hashes is not None:
            for net_hash in all_hashes:
                for stream_obj in self.known_streams[net_hash].values():
                    stream_obj.set_uuid(None, expected=False)
                    stream_obj.set_uuid(None, expected=True)

        # Confirming
        if send_back_confirmation:
            if self.set_next_action(peer_id_who_asked, action="done_" + do_what, args={}):
                return True
            else:
                self.err(f"Unable to confirm '{do_what}' to {peer_id_who_asked}")
                return False
        else:
            return True

    def __compare_streams(self, net_hash_a: str, net_hash_b: str,
                          how: str = "mse", steps: int = 100, re_offset: bool = False):
        """A private helper method that compares two buffered data streams based on a specified metric (e.g., MSE,
        max accuracy). It handles stream compatibility checks, data retrieval, and the actual comparison, returning a
        dissimilarity score.

        Args:
            net_hash_a: The network hash of the first stream.
            net_hash_b: The network hash of the second stream.
            how: The comparison metric ('mse', 'max', 'geqX').
            steps: The number of samples to compare.
            re_offset: A boolean to re-align stream tags before comparison.

        Returns:
            A tuple containing the dissimilarity score and a success flag (e.g., `(0.5, True)`).
        """
        if net_hash_a not in self.known_streams:
            self.err(f"Unknown stream (net_hash_a): {net_hash_a}")
            return -1., False

        if net_hash_b not in self.known_streams:
            self.err(f"Unknown stream (net_hash_b): {net_hash_b}")
            return -1., False

        if steps <= 0:
            self.err(f"Invalid number of steps: {steps}")
            return -1., False

        if how not in ["mse", "max"] and not how.startswith("geq"):
            self.err(f"Data can be compared by MSE, or by comparing the argmax ('max'), or by comparing the number "
                     f"of corresponding bits (obtained by 'geqX', where 'X' is a number). Unknown: {how}")
            return -1., False

        stream_dict_a = self.known_streams[net_hash_a]
        stream_dict_b = self.known_streams[net_hash_b]

        if len(stream_dict_a) == 1 and len(stream_dict_b) == 1:

            # If there is only 1 stream in each group, things are easy
            stream_a = next(iter(stream_dict_a.values()))
            stream_b = next(iter(stream_dict_b.values()))
        elif len(stream_dict_a) == 1 and len(stream_dict_b) > 1:

            # If there is only 1 stream in one of the groups, we look for a compatible stream in the other group,
            # giving priority to streams with labels
            stream_a = next(iter(stream_dict_a.values()))
            stream_b = None
            for stream_obj in stream_dict_b.values():
                if (stream_a.get_props().has_tensor_labels() and stream_obj.get_props().has_tensor_labels() and
                        stream_obj.get_props().is_compatible(stream_a.get_props())):
                    stream_b = stream_obj
                    break
            if stream_b is None:
                for stream_obj in stream_dict_b.values():
                    if stream_obj.get_props().is_compatible(stream_a.get_props()):
                        stream_b = stream_obj
                        break
        elif len(stream_dict_a) > 1 and len(stream_dict_b) == 1:

            # If there is only 1 stream in one of the groups, we look for a compatible stream in the other group,
            # giving priority to streams with labels
            stream_a = None
            stream_b = next(iter(stream_dict_b.values()))
            for stream_obj in stream_dict_a.values():
                if (stream_b.get_props().has_tensor_labels() and stream_obj.get_props().has_tensor_labels() and
                        stream_obj.get_props().is_compatible(stream_b.get_props())):
                    stream_a = stream_obj
                    break
            if stream_a is None:
                for stream_obj in stream_dict_a.values():
                    if stream_obj.get_props().is_compatible(stream_b.get_props()):
                        stream_a = stream_obj
                        break
        else:

            # If both groups have more than one stream, let's give priority to streams with labels to find a match
            stream_a = None
            stream_b = None
            for stream_obj_a in stream_dict_a.values():
                if not stream_obj_a.get_props().has_tensor_labels():
                    continue
                if stream_a is not None and stream_b is not None:
                    break
                for stream_obj_b in stream_dict_b.values():
                    if (stream_obj_b.get_props().has_tensor_labels() and
                            stream_obj_a.get_props().is_compatible(stream_obj_b.get_props())):
                        stream_a = stream_obj_a
                        stream_b = stream_obj_b
                        break
            if stream_a is None and stream_b is None:
                for stream_obj_a in stream_dict_a.values():
                    if stream_a is not None and stream_b is not None:
                        break
                    for stream_obj_b in stream_dict_b.values():
                        if stream_obj_a.get_props().is_compatible(stream_obj_b.get_props()):
                            stream_a = stream_obj_a
                            stream_b = stream_obj_b
                            break

        if stream_a is None:
            self.err(f"Cannot find the data stream to consider in the comparison, {net_hash_a}")
            return -1., False
        if stream_b is None:
            self.err(f"Cannot find the data stream to consider in the comparison, {net_hash_b}")
            return -1., False

        if not isinstance(stream_a, BufferedDataStream):
            self.err(f"Can only compare buffered streams and {net_hash_a} is not buffered")
            return -1., False

        if not isinstance(stream_b, BufferedDataStream):
            self.err(f"Can only compare buffered streams and {net_hash_b} is not buffered")
            return -1., False

        if steps > len(stream_a) and steps > len(stream_b):
            self.err(f"Cannot compare streams for {steps} steps, since both of them are shorter "
                     f"(length of the first stream is {len(stream_a)}, of the second stream is {len(stream_b)})")
            return -1., False

        if not stream_a.get_props().is_compatible(stream_b.get_props()):
            self.err("Cannot compare incompatible streams")
            return -1., False

        stream_a.restart()
        stream_b.restart()

        def compare(_a: torch.Tensor | str, _b: torch.Tensor | str, _how: str = "mse") -> float:
            """Compare two samples of signals or descriptors, returning a dissimilarity score >= 0."""

            assert _how in ['mse', 'max', 'same'] or _how.startswith("geq"), f"Invalid comparison in terms of {_how}"

            if isinstance(_a, torch.Tensor) and isinstance(_b, torch.Tensor):
                if _a.dtype == torch.long and _b.dtype == torch.long:  # Token IDs
                    return 1. - float((_a == _b).sum().item()) / _a.numel()  # Accuracy-based dissimilarity
                elif _how == "mse":
                    ret = float(torch.nn.functional.mse_loss(_a, _b, reduction='mean'))
                elif _how == "max":
                    ret = 1. - float((torch.argmax(_a) == torch.argmax(_b)).sum().item()) / _a.numel()
                elif _how == "same":
                    ret = 1. - float(torch.eq(_a, _b).sum()) / _a.numel()
                else:
                    thres = float(_how[3:])
                    ret = 1. - float(torch.sum((_a > thres) == (_b > thres)).item()) / _a.numel()
            else:
                ret = 1. - float(_a == _b)  # Strings (always handled as 'same')
            return ret

        # Comparing data (averaging)
        o = 0.
        k_b = 0
        a_tag_offset = 0
        b_tag_offset = 0
        a_tag = None
        a_tag_prev = None
        for k_a in range(0, steps):

            restart_detected = False
            if a_tag is not None:
                a_tag_prev = a_tag

            # Signals or descriptors
            a, a_tag = stream_a[k_a]
            b, b_tag = stream_b[k_b]

            # If we asked to re-offset the streams, we take their first tags as offsets, so that both
            # effectively start from zero; if not, the tags are compared as they are
            if k_a == 0 and k_b == 0 and re_offset:
                a_tag_offset = a_tag
                b_tag_offset = b_tag

            # Offset-based tags
            a_tag_w_offset = a_tag - a_tag_offset
            b_tag_w_offset = b_tag - b_tag_offset

            # Checking
            if a is None:
                self.err("Cannot compare stream samples if the reference stream yields None")
                return -1., False

            # Some streams might have been pre-buffered in advance, and have increasing data tags belonging to a
            # finite, fixed set (such as 0, 1, 2, ..., N). When continuously streaming them, we will go from tag N
            # to tag 0 at a certain point, which is a "restart".
            # We have to remember that this happened, and we do it for stream "a", our "reference" stream.
            # Then, below, we will fix tags on stream "b" if needed, considering that such a restart happened.
            if a_tag_prev is not None and a_tag < a_tag_prev:
                restart_detected = True

            # Some streams might have been pre-buffered in advance, and have a fixed data tag (usually -1).
            # Being negative, the data tag will be replaced by a clock cycle, but this function
            # does not change clock cycles at all, so all samples will have the exact same data tag.
            # The following code automatically advances the tag by 1 for stream "a", that is expected to be the
            # reference stream (i.e., the one for which the agent has all samples, with no missing data in between)
            if a_tag_prev is not None and a_tag <= a_tag_prev:
                a_tag = a_tag_prev + 1  # Fixed tag detected (patching)
                a_tag_w_offset = a_tag - a_tag_offset

            # Fixing
            if b is None:
                o = o + (1. if how != "mse" else (o / steps) * 1.1)
                self.deb("[__compare_streams] The second stream yields None")
            else:
                if b_tag_w_offset == a_tag_w_offset:
                    o += compare(a, b, how)
                    k_b += 1
                    self.deb(f"[__compare_streams] Comparing tags: {a_tag} vs {b_tag} "
                             f"(with offsets: {a_tag_w_offset} vs {b_tag_w_offset}), samples: {a} vs {b}")
                elif b_tag_w_offset > a_tag_w_offset:
                    if not restart_detected:
                        o = o + (1. if how != "mse" else (o / steps) * 1.1)  # Don't change k_b, some samples missing
                        self.deb(f"[__compare_streams] (b) Comparing tags: {a_tag} vs {b_tag} -> "
                                 f"expected one was missing "
                                 f"(with offsets: {a_tag_w_offset} vs {b_tag_w_offset}) "
                                 f"samples: {a} vs {b}")
                    else:
                        o = o + (1. if how != "mse" else (o / steps) * 1.1)
                        self.deb(f"[__compare_streams] (c) Comparing tags: {a_tag} vs {b_tag} -> "
                                 f"expected one was missing "
                                 f"(with offsets: {a_tag_w_offset} vs {b_tag_w_offset}) "
                                 f"samples: {a} vs {b}")
                        k_b += 1  # A restart was detected, it means that "stream_b" is behind, let's move it ahead
                elif b_tag_w_offset < a_tag_w_offset:
                    self.deb(f"[__compare_streams] (d) Comparing tags: {a_tag} vs {b_tag} -> too early w.r.t. expected "
                             f"(with offsets: {a_tag_w_offset} vs {b_tag_w_offset}) "
                             f"samples: {a} vs {b}")
                    return -1., False

        self.deb(f"[__compare_streams] Error: {o / steps}")

        # input("*** press enter to continue ***")
        return o / steps, True

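    # Standalone illustration (not part of the original source) of the dissimilarity metrics used
    # by the inner `compare` helper above; it only relies on plain PyTorch:
    #
    #     import torch
    #     a = torch.tensor([0.9, 0.2, 0.7])
    #     b = torch.tensor([0.8, 0.1, 0.3])
    #     mse = float(torch.nn.functional.mse_loss(a, b, reduction='mean'))  # 'mse'    -> 0.06
    #     max_d = 1. - float(torch.argmax(a) == torch.argmax(b))             # 'max'    -> 0.0 (both argmax at index 0)
    #     geq = 1. - float(torch.sum((a > 0.5) == (b > 0.5)).item()) / 3     # 'geq0.5' -> 1/3 (they disagree on 0.7 vs 0.3)
    #
    # All three are dissimilarities: 0 means identical under the chosen metric.
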
    def __involved_agents(self, agent: str | None):
        """A private helper method that resolves an agent ID or a wildcard into a list of specific peer IDs.
        It can resolve a single agent, a group of agents that passed a previous comparison (`<valid_cmp>`), or all
        currently engaged agents.

        Args:
            agent: The agent ID or wildcard string.

        Returns:
            A list of peer IDs corresponding to the involved agents.
        """
        peer_id = agent
        if peer_id is None:
            involved_agents = self._engaged_agents
        elif peer_id == "<valid_cmp>":
            involved_agents = self._valid_cmp_agents
        else:
            involved_agents = [peer_id]
        if len(involved_agents) == 0:
            self.err("Not engaged to any agents or no agent specified")
        return involved_agents