unaiverse-0.1.6-cp311-cp311-macosx_10_9_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of unaiverse might be problematic.

Files changed (50)
  1. unaiverse/__init__.py +19 -0
  2. unaiverse/agent.py +2008 -0
  3. unaiverse/agent_basics.py +1846 -0
  4. unaiverse/clock.py +191 -0
  5. unaiverse/dataprops.py +1209 -0
  6. unaiverse/hsm.py +1880 -0
  7. unaiverse/modules/__init__.py +18 -0
  8. unaiverse/modules/cnu/__init__.py +17 -0
  9. unaiverse/modules/cnu/cnus.py +536 -0
  10. unaiverse/modules/cnu/layers.py +261 -0
  11. unaiverse/modules/cnu/psi.py +60 -0
  12. unaiverse/modules/hl/__init__.py +15 -0
  13. unaiverse/modules/hl/hl_utils.py +411 -0
  14. unaiverse/modules/networks.py +1509 -0
  15. unaiverse/modules/utils.py +680 -0
  16. unaiverse/networking/__init__.py +16 -0
  17. unaiverse/networking/node/__init__.py +18 -0
  18. unaiverse/networking/node/connpool.py +1261 -0
  19. unaiverse/networking/node/node.py +2223 -0
  20. unaiverse/networking/node/profile.py +446 -0
  21. unaiverse/networking/node/tokens.py +79 -0
  22. unaiverse/networking/p2p/__init__.py +198 -0
  23. unaiverse/networking/p2p/go.mod +127 -0
  24. unaiverse/networking/p2p/go.sum +548 -0
  25. unaiverse/networking/p2p/golibp2p.py +18 -0
  26. unaiverse/networking/p2p/golibp2p.pyi +135 -0
  27. unaiverse/networking/p2p/lib.go +2714 -0
  28. unaiverse/networking/p2p/lib.go.sha256 +1 -0
  29. unaiverse/networking/p2p/lib_types.py +312 -0
  30. unaiverse/networking/p2p/message_pb2.py +63 -0
  31. unaiverse/networking/p2p/messages.py +265 -0
  32. unaiverse/networking/p2p/mylogger.py +77 -0
  33. unaiverse/networking/p2p/p2p.py +929 -0
  34. unaiverse/networking/p2p/proto-go/message.pb.go +616 -0
  35. unaiverse/networking/p2p/unailib.cpython-311-darwin.so +0 -0
  36. unaiverse/streamlib/__init__.py +15 -0
  37. unaiverse/streamlib/streamlib.py +210 -0
  38. unaiverse/streams.py +770 -0
  39. unaiverse/utils/__init__.py +16 -0
  40. unaiverse/utils/ask_lone_wolf.json +27 -0
  41. unaiverse/utils/lone_wolf.json +19 -0
  42. unaiverse/utils/misc.py +305 -0
  43. unaiverse/utils/sandbox.py +293 -0
  44. unaiverse/utils/server.py +435 -0
  45. unaiverse/world.py +175 -0
  46. unaiverse-0.1.6.dist-info/METADATA +365 -0
  47. unaiverse-0.1.6.dist-info/RECORD +50 -0
  48. unaiverse-0.1.6.dist-info/WHEEL +6 -0
  49. unaiverse-0.1.6.dist-info/licenses/LICENSE +43 -0
  50. unaiverse-0.1.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1846 @@
+ """
+ █████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
+ ░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
+ ░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
+ ░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
+ ░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
+ ░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
+ ░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
+ ░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
+ A Collectionless AI Project (https://collectionless.ai)
+ Registration/Login: https://unaiverse.io
+ Code Repositories: https://github.com/collectionlessai/
+ Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
+ """
+ import os
+ import torch
+ import types
+ import pickle
+ import importlib.resources
+ from PIL.Image import Image
+ from unaiverse.clock import Clock
+ from unaiverse.hsm import HybridStateMachine
+ from unaiverse.networking.p2p.messages import Msg
+ from unaiverse.dataprops import DataProps, Data4Proc
+ from unaiverse.networking.node.profile import NodeProfile
+ from unaiverse.utils.misc import GenException, FileTracker
+ from unaiverse.streams import BufferedDataStream, DataStream
+ from unaiverse.networking.node.connpool import ConnectionPools
+ from unaiverse.modules.utils import AgentProcessorChecker, ModuleWrapper
+
+
+ class AgentBasics:
+     """This class contains those methods and properties that are about building the agent, known agents,
+     known streams, etc., and no actions at all (see the class "Agent" for actions)."""
+
+     DEBUG = True  # Turns on/off extra logging
+
+     # Role bits (a.k.a. role int): default roles, shared by every possible agent
+     ROLE_PUBLIC = 0 << 0  # 00000000 = 0 means "public"
+     ROLE_WORLD_MASTER = (1 << 0) | (1 << 1)  # 00000011 = 3 means "world master" (the first bit means "about world")
+     ROLE_WORLD_AGENT = (1 << 0) | (0 << 1)  # 00000001 = 1 means "world agent" (the first bit means "about world")
+     CUSTOM_ROLES = []
+
+     # From role bits (int) to string
+     ROLE_BITS_TO_STR = {
+         ROLE_PUBLIC: "public_agent",
+         ROLE_WORLD_MASTER: "world_master",
+         ROLE_WORLD_AGENT: "world_agent",
+     }
+
+     # From role string to bits (int)
+     ROLE_STR_TO_BITS = {
+         "public_agent": ROLE_PUBLIC,
+         "world_master": ROLE_WORLD_MASTER,
+         "world_agent": ROLE_WORLD_AGENT,
+     }
+
+     # Types of badges
+     BADGE_TYPES = {'completed', 'attended', 'intermediate', 'pro'}
+
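# --- Editor's sketch (illustrative, not part of agent_basics.py): how the role bits compose. ---
# Bit 0 marks "about world" and bit 1 marks "master"; custom roles occupy bit 2 upwards
# (see augment_roles below). Once the class is importable, these checks hold:
#
#     from unaiverse.agent_basics import AgentBasics
#
#     assert AgentBasics.ROLE_PUBLIC == 0        # 00000000
#     assert AgentBasics.ROLE_WORLD_AGENT == 1   # 00000001
#     assert AgentBasics.ROLE_WORLD_MASTER == 3  # 00000011
#     first_custom_role = 1 << 2                 # 00000100, the first non-reserved bit
#     assert AgentBasics.ROLE_WORLD_AGENT | first_custom_role == 5  # e.g., "world_agent~role1"
# --- end of sketch ---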
+     def __init__(self,
+                  proc: ModuleWrapper | torch.nn.Module | str | None,
+                  proc_inputs: list[Data4Proc] | None = None,
+                  proc_outputs: list[Data4Proc] | None = None,
+                  proc_opts: dict | None = None,
+                  behav: HybridStateMachine | None = None,
+                  behav_lone_wolf: HybridStateMachine | str = "serve",
+                  merge_flat_stream_labels: bool = False,
+                  buffer_generated: bool = False,
+                  buffer_generated_by_others: str = "none",
+                  world_folder: str | None = None):
+         """Create a new agent.
+
+         Args:
+             proc: The processing module (e.g., a neural network) for the agent. Can be None or "default".
+             proc_inputs: List of DataProps defining the expected inputs for the processor (if None, it will be
+                 guessed).
+             proc_outputs: List of DataProps defining the expected outputs from the processor (if None, it will be
+                 guessed).
+             proc_opts: A dictionary of options for the processor.
+             behav: The HybridStateMachine that describes the agent's behavior when joining a world.
+             behav_lone_wolf: The HybridStateMachine that describes the agent's behavior when in the public net
+                 (it can also be the string "serve" or "ask", which loads a pre-designed HSM).
+             merge_flat_stream_labels: If True, merges flat stream labels across all owned streams.
+             buffer_generated: If True, generated streams will be buffered.
+             buffer_generated_by_others: If set to "one" or "all", streams generated by other agents will be buffered
+                 ("one" buffered stream per peer, or all of them). If set to "none", no buffering will happen
+                 (default).
+             world_folder: World only. Folder containing the world data (role files, represented by *.json behavior
+                 files).
+         """
+
+         # Agent-related features
+         self.behav = behav  # HSM that describes the agent behavior in the private/world net
+         self.behav_lone_wolf = behav_lone_wolf  # HSM that describes the agent behavior in the public net
+         self.behav_wildcards = {}
+         self.proc = proc
+         self.proc_inputs = proc_inputs
+         self.proc_outputs = proc_outputs
+         self.proc_opts = proc_opts
+         self.proc_last_inputs = None
+         self.proc_last_outputs = None
+         self.proc_optional_inputs = None
+         self.merge_flat_stream_labels = merge_flat_stream_labels
+         self.buffer_generated = buffer_generated
+         self.buffer_generated_by_others = buffer_generated_by_others
+         self.world_folder = world_folder
+
+         if self.buffer_generated_by_others not in {"one", "all", "none"}:
+             raise GenException("Param buffer_generated_by_others can be set to 'one', 'all', or 'none' only.")
+
+         # Streams
+         self.known_streams = {}  # All streams that are known to this agent
+         self.owned_streams = {}  # The streams that are generated/offered by this agent
+         self.env_streams = {}  # The owned streams that come from environmental sources (e.g., a camera)
+         self.proc_streams = {}  # The owned streams that are generated by the agent's processor
+         self.compat_in_streams = set()  # Streams compatible with the processor input (dynamically set)
+         self.compat_out_streams = set()  # Streams compatible with the processor output (dynamically set)
+
+         # Agents, world masters, expected world masters
+         self.all_agents = {}  # ID -> profile (all types of agent)
+         self.public_agents = {}  # ID -> profile of lone wolves talking to this world in a public manner (profile)
+         self.world_agents = {}  # ID -> profile of all agents living in this world (profile)
+         self.world_masters = {}  # ID -> profile of all master-agents living in this world (profile)
+         self.world_profile = None
+         self.is_world = False  # If this instance is about a world: it will be discovered at creation time
+
+         # World-specific attributes (they are only used if this agent is actually a world)
+         self.agent_actions = None
+         self.role_to_behav = {}
+         self.agent_badges: dict[str, list[dict]] = {}  # Peer_id -> collected badges for other agents
+         self.role_changed_by_world: bool = False
+         self.received_address_update: bool = False
+
+         # Internal properties about the way streams are used
+         self.last_buffered_peer_id_to_info = {}  # If buffering was turned on
+         self.last_ref_uuid = None
+         self.recipients = {}  # The peer IDs of the recipients of the next batch of direct messages
+
+         # Information inherited from the node that hosts this agent
+         self._node_name = "unk"
+         self._node_clock = None
+         self._node_conn = None
+         self._node_profile = None
+         self._node_out_fcn = print
+         self._node_ask_to_get_in_touch_fcn = None
+         self._node_purge_fcn = None
+         self._node_agents_waiting = None
+
+         # Checking
+         if not (self.proc is None or
+                 (isinstance(self.proc, torch.nn.Module) or (isinstance(self.proc, str) and self.proc == "default"))):
+             raise GenException("Invalid data processor: it must be either the string 'default' or a torch.nn.Module")
+         if not (self.behav is None or isinstance(self.behav, HybridStateMachine)):
+             raise GenException("Invalid behavior: it must be either None or a HybridStateMachine")
+
+         # Filling (guessing) missing processor-related info (proc_inputs and proc_outputs)
+         # and allocating a dummy processor if it was not specified (if None)
+         AgentProcessorChecker(self)
+
+         # The stream_hash of compatible streams for each data_props are stored in a set
+         self.compat_in_streams = [set() for _ in range(len(self.proc_inputs))] \
+             if self.proc_inputs is not None else None
+         self.compat_out_streams = [set() for _ in range(len(self.proc_outputs))] \
+             if self.proc_outputs is not None else None
+
+         # Loading default public HSM
+         if hasattr(self, "do_gen"):  # Trick to distinguish whether this is an Agent or a World (both subclasses)
+             self.is_world = False
+
+             # Setting an empty HSM as default if none was provided (private/world)
+             if self.behav is None:
+                 self.behav = HybridStateMachine(self)
+                 self.behav.add_state("empty")
+
+             if self.behav_lone_wolf is not None and isinstance(self.behav_lone_wolf, str):
+                 template_string = self.behav_lone_wolf
+                 if template_string == "serve":
+                     json_to_load = "lone_wolf.json"
+                 elif template_string == "ask":
+                     json_to_load = "ask_lone_wolf.json"
+                 else:
+                     raise ValueError("Invalid behav_lone_wolf: it must be a HybridStateMachine or a string "
+                                      "in ('serve', 'ask')")
+
+                 # Safe way to load a file packed in a pip package
+                 self.behav_lone_wolf = HybridStateMachine(self)
+                 utils_path = importlib.resources.files("unaiverse.utils")
+                 json_file = utils_path.joinpath(json_to_load)
+                 with json_file.open() as file:
+                     self.behav_lone_wolf.load(file)
+         else:
+             self.is_world = True
+             if self.world_folder is None:
+                 raise GenException("No world folder was indicated (world_folder argument)")
+
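# --- Editor's sketch (hypothetical usage, not part of agent_basics.py): building an agent. ---
# Assuming an Agent subclass of AgentBasics exists (as hinted by the "do_gen" check above)
# and that a plain torch module is an acceptable processor:
#
#     from unaiverse.agent import Agent  # assumed import path
#     import torch
#
#     agent = Agent(proc=torch.nn.Linear(4, 2),
#                   behav_lone_wolf="serve",            # loads utils/lone_wolf.json
#                   buffer_generated_by_others="one")   # one buffered stream per peer
#
# The lone-wolf template is read with importlib.resources, which resolves files inside an
# installed wheel; the same pattern in isolation:
import importlib.resources

utils_path = importlib.resources.files("unaiverse.utils")
with utils_path.joinpath("lone_wolf.json").open() as f:
    print(f.read()[:80])  # first characters of the packaged HSM template
# --- end of sketch ---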
+     def set_node_info(self, clock: Clock, conn: ConnectionPools, profile: NodeProfile,
+                       out_fcn, ask_to_get_in_touch_fcn, purge_fcn, agents_waiting, print_level):
+         """Set the required information from the node that hosts this agent.
+
+         Args:
+             clock: The global clock instance from the node.
+             conn: The connection pool manager from the node.
+             profile: The profile of the hosting node.
+             out_fcn: The function to use for general output messages.
+             ask_to_get_in_touch_fcn: The function to call to request getting in touch with another peer.
+             purge_fcn: The function to call to purge (kill/disconnect) a connection.
+             agents_waiting: Set of agents that connected to this node but have not yet been evaluated for addition.
+             print_level: The level of output printing verbosity (0, 1, 2).
+         """
+
+         # Getting basic references
+         self._node_name = profile.get_static_profile()['node_name']
+         self._node_clock = clock
+         self._node_profile = profile
+         self._node_conn = conn
+         self._node_out_fcn = out_fcn
+         self._node_ask_to_get_in_touch_fcn = ask_to_get_in_touch_fcn
+         self._node_purge_fcn = purge_fcn
+         self._node_agents_waiting = agents_waiting
+
+         # Adding peer_id information into the already existing stream data (if any)
+         # (initially marked with generic wildcards like <public_peer_id>, ...)
+         net_hashes = list(self.known_streams.keys())
+         for net_hash in net_hashes:
+             if net_hash.startswith("<public_peer_id>") or net_hash.startswith("<private_peer_id>"):
+                 stream_dict = self.known_streams[net_hash]
+                 for stream_obj in stream_dict.values():
+                     self.add_stream(stream_obj, owned=True)  # This will also re-add streams using the node clock
+         self.remove_streams("<public_peer_id>", owned_too=True)
+         self.remove_streams("<private_peer_id>", owned_too=True)
+
+         # World only: loading action files and refactoring (or building) JSON files of the different roles.
+         # This is where the world guesses roles.
+         if self.is_world:
+
+             # Check JSON files in the world folder
+             json_tracker = FileTracker(self.world_folder, ext=".json")  # Case insensitive
+
+             # This usually does nothing, but if you'd like to dynamically create JSON files, overload this method
+             self.create_behav_files()
+
+             # Loading and refactoring
+             self.load_and_refactor_action_file_and_behav_files(force_save=json_tracker.something_changed())
+
+         # Building combinations of the default roles (public, world_agent, world_master) and
+         # agent/world-specific roles
+         self.augment_roles()
+
+         # Creating streams associated to the processor output
+         self.create_proc_output_streams(buffered=self.buffer_generated)
+
+         # Updating node profile by indicating the processor-related streams
+         self.update_streams_in_profile()
+
+         # Print level
+         AgentBasics.DEBUG = print_level > 1
+         ConnectionPools.DEBUG = print_level > 1
+         HybridStateMachine.DEBUG = print_level > 1
+
+         # Subscribing/creating our own pubsub
+         return self.subscribe_to_pubsub_owned_streams()
+
+     def augment_roles(self):
+         """Augment the custom roles (role1, role2, etc.) with the default ones (public, world_master, etc.),
+         generating all the mixed roles (world_master~role1, world_master~role2, ...)."""
+
+         # Both Agent and World: fusing basic roles and custom roles
+         if len(self.CUSTOM_ROLES) > 0:
+             if len(self.CUSTOM_ROLES) > 30:  # Safe value, could be increased
+                 raise GenException("Maximum number of custom roles exceeded (max is 30)")
+             for i, role_str in enumerate(self.CUSTOM_ROLES):
+                 role_int = 1 << (i + 2)  # 00000100, then 00001000, etc. (recall that the first two bits are reserved)
+                 self.ROLE_BITS_TO_STR[role_int] = role_str
+                 self.ROLE_STR_TO_BITS[role_str] = role_int
+
+         # Both Agent and World: augmenting roles
+         roles_not_to_be_augmented = {self.ROLE_PUBLIC, self.ROLE_WORLD_AGENT, self.ROLE_WORLD_MASTER}
+         role_bits_to_str_original = {k: v for k, v in self.ROLE_BITS_TO_STR.items()}
+         for role_int, role_str in role_bits_to_str_original.items():
+             if role_int not in roles_not_to_be_augmented and "~" not in role_str:
+                 for role_base_int in {self.ROLE_WORLD_AGENT, self.ROLE_WORLD_MASTER}:
+                     augmented_role_int = role_base_int | role_int
+                     augmented_role_str = self.ROLE_BITS_TO_STR[role_base_int] + "~" + role_str
+                     if augmented_role_str not in self.ROLE_STR_TO_BITS:
+                         self.ROLE_STR_TO_BITS[augmented_role_str] = augmented_role_int
+                         self.ROLE_BITS_TO_STR[augmented_role_int] = augmented_role_str
+
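# --- Editor's sketch (illustrative, not part of agent_basics.py): what augment_roles produces. ---
# Assuming a single custom role "teacher", the fusion step assigns it 1 << 2 = 4, and the
# augmentation step adds the mixed entries; masking with 3 recovers the base role, which is
# what add_agent's "role & 3" tests rely on:
world_agent, world_master, teacher = 1, 3, 1 << 2
assert world_agent | teacher == 5   # "world_agent~teacher"  -> 00000101
assert world_master | teacher == 7  # "world_master~teacher" -> 00000111
assert (world_master | teacher) & 3 == world_master
# --- end of sketch ---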
+     def clear_world_related_data(self):
+         """Destroy all the cached information that is about a world (useful when leaving a world)."""
+
+         # Clearing status variables
+         self.reset_agent_status_attrs()
+
+         # Clear/reset
+         self.__remove_all_world_private_streams()
+         self.__remove_all_world_related_agents()
+         self._node_conn.reset_rendezvous_tag()
+
+     def load_and_refactor_action_file_and_behav_files(self, force_save: bool = False):
+         """This method is called when building a world object. It loads the behavior files and refactors them.
+         It loads the action file agent.py, and it checks the consistency between the actions in agent.py and the
+         roles in the behavior files.
+
+         Args:
+             force_save: Boolean to force the saving of the JSON files and of a "pdf" folder with the PDFs of the
+                 state machines.
+         """
+
+         # World only: the world discovers CUSTOM_ROLES from the JSON files in the world folder
+         if self.world_folder is not None and self.is_world:
+
+             # Guessing roles from the list of json files
+             self.CUSTOM_ROLES = [os.path.splitext(f)[0] for f in os.listdir(self.world_folder)
+                                  if os.path.isfile(os.path.join(self.world_folder, f)) and
+                                  f.lower().endswith(".json")]
+             if len(self.CUSTOM_ROLES) == 0:
+                 raise GenException(f"No world-role files (*.json) were found in the world folder {self.world_folder}")
+
+             # Default behaviours (getting roles, that are the names of the files with extension "json")
+             default_behav_files = [os.path.join(self.world_folder, f) for f in os.listdir(self.world_folder)
+                                    if os.path.isfile(os.path.join(self.world_folder, f)) and
+                                    f.lower().endswith(".json")]
+
+             # Loading action file
+             action_file = os.path.join(self.world_folder, 'agent.py')
+             try:
+                 with open(action_file, 'r', encoding='utf-8') as file:
+                     self.agent_actions = file.read()
+             except Exception as e:
+                 raise GenException(f'Error while reading the agent.py file: {action_file} [{e}]')
+
+             # Creating a dummy agent which supports the actions of the following state machines
+             mod = types.ModuleType("dynamic_module")
+             try:
+                 exec(self.agent_actions, mod.__dict__)
+                 dummy_agent = mod.WAgent(proc=None)
+                 dummy_agent.CUSTOM_ROLES = self.CUSTOM_ROLES
+             except Exception as e:
+                 raise GenException(f'Unable to create a valid agent object from the agent action file '
+                                    f'{action_file} [{e}]')
+
+             # Checking if the roles you wrote in agent.py are coherent with the JSON files in this folder
+             if dummy_agent.CUSTOM_ROLES != self.CUSTOM_ROLES:
+                 raise GenException(f"Mismatching roles. "
+                                    f"Roles in JSON files: {self.CUSTOM_ROLES}. "
+                                    f"Roles specified in the agent.py file: {dummy_agent.CUSTOM_ROLES}")
+
+             # Loading and refactoring behaviors
+             for role, default_behav_file in zip(self.CUSTOM_ROLES, default_behav_files):
+                 try:
+                     behav = HybridStateMachine(dummy_agent)
+                     behav.load(default_behav_file)
+                     self.role_to_behav[role] = str(behav)
+
+                     # Adding roles and machines to profile
+                     self._node_profile.get_dynamic_profile()['world_roles_fsm'] = self.role_to_behav
+                 except Exception as e:
+                     raise GenException(f'Error while loading or handling '
+                                        f'behav file {default_behav_file} for role {role} [{e}]')
+
+                 # Refactoring and saving PDF
+                 try:
+                     if (force_save or
+                             behav.save(os.path.join(self.world_folder, f'{role}.json'),
+                                        only_if_changed=dummy_agent)):
+                         os.makedirs(os.path.join(self.world_folder, 'pdf'), exist_ok=True)
+                         behav.save_pdf(os.path.join(self.world_folder, 'pdf', f'{role}.pdf'))
+                 except Exception as e:
+                     raise GenException(f'Error while saving the behav file {default_behav_file} '
+                                        f'for role {role} [{e}]')
+
+     def create_behav_files(self):
+         """This method is called when building a world object. In your custom world class, you can overload this
+         method to create the JSON files with the role-related behaviors, if you like. Acting like this is not
+         mandatory at all: you can just create the JSON files manually, and this method will simply do nothing."""
+         pass
+
+     def out(self, msg: str):
+         """Print a message to the console, if enabled at node level (it reuses the node-out-function).
+
+         Args:
+             msg: The message string to print.
+         """
+         self._node_out_fcn(msg)
+
+     def err(self, msg: str):
+         """Print an error message to the console, if enabled at node level (it prefixes the message and reuses the
+         node-out-function).
+
+         Args:
+             msg: The error message string to print.
+         """
+         self.out("<ERROR> " + msg)
+
+     def deb(self, msg: str):
+         """Print a debug message to the console, if debug is enabled for this agent (it reuses the
+         agent-out-function).
+
+         Args:
+             msg: The debug message string to print.
+         """
+         if AgentBasics.DEBUG:
+             self.out("[DEBUG " + ("AGENT" if not self.is_world else "WORLD") + "] " + msg)
+
+     def get_name(self) -> str:
+         """Returns the name of the agent or world from the node's profile.
+
+         Returns:
+             The name of the agent or world.
+         """
+         return self._node_name
+
+     def get_profile(self) -> NodeProfile:
+         """Returns the profile of the node hosting this agent/world.
+
+         Returns:
+             The NodeProfile of this node.
+         """
+         return self._node_profile
+
+     def get_current_role(self, return_int: bool = False, ignore_base_role: bool = True) -> str | int | None:
+         """Returns the current role of the agent.
+
+         Args:
+             return_int: If True, returns the integer representation of the role.
+             ignore_base_role: If True, returns only the specific role part, not the base.
+
+         Returns:
+             The role as a string or integer, or None if the agent is not living in any world.
+         """
+         if self.in_world():
+             role_str = self._node_profile.get_dynamic_profile()['connections']['role']
+             if ignore_base_role:
+                 role_str = role_str.split("~")[-1]
+             if not return_int:
+                 return role_str
+             else:
+                 return self.ROLE_STR_TO_BITS[role_str]
+         else:
+             return None
+
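# --- Editor's sketch (illustrative, not part of agent_basics.py): stripping the base role. ---
# An augmented role string has the form "<base>~<custom>"; split("~")[-1] keeps the custom
# part and leaves non-augmented roles untouched, which is what ignore_base_role relies on:
assert "world_master~teacher".split("~")[-1] == "teacher"
assert "world_agent".split("~")[-1] == "world_agent"
# --- end of sketch ---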
+     def add_agent(self, peer_id: str, profile: NodeProfile) -> bool:
+         """Add a new known agent.
+
+         Args:
+             peer_id: The unique identifier of the peer.
+             profile: The NodeProfile object containing the peer's/agent's information.
+
+         Returns:
+             True if the agent was successfully added, False otherwise.
+         """
+
+         # If the agent was already there, we remove it and add it again (in case of changes)
+         self.remove_agent(peer_id)  # It has no effect if the agent does not exist
+
+         # Guessing the type of agent to add (according to the default roles shared by every agent)
+         role = self._node_conn.get_role(peer_id)
+         self.all_agents[peer_id] = profile
+         if role & 1 == self.ROLE_PUBLIC:
+             self.public_agents[peer_id] = profile
+             public = True
+         elif role & 3 == self.ROLE_WORLD_AGENT:
+             self.world_agents[peer_id] = profile
+             public = False
+         elif role & 3 == self.ROLE_WORLD_MASTER:
+             self.world_masters[peer_id] = profile
+             public = False
+         else:
+             self.err(f"Cannot add agent with peer ID {peer_id} - unknown role: {role}")
+             return False
+
+         # Check compatibility of the streams owned by the agent we are adding with our agent's processor
+         if self.proc_outputs is not None and self.proc_inputs is not None:
+
+             # Check compatibility of the environmental streams of the agent we are adding
+             environmental_streams = profile.get_dynamic_profile()['streams']
+             if (environmental_streams is not None and
+                     not self.add_compatible_streams(peer_id, environmental_streams,
+                                                     buffered=False, public=public)):  # This also "adds" the stream
+                 return False
+
+             # Check compatibility of the generated streams of the agent we are adding
+             proc_streams = profile.get_dynamic_profile()['proc_outputs']
+             if (proc_streams is not None and
+                     not self.add_compatible_streams(peer_id, proc_streams,
+                                                     buffered=False, public=public)):  # This also "adds" the stream
+                 return False
+
+         self.out(f"Successfully added agent with peer ID {peer_id} (public: {public})")
+         return True
+
+     def remove_agent(self, peer_id: str):
+         """Remove an agent.
+
+         Args:
+             peer_id: The unique identifier of the peer to remove.
+         """
+         if peer_id in self.all_agents:
+
+             # Removing from agent list
+             del self.all_agents[peer_id]
+             if peer_id in self.world_agents:
+                 del self.world_agents[peer_id]
+             elif peer_id in self.world_masters:
+                 del self.world_masters[peer_id]
+             elif peer_id in self.public_agents:
+                 del self.public_agents[peer_id]
+
+             # Clearing from the list of processor-input-compatible streams
+             if self.compat_in_streams is not None:
+                 for i, _ in enumerate(self.compat_in_streams):
+                     to_remove = []
+                     for net_hash_name in self.compat_in_streams[i]:
+                         if DataProps.peer_id_from_net_hash(net_hash_name[0]) == peer_id:
+                             to_remove.append(net_hash_name)
+                     for net_hash_name in to_remove:
+                         self.compat_in_streams[i].remove(net_hash_name)
+
+             # Clearing from the list of processor-output-compatible streams
+             if self.compat_out_streams is not None:
+                 for i, _ in enumerate(self.compat_out_streams):
+                     to_remove = []
+                     for net_hash_name in self.compat_out_streams[i]:
+                         if DataProps.peer_id_from_net_hash(net_hash_name[0]) == peer_id:
+                             to_remove.append(net_hash_name)
+                     for net_hash_name in to_remove:
+                         self.compat_out_streams[i].remove(net_hash_name)
+
+             # Clearing streams owned by the removed agent from the list of known streams
+             self.remove_streams(peer_id)
+
+             # Removing from the status variables
+             self.remove_peer_from_agent_status_attrs(peer_id)
+
+             # Updating buffered stream index
+             if peer_id in self.last_buffered_peer_id_to_info:
+                 del self.last_buffered_peer_id_to_info[peer_id]  # Only if present
+             self.out(f"Successfully removed agent with peer ID {peer_id}")
+
+     def remove_all_agents(self):
+         """Remove all known agents."""
+
+         # Clearing all agents
+         self.all_agents = {}
+         self.public_agents = {}
+         self.world_masters = {}
+         self.world_agents = {}
+
+         # Clearing the lists of processor-compatible streams (input and output)
+         if self.compat_in_streams is not None and self.proc_inputs is not None:
+             self.compat_in_streams = [set() for _ in range(len(self.proc_inputs))]
+         if self.compat_out_streams is not None and self.proc_outputs is not None:
+             self.compat_out_streams = [set() for _ in range(len(self.proc_outputs))]
+
+         # Clearing the list of known streams (not our own streams!)
+         self.remove_all_streams(owned_too=False)
+         self.out("Successfully removed all agents")
+
+     def add_behav_wildcard(self, wildcard_from: str, wildcard_to: object):
+         """Adds a wildcard mapping for the agent's behavior state machine.
+
+         Args:
+             wildcard_from: The string to be used as a wildcard.
+             wildcard_to: The object to replace the wildcard.
+         """
+         self.behav_wildcards[wildcard_from] = wildcard_to
+
+     def add_stream(self, stream: DataStream, owned: bool = True,
+                    net_hash: str | None = None) -> dict[str, DataStream]:
+         """Add a new stream to the set of known streams.
+
+         Args:
+             stream: The DataStream object to add.
+             owned: If True, the stream is considered owned by this agent.
+             net_hash: Optional network hash for the stream. If None, it will be generated.
+
+         Returns:
+             A dictionary containing the added stream and the possibly already present streams belonging to the same
+             group (stream name -> stream object).
+         """
+
+         # Forcing clock
+         stream.clock = self._node_clock
+
+         # Stream net hash
+         if net_hash is None:
+             public_peer_id, private_peer_id = self.get_peer_ids()
+             peer_id = public_peer_id if stream.is_public() else private_peer_id
+             net_hash = stream.net_hash(peer_id)
+
+         # Adding the new stream
+         if net_hash not in self.known_streams:
+             self.known_streams[net_hash] = {}
+         else:
+             for _stream in self.known_streams[net_hash].values():
+                 public = _stream.get_props().is_public()
+                 pubsub = _stream.get_props().is_pubsub()
+                 if public and not stream.get_props().is_public():
+                     self.err(f"Cannot add a stream to a group with different properties (public): "
+                              f"hash: {net_hash}, name: {stream.get_props().get_name()}, "
+                              f"public: {stream.get_props().is_public()}")
+                     return {}
+                 if pubsub and not stream.get_props().is_pubsub():
+                     self.err(f"Cannot add a stream to a group with different properties (pubsub): "
+                              f"hash: {net_hash}, name: {stream.get_props().get_name()}, "
+                              f"pubsub: {stream.get_props().is_pubsub()}")
+                     return {}
+                 break
+         self.known_streams[net_hash][stream.get_props().get_name()] = stream
+
+         if owned:
+
+             # Adding an 'owned' processor output stream (i.e., the stream coming from OUR OWN processor)
+             is_proc_outputs_stream = False
+             if self.proc_outputs is not None:
+                 proc_outputs_name_and_group = set()
+                 for props in self.proc_outputs:
+                     proc_outputs_name_and_group.add((props.get_name(), props.get_group()))
+                 if (stream.get_props().get_name(), stream.get_props().get_group()) in proc_outputs_name_and_group:
+                     if net_hash not in self.proc_streams:
+                         self.proc_streams[net_hash] = {}
+                     self.proc_streams[net_hash][stream.get_props().get_name()] = stream
+                     is_proc_outputs_stream = True
+
+             if net_hash not in self.owned_streams:
+                 self.owned_streams[net_hash] = {}
+             self.owned_streams[net_hash][stream.get_props().get_name()] = stream
+
+             if not is_proc_outputs_stream:
+                 if net_hash not in self.env_streams:
+                     self.env_streams[net_hash] = {}
+                 self.env_streams[net_hash][stream.get_props().get_name()] = stream
+
+             # Adding empty recipient slot
+             if net_hash not in self.recipients:
+                 self.recipients[net_hash] = None
+
+         # If needed, merging descriptor labels (attribute labels) and sharing them with all streams
+         if self.merge_flat_stream_labels:
+             self.merge_flat_data_stream_props()
+
+         return self.known_streams[net_hash]
+
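# --- Editor's sketch (hypothetical usage, not part of agent_basics.py): adding an owned stream. ---
# The DataProps arguments below are assumptions for illustration, not the exact unaiverse
# signature; what is grounded in the code above is the call shape and the return contract:
#
#     from unaiverse.dataprops import DataProps
#     from unaiverse.streams import DataStream
#
#     props = DataProps(name="camera", group="env")        # argument names are assumed
#     group_dict = agent.add_stream(DataStream(props=props), owned=True)
#
# On success, the returned dict maps stream names to the DataStream objects of the whole
# group sharing the same net hash; an empty dict signals a public/pubsub mismatch with the
# streams already registered under that hash.
# --- end of sketch ---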
+     def add_streams(self, streams: list[DataStream], owned: bool = True, net_hash: str | None = None) \
+             -> list[dict[str, DataStream]]:
+         """Add a list of new streams to this environment.
+
+         Args:
+             streams: A list of DataStream objects to add.
+             owned: If True, the streams are considered owned by this agent.
+             net_hash: Optional network hash for the streams. If None, it will be generated for each.
+
+         Returns:
+             A list of dictionaries (it could be empty in case of issues), where each dictionary is what
+             is returned by add_stream().
+         """
+
+         # Adding the new streams
+         ret = []
+         for stream in streams:
+             stream_dict = self.add_stream(stream, owned, net_hash)
+             if len(stream_dict) == 0:
+                 return []
+             ret.append(stream_dict)
+         return ret
+
+     def remove_streams(self, peer_id: str, name: str | None = None, owned_too: bool = False):
+         """Remove a known stream.
+
+         Args:
+             peer_id: The hash of each stream includes the peer ID of the owner; this is the peer ID associated with
+                 the stream(s) to remove.
+             name: The optional name of the stream to remove. If None, all streams with this peer_id are removed.
+             owned_too: If True, also removes streams from the owned stream dict (so also environmental and processor
+                 ones).
+         """
+
+         # Identifying what to remove
+         to_remove = []
+         for net_hash in self.known_streams.keys():
+             if DataProps.peer_id_from_net_hash(net_hash) == peer_id:
+                 for _name, _stream in self.known_streams[net_hash].items():
+                     if name is None or name == _name:
+                         to_remove.append((net_hash, _name))
+
+         # Removing
+         for (net_hash, name) in to_remove:
+             if not owned_too and net_hash in self.owned_streams:
+                 continue
+
+             del self.known_streams[net_hash][name]
+             if len(self.known_streams[net_hash]) == 0:
+                 del self.known_streams[net_hash]
+
+                 # Unsubscribing from pubsub
+                 if DataProps.is_pubsub_from_net_hash(net_hash):
+                     if peer_id != "<private_peer_id>" and peer_id != "<public_peer_id>":
+                         if not self._node_conn.unsubscribe(peer_id, channel=net_hash):
+                             self.err(f"Failed in unsubscribing from pubsub, peer_id: {peer_id}, "
+                                      f"channel: {net_hash}")
+                         else:
+                             self.out(f"Successfully unsubscribed from pubsub, peer_id: {peer_id}, "
+                                      f"channel: {net_hash}")
+
+             # Removing all the owned streams (environment and processor streams are of course "owned")
+             if net_hash in self.owned_streams:
+                 if name in self.owned_streams[net_hash]:
+                     del self.owned_streams[net_hash][name]
+                     if len(self.owned_streams[net_hash]) == 0:
+                         del self.owned_streams[net_hash]
+             if net_hash in self.env_streams:
+                 if name in self.env_streams[net_hash]:
+                     del self.env_streams[net_hash][name]
+                     if len(self.env_streams[net_hash]) == 0:
+                         del self.env_streams[net_hash]
+             if net_hash in self.proc_streams:
+                 if name in self.proc_streams[net_hash]:
+                     del self.proc_streams[net_hash][name]
+                     if len(self.proc_streams[net_hash]) == 0:
+                         del self.proc_streams[net_hash]
+             self.out(f"Successfully removed known stream with network hash {net_hash}, stream name: {name}")
+
+     def remove_all_streams(self, owned_too: bool = False):
+         """Remove all not-owned streams.
+
+         Args:
+             owned_too: If True, also removes the owned streams of this agent (so also environmental and processor
+                 ones).
+         """
+         if not owned_too:
+             self.known_streams = {k: v for k, v in self.owned_streams.items()}
+         else:
+             self.known_streams = {}
+             self.owned_streams = {}
+             self.env_streams = {}
+             self.proc_streams = {}
+         self.out("Successfully removed all streams!")
+
+     def find_streams(self, peer_id: str, name_or_group: str | None = None) -> dict[str, dict[str, DataStream]]:
+         """Find streams associated with a given peer ID and optionally by name or group.
+
+         Args:
+             peer_id: The peer ID of the (owner of the) streams to find.
+             name_or_group: Optional name or group of the streams to find.
+
+         Returns:
+             A dictionary where keys are network hashes and values are dictionaries of streams
+             (stream name to DataStream object) matching the criteria.
+         """
+         ret = {}
+         for net_hash, streams_dict in self.known_streams.items():
+             _peer_id = DataStream.peer_id_from_net_hash(net_hash)
+             _name_or_group = DataStream.name_or_group_from_net_hash(net_hash)
+             if peer_id == _peer_id:
+                 if name_or_group is None or name_or_group == _name_or_group:
+                     ret[net_hash] = streams_dict
+                 else:
+                     for _name, _stream in streams_dict.items():
+                         if name_or_group == _name:
+                             if net_hash not in ret:
+                                 ret[net_hash] = {}
+                             ret[net_hash][name_or_group] = _stream
+         return ret
+
+     def get_last_streamed_data(self, agent_name: str):
+         """Get the last data sample from every known stream belonging to the agent with the given name.
+
+         Args:
+             agent_name: The name of the agent.
+
+         Returns:
+             A list of data samples taken from all the known streams associated to the provided agent.
+         """
+         data_list = []
+         for peer_id, profile in self.all_agents.items():
+             if profile.get_static_profile()['node_name'] == agent_name:
+                 net_hash_to_stream_dict = self.find_streams(peer_id)
+                 for net_hash, streams_dict in net_hash_to_stream_dict.items():
+                     for stream_name, stream_obj in streams_dict.items():
+                         data_list.append(stream_obj.get())
+         return data_list
+
+     def merge_flat_data_stream_props(self):
+         """Merge the labels of the descriptor components, across all streams, sharing them."""
+
+         # Set of pivot labels
+         superset_labels = []
+
+         # Checking the whole list of streams, but considering only the ones with generic data, flat, and labels
+         considered_streams = []
+
+         for stream_dict in self.owned_streams.values():
+             for stream in stream_dict.values():
+
+                 # Skipping not-flat, or not-generic, or unlabeled streams
+                 if not stream.props.is_flat_tensor_with_labels():
+                     continue
+
+                 # Saving list of considered streams
+                 considered_streams.append(stream)
+
+                 # Adding the current stream labels to the pivot labels
+                 for label in stream.props.tensor_labels:
+                     if label not in superset_labels:
+                         superset_labels.append(label)
+
+         # Telling each stream in which positions its labels fall, given the pivot labels
+         for stream in considered_streams:
+
+             # In the case of BufferedDataStream, we have to update the data buffer by clearing previously applied
+             # adaptation first (I know it looks similar to what is done below, but we must clear first!)
+             if isinstance(stream, BufferedDataStream):
+                 for i, (data, data_tag) in enumerate(stream.data_buffer):
+                     stream.data_buffer[i] = (stream.props.clear_label_adaptation(data), data_tag)
+
+             # Updating labels
+             stream.props.tensor_labels.interleave_with(superset_labels)
+
+             # In the case of BufferedDataStream, we have to update the data buffer with the new labels
+             if isinstance(stream, BufferedDataStream):
+                 for i, (data, data_tag) in enumerate(stream.data_buffer):
+                     stream.data_buffer[i] = (stream.props.adapt_tensor_to_tensor_labels(data), data_tag)
+
+     def user_stream_hash_to_net_hash(self, user_stream_hash: str) -> str | None:
+         """Converts a user-defined stream hash (peer_id:name_or_group) to a network hash
+         (peer_id::dm:... or peer_id::ps:name_or_group) by searching the known hashes in the known streams.
+
+         Args:
+             user_stream_hash: The user-defined stream hash string (peer_id:name_or_group).
+
+         Returns:
+             The corresponding network hash string (peer_id::dm:... or peer_id::ps:name_or_group), or None if not
+             found.
+         """
+         components = user_stream_hash.split(":")
+         peer_id = components[0]
+         name_or_group = components[-1]
+         for net_hash in self.known_streams.keys():
+             _peer_id = DataStream.peer_id_from_net_hash(net_hash)
+             _name_or_group = DataStream.name_or_group_from_net_hash(net_hash)
+             if _peer_id == peer_id and _name_or_group == name_or_group:
+                 return net_hash
+         return None
+
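# --- Editor's sketch (illustrative, not part of agent_basics.py): the two hash formats being mapped. ---
# A user-level hash is "<peer_id>:<name_or_group>", while the network hash embeds the
# transport ("dm" for direct messages, "ps" for pubsub), e.g. "<peer_id>::ps:<name_or_group>".
# The lookup above only splits on ":" and compares the two ends, so a minimal model of it is:
def matches(user_hash: str, net_hash_peer: str, net_hash_name: str) -> bool:
    parts = user_hash.split(":")
    return parts[0] == net_hash_peer and parts[-1] == net_hash_name

assert matches("QmPeer:frames", "QmPeer", "frames")  # "QmPeer" is an illustrative peer ID
# --- end of sketch ---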
+     def create_proc_output_streams(self, buffered: bool = False):
+         """Creates the processor output streams based on the `proc_outputs` defined for the agent.
+
+         Args:
+             buffered: If True, the created streams will be of type BufferedDataStream.
+         """
+
+         # Adding generated streams (grouped together), passing the node clock
+         if self.proc_outputs is not None:
+             for i, procs in enumerate(self.proc_outputs):
+                 procs.set_group("processor")  # Adding default group info, forced, do not change this!
+
+                 # Creating the streams
+                 for props in procs.props:
+                     if not buffered:
+                         stream = DataStream(props=props.clone(), clock=self._node_clock)
+                     else:
+                         stream = BufferedDataStream(props=props.clone(), clock=self._node_clock)
+
+                     self.add_stream(stream, owned=True)
+
+     def add_compatible_streams(self, peer_id: str,
+                                streams_in_profile: list[dict], buffered: bool = False,
+                                add_all: bool = False, public: bool = True) -> bool:
+         """Add to the list of processor-compatible streams those streams, provided as arguments, that are actually
+         found to be compatible with the processor (if they are pubsub, it also subscribes to them).
+
+         Args:
+             peer_id: The peer ID of the agent providing the streams.
+             streams_in_profile: A list of dict-encoded DataProps representing the streams from the peer's profile.
+             buffered: If True, the added streams will be of type BufferedDataStream.
+             add_all: If True, all streams from the profile are added, regardless of processor compatibility.
+             public: If True, consider public streams only; if False, private streams only.
+
+         Returns:
+             True if compatible streams were successfully added and subscribed to, False otherwise.
+         """
+         added_streams = []
+
+         if add_all:
+
+             # This is the case in which we add all streams, storing all pairs (DataProps, net_hash)
+             for j in streams_in_profile:
+                 jj = DataProps.from_dict(j)
+                 if public == jj.is_public():
+                     net_hash = jj.net_hash(peer_id)
+                     added_streams.append((jj, net_hash))
+         else:
+
+             # This is the case in which a processor is present, hence storing pairs (DataProps, net_hash)
+             # of the found compatible streams
+             added_net_hash_to_prop_name = {}
+
+             # Find streams that are compatible with our 'proc_inputs'
+             for i, in_proc in enumerate(self.proc_inputs):
+                 for j in streams_in_profile:
+                     jj = DataProps.from_dict(j)
+                     if public == jj.is_public() and in_proc.is_compatible(jj):
+                         net_hash = jj.net_hash(peer_id)
+
+                         if net_hash not in added_net_hash_to_prop_name:
+                             added_net_hash_to_prop_name[net_hash] = set()
+                         if jj.name not in added_net_hash_to_prop_name[net_hash]:
+                             added_net_hash_to_prop_name[net_hash].add(jj.name)
+                             added_streams.append((jj, net_hash))
+
+                         # Saving the position in the proc_input list
+                         self.compat_in_streams[i].add((net_hash, jj.get_name()))
+
+             # Find streams that are compatible with our 'proc_outputs'
+             has_cross_entropy = [False] * len(self.proc_outputs)
+             if 'losses' in self.proc_opts:
+                 for i in range(0, len(self.proc_outputs)):
+                     if self.proc_opts['losses'][i] is not None and \
+                             (self.proc_opts['losses'][i] == torch.nn.functional.cross_entropy or
+                              isinstance(self.proc_opts['losses'][i], torch.nn.CrossEntropyLoss) or
+                              "cross_entropy" in self.proc_opts['losses'][i].__name__):
+                         has_cross_entropy[i] = True
+
+             for i, out_proc in enumerate(self.proc_outputs):
+                 for j in streams_in_profile:
+                     jj = DataProps.from_dict(j)
+                     if (public == jj.is_public() and
+                             (out_proc.is_compatible(jj) or (jj.is_tensor_target_id() and has_cross_entropy[i]))):
+                         net_hash = jj.net_hash(peer_id)
+
+                         if net_hash not in added_net_hash_to_prop_name:
+                             added_net_hash_to_prop_name[net_hash] = set()
+                         if jj.name not in added_net_hash_to_prop_name[net_hash]:
+                             added_net_hash_to_prop_name[net_hash].add(jj.name)
+                             added_streams.append((jj, net_hash))
+
+                         # Saving the position in the proc_output list
+                         self.compat_out_streams[i].add((net_hash, jj.get_name()))
+
+         net_hashes_to_subscribe = set()
+
+         # For each compatible stream found...
+         for (props, net_hash) in added_streams:
+
+             # Check if it is a new stream or a data stream to add to an already known stream
+             already_known_stream = net_hash in self.known_streams
+
+             # Creating the stream object
+             if not buffered:
+                 stream = DataStream(props=props.clone(), clock=self._node_clock)
+             else:
+                 stream = BufferedDataStream(props=props.clone(), clock=self._node_clock)
+
+             # Add the data stream to the list of known streams
+             # (if the stream already exists it will be overwritten, which is fine in case of changes)
+             self.add_stream(stream, owned=False, net_hash=net_hash)
+
+             # If the stream is over PubSub, and we are not already subscribed, we will subscribe
+             if props.is_pubsub() and not already_known_stream:
+                 net_hashes_to_subscribe.add(net_hash)
+
+         # Opening PubSubs
+         for net_hash in net_hashes_to_subscribe:
+             self.out(f"Opening channel for the not-owned but processor-compatible stream {net_hash}")
+             if not self._node_conn.subscribe(peer_id, channel=net_hash):
+                 self.err(f"Error subscribing to {net_hash}")
+                 return False
+
+         return True
+
+     def subscribe_to_pubsub_owned_streams(self) -> bool:
+         """Subscribes to all owned streams that are marked as PubSub.
+
+         Returns:
+             True if all subscriptions were successful, False otherwise.
+         """
+
+         # Opening channels for all the (groups of) owned streams (generated and not)
+         for net_hash in self.owned_streams.keys():
+             is_pubsub = DataStream.is_pubsub_from_net_hash(net_hash)
+
+             if is_pubsub:
+                 self.out(f"Opening channel for the owned stream {net_hash}")
+                 peer_id = DataStream.peer_id_from_net_hash(net_hash)  # Guessing peer ID from the net hash
+
+                 if not self._node_conn.subscribe(peer_id, channel=net_hash):
+                     self.err(f"Cannot open a channel for owned stream hash {net_hash}")
+                     return False
+         return True
+
+     def update_streams_in_profile(self):
+         """Updates the agent's profile with information about its owned (environmental and processor) streams."""
+
+         # Filling the information about the streams that can be generated and handled
+         dynamic_profile = self._node_profile.get_dynamic_profile()
+         if hasattr(self, 'proc_outputs') and hasattr(self, 'proc_inputs'):
+             dynamic_profile['proc_outputs'] = \
+                 [dct for d in self.proc_outputs for dct in d.to_list_of_dicts()]  # List of dicts of DataProps
+             dynamic_profile['proc_inputs'] = \
+                 [dct for d in self.proc_inputs for dct in d.to_list_of_dicts()]  # List of dicts of DataProps
+
+         # Adding the list of locally-created ("environmental") streams to the profile
+         list_of_props = []
+         public_peer_id, private_peer_id = self.get_peer_ids()
+         for net_hash, streams_dict in self.known_streams.items():
+             if net_hash not in self.proc_streams.keys():
+                 if (DataProps.peer_id_from_net_hash(net_hash) == public_peer_id or
+                         DataProps.peer_id_from_net_hash(net_hash) == private_peer_id):
+                     for stream in streams_dict.values():
+                         list_of_props.append(stream.get_props().to_dict())  # DataProps
+         if len(list_of_props) > 0:
+             dynamic_profile['streams'] = list_of_props
+
+     def send_profile_to_all(self):
+         """Sends the agent's profile to all known agents."""
+
+         for peer_id in list(self.all_agents.keys()):  # Copy of the keys: remove_agent() may alter the dict
+             self.out(f"Sending profile to {peer_id}")
+             if not self._node_conn.send(peer_id, channel_trail=None,
+                                         content=self._node_profile.get_all_profile(),
+                                         content_type=Msg.PROFILE):
+                 self.err("Failed to send profile, removing (disconnecting) " + peer_id)
+                 self.remove_agent(peer_id)
+
+     def generate(self, input_net_hashes: list[str] | None = None,
+                  inputs: list[str | torch.Tensor | Image] | None = None,
+                  first: bool = False, last: bool = False,
+                  ref_uuid: str | None = None) -> tuple[tuple[torch.Tensor, ...] | None, int]:
+         """Generate new signals.
+
+         Args:
+             input_net_hashes: A list of network hashes to be considered as input streams (they will be sub-selected).
+             inputs: A list of data to be directly provided as input to the processor (if not None, input_net_hashes
+                 is ignored).
+             first: If True, indicates this is the first generation call in a sequence.
+             last: If True, indicates this is the last generation call in a sequence.
+             ref_uuid: An optional UUID to match against input stream UUIDs (it can be None).
+
+         Returns:
+             A tuple containing:
+             - A tuple of torch.Tensor objects representing the generated output, or None if generation failed.
+             - An integer representing a data tag or status.
+         """
+
+         # Preparing processor input
+         if inputs is None:
+             inputs = [None] * len(self.proc_inputs)
+         matched = set()
+         data_tag = None
+
+         if input_net_hashes is None:
+             input_net_hashes = []
+
+         # Checking UUIDs and searching the provided input streams: we look to match them with the processor input
+         for net_hash in input_net_hashes:
+             stream_dict = self.known_streams[net_hash]
+             for stream_name, stream in stream_dict.items():
+
+                 # Checking the UUID in our known streams, comparing it with the UUID provided as input:
+                 # if they are not compatible, we don't generate at all
+                 if ref_uuid is not None and stream.get_uuid(expected=False) != ref_uuid:
+                     self.deb(f"[generate] The UUID ({stream.get_uuid(expected=False)}, expected: "
+                              f"{stream.get_uuid(expected=True)}) of stream {net_hash} is not the one we were "
+                              f"looking for ({ref_uuid}), skipping this data stream")
+                     continue
+
+                 # Matching the currently checked input stream with one of the processor inputs
+                 stream_sample = None
+                 for i in range(len(self.proc_inputs)):
+
+                     # If the current input stream is compatible with the i-th input slot...
+                     if (net_hash, stream_name) in self.compat_in_streams[i]:
+
+                         # If the current input stream was already assigned to another input slot
+                         # (different from "i") we skip the generation
+                         if (net_hash, stream_name) in matched:
+                             self.err("Cannot generate: ambiguous input streams provided "
+                                      "(they can match multiple processor inputs)")
+                             return None, -1
+
+                         # Found a valid assignment: getting stream sample
+                         self.deb(f"[generate] Setting the {i}-th network input to stream with "
+                                  f"net_hash: {net_hash}, name: {stream_name}")
+                         if stream_sample is None:
+                             stream_sample = stream.get(requested_by="generate")
+                             if stream_sample is None:  # If there are no samples in this stream, we cannot generate
+                                 self.err(f"Cannot generate: got nothing (None) from what we would provide to the "
+                                          f"{i}-th input position of the processor")
+                                 return None, -1
+
+                         # Found a valid assignment: associating it to the i-th input slot
+                         try:
+                             inputs[i] = self.proc_inputs[i].check_and_preprocess(stream_sample,
+                                                                                  device=self.proc.device)
+                         except Exception as e:
+                             self.err(f"Error while checking and preprocessing the {i}-th input [{e}]")
+
+                         # Found a valid assignment: saving match
+                         matched.add((net_hash, stream_name))
+
+                         # If all the inputs share the same data tag, we will return it,
+                         # otherwise we set it to -1 (meaning no tag)
+                         if data_tag is None:
+                             data_tag = stream.get_tag()
+                         elif data_tag != stream.get_tag():
+                             data_tag = -1
+
+                         if AgentBasics.DEBUG:
+                             if stream.props.is_text():
+                                 self.deb(f"[generate] Input of the network: {stream_sample}")
+
+                         break
+
+         # Checking if we were able to match some data for each input slot of the network (processor)
+         for i in range(len(self.proc_inputs)):
+             if inputs[i] is None:
+                 if self.proc_optional_inputs[i]["has_default"]:
+                     inputs[i] = self.proc_optional_inputs[i]["default_value"]
+                 else:
+                     self.err(f"Cannot generate: couldn't find a valid input for the "
+                              f"{i}-th input position of the processor (and no default values are present)")
+                     return None, -1
+             else:
+                 data_tag = -1
+
+         if AgentBasics.DEBUG:
+             if inputs is not None:
+                 self.deb(f"[generate] Input shapes: {[x.shape for x in inputs if isinstance(x, torch.Tensor)]}")
+                 self.deb(f"[generate] Input data tag: {data_tag}")
+
+         # Calling processor (inference) passing the collected inputs
+         inputs = self.proc_callback_inputs(inputs)
+         try:
+             outputs = self.proc(*inputs, first=first, last=last)
+
+             # Ensuring the output is a tuple, even if composed of a single tensor
+             if not isinstance(outputs, tuple):
+                 outputs = (outputs, )
+         except Exception as e:
+             self.err(f"Error while calling the processor [{e}]")
+             outputs = None
+         outputs = self.proc_callback_outputs(outputs)
+
+         # Saving
+         self.last_ref_uuid = ref_uuid
+
+         if AgentBasics.DEBUG:
+             if outputs is not None:
+                 for net_hash, stream_dict in self.proc_streams.items():
+                     for stream in stream_dict.values():
+                         if stream.props.is_tensor() or stream.props.is_text():
+                             self.deb(f"[generate] Text outputs: "
+                                      f"{[str(stream.props.to_text(x)) for x in outputs]}")
+                             break
+                 self.deb(f"[generate] Output shapes: {[x.shape for x in outputs if isinstance(x, torch.Tensor)]}")
+
+         return outputs, data_tag
+
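# --- Editor's sketch (hypothetical usage, not part of agent_basics.py): two ways to feed generate(). ---
# Hypothetical calls; the net hash below is illustrative.
#
#     # 1) Let the agent pull samples from known, processor-compatible streams:
#     outputs, tag = agent.generate(input_net_hashes=["QmPeer::ps:processor"])
#
#     # 2) Bypass stream matching and feed data directly (one entry per processor input):
#     outputs, tag = agent.generate(inputs=[torch.rand(1, 4)], first=True, last=True)
#
# A (None, -1) result means matching failed (ambiguous streams, an empty stream, or a
# missing input slot without a default value).
# --- end of sketch ---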
1154
+ def learn_generate(self,
1155
+ outputs: tuple[torch.Tensor],
1156
+ targets_net_hashes: list[str] | None) -> tuple[list[float] | None, list[float] | None]:
1157
+ """Learn (i.e., update model params) by matching the given processor outputs with a set of targets (if any).
1158
+
1159
+ Args:
1160
+ outputs: A tuple of torch.Tensor representing the outputs generated by the agent's processor.
1161
+ targets_net_hashes: An optional list of network hashes identifying the streams
1162
+ from which target data should be retrieved for learning.
1163
+ If None, losses are evaluated without explicit targets.
1164
+
1165
+ Returns:
1166
+ A tuple containing:
1167
+ - A list of float values representing the individual loss values for each output.
1168
+ Returns None if targets are specified but cannot be found.
1169
+ - A list of integers representing the data tags of the given target streams (None if no targets were given).
1170
+ """
1171
+
+        # Cannot learn without optimizer and losses
+        if (self.proc_opts['optimizer'] is None or self.proc_opts['losses'] is None or
+                len(self.proc_opts['losses']) == 0):
+            return None, None
+
+        # Matching targets with the output slots of the processor
+        at_least_one_target_found = False
+        if targets_net_hashes is not None:
+            targets = [None] * len(self.proc_outputs)
+            matched = set()
+            data_tags = [-1] * len(self.proc_outputs)
+
+            # For each target stream group...
+            for net_hash in targets_net_hashes:
+                stream_dict = self.known_streams[net_hash]
+
+                # For each stream of the current target group...
+                for stream_name, stream in stream_dict.items():
+                    stream_sample = None
+
+                    # For each output slot of our processor... (index "i")
+                    for i in range(len(self.proc_outputs)):
+
+                        # Check if the i-th target was already assigned or if the i-th output is not a tensor
+                        if targets[i] is not None or not isinstance(outputs[i], torch.Tensor):
+                            continue
+
+                        # If the target stream is compatible with the i-th output of the processor...
+                        if (net_hash, stream_name) in self.compat_out_streams[i]:
+
+                            # If the current target was already assigned to another output slot
+                            # (different from "i"), we skip learning
+                            if (net_hash, stream_name) in matched:
+                                self.err("Cannot generate: ambiguous target streams provided "
+                                         "(they can match multiple processor outputs)")
+                                return None, None
+
+                            # Found a valid assignment: getting stream sample
+                            if stream_sample is None:
+                                stream_sample = stream.get(requested_by="learn_generate")
+                                if stream_sample is None:
+                                    return None, None
+
+                            # Found a valid assignment: associating target to the i-th output slot
+                            try:
+                                targets[i] = self.proc_outputs[i].check_and_preprocess(stream_sample,
+                                                                                       allow_class_ids=True,
+                                                                                       targets=True,
+                                                                                       device=self.proc.device)
+                            except Exception as e:
+                                self.err(f"Error while checking and preprocessing the {i}-th targets [{e}]")
+
+                            # Found a valid assignment: saving match
+                            matched.add((net_hash, stream_name))
+
+                            # Saving tag
+                            data_tags[i] = stream.get_tag()
+
+                            # Confirming
+                            at_least_one_target_found = True
+
+                            if AgentBasics.DEBUG:
+                                if stream.props.is_tensor():
+                                    self.deb("[generate] Target of the network: " +
+                                             str(stream.props.to_text(targets[i])))
+                                elif stream.props.is_text():
+                                    self.deb("[generate] Target of the network: " + stream_sample)
+                            break
+
+            # If no targets were matched, we skip learning
+            if not at_least_one_target_found:
+                self.err("Cannot learn: cannot find a valid target for any output position of the processor")
+                return None, None
+        else:
+
+            # If no targets were provided, it is expected to be the case of fully unsupervised learning
+            data_tags = None
+            targets = None
+
+        # Retrieving custom elements from the option dictionary
+        loss_functions: list = self.proc_opts['losses']
+        optimizer: torch.optim.Optimizer | None = self.proc_opts['optimizer']
+
+        # Evaluating loss function(s), one for each processor output slot (set to 0. for slots without a target)
+        if targets_net_hashes is not None:
+
+            # Supervised or partly supervised learning
+            loss_values = [loss_fcn(outputs[i], targets[i]) if targets[i] is not None else
+                           torch.tensor(0., device=self.proc.device)
+                           for i, loss_fcn in enumerate(loss_functions)]
+            loss = torch.stack(loss_values).sum()  # Sum of losses
+        else:
+
+            # Unsupervised learning
+            loss_values = [loss_fcn(outputs[i]) for i, loss_fcn in enumerate(loss_functions)]
+            loss = torch.stack(loss_values).sum()  # Sum of losses
+
+        # Learning step
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        # Teaching (for autoregressive models, expected to have attribute "y");
+        # guarded against the unsupervised case, where targets is None
+        if hasattr(self.proc, 'y') and targets is not None:
+            self.proc.y = targets[0]
+
+        # Returning a list of float values and the data tags of the targets
+        return [loss_value.item() for loss_value in loss_values], data_tags
+
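A property worth noting in the learning step above: every output slot always contributes one term to the summed loss, with unmatched slots contributing a constant zero, so backward() sees a well-defined graph regardless of how many targets were found. A hedged sketch of that aggregation, detached from the stream machinery (illustrative names only):

    import torch

    def learn_step(outputs, targets, loss_fcns, optimizer, device="cpu"):
        # One term per slot; a missing target yields a constant zero (no gradient)
        loss_values = [fcn(o, t) if t is not None else torch.tensor(0., device=device)
                       for fcn, o, t in zip(loss_fcns, outputs, targets)]
        loss = torch.stack(loss_values).sum()  # Sum of losses
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        return [lv.item() for lv in loss_values]

    model = torch.nn.Linear(3, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    out = model(torch.randn(5, 3))
    print(learn_step((out,), (torch.zeros(5, 1),), [torch.nn.MSELoss()], opt))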
+    def behave(self):
+        """Behave in the current environment, calling the state-machines of the public and private networks."""
+
+        if self.in_world():
+            self.out("Behaving (world)...")
+            if self.behav is None:
+                self.err("No behaviour specified")
+            else:
+                self.behav_lone_wolf.enable(False)
+                self.behav.enable(True)
+                self.behav.act()
+                self.behav.enable(False)
+
+        self.out("Behaving (public)...")
+        if self.behav_lone_wolf is None:
+            self.err("No behaviour specified")
+        else:
+            self.behav.enable(False)
+            self.behav_lone_wolf.enable(True)
+            self.behav_lone_wolf.act()
+            self.behav_lone_wolf.enable(False)
+
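behave() keeps the two machines mutually exclusive: the other machine is disabled before one is enabled, acted, and disabled again, so anything triggered inside act() is attributable to exactly one behaviour. The pattern in isolation (toy classes, not the HybridStateMachine API):

    class ToyBehaviour:
        def __init__(self, name):
            self.name, self._enabled = name, False

        def enable(self, flag: bool):
            self._enabled = flag

        def act(self):
            assert self._enabled, "only the enabled behaviour may act"
            print(self.name, "acting")

    world, lone_wolf = ToyBehaviour("world"), ToyBehaviour("lone_wolf")
    lone_wolf.enable(False)   # disable the other machine first
    world.enable(True)
    world.act()
    world.enable(False)       # leave both disabled between calls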
+    def learn_behave(self, state: int, last_action: int, prev_state: int):
+        """A placeholder method for behavioral learning, intended to be implemented by child classes.
+        It receives state and action information to update a behavioral model.
+
+        Args:
+            state: The current state of the agent.
+            last_action: The last action taken.
+            prev_state: The previous state of the agent.
+
+        Returns:
+            An integer representing a new state, or similar feedback.
+        """
+        pass
+
+    def get_peer_ids(self):
+        """Retrieve the public and private peer IDs of the agent, from the underlying node's dynamic profile.
+
+        Returns:
+            A tuple containing the public peer ID and the private peer ID.
+            If either ID is not available, the corresponding placeholder string
+            '<public_peer_id>' or '<private_peer_id>' is returned in its place.
+        """
+        public_peer_id = None
+        private_peer_id = None
+        if self._node_profile is not None:
+            dynamic_profile = self._node_profile.get_dynamic_profile()
+            public_peer_id = dynamic_profile['peer_id']  # Public
+            private_peer_id = dynamic_profile['private_peer_id']  # Private
+        public_peer_id = '<public_peer_id>' if public_peer_id is None else public_peer_id
+        private_peer_id = '<private_peer_id>' if private_peer_id is None else private_peer_id
+        return public_peer_id, private_peer_id
+
+    def evaluate_profile(self, role: int, profile: NodeProfile) -> bool:
+        """Evaluate if a given profile is valid for this agent based on its role. It helps in identifying and
+        filtering out invalid or 'cheating' profiles.
+
+        Args:
+            role: The expected integer role (e.g., ROLE_PUBLIC, ROLE_WORLD_MASTER) for the profile.
+            profile: The NodeProfile object to be evaluated.
+
+        Returns:
+            True if the profile is considered valid for the specified role, False otherwise.
+        """
+
+        # If the role in the profile is not the provided role, a profile-cheater was found
+        if self.ROLE_STR_TO_BITS[profile.get_dynamic_profile()['connections']['role']] != role:
+            self.out(f"Cheater found: "
+                     f"{profile.get_dynamic_profile()['connections']['role']} != {self.ROLE_BITS_TO_STR[role]}")
+            return False  # Cheater found
+
+        # These are just examples: you are expected to reimplement this method in your custom agent file
+        if (role & 1 == self.ROLE_PUBLIC and
+                profile.get_dynamic_profile()['guessed_location'] == 'Some Dummy Location, Just An Example Here'):
+            return False
+        elif (role & 3 == self.ROLE_WORLD_MASTER and
+                profile.get_dynamic_profile()['guessed_location'] == 'Some Other Location, Just Another Example Here'):
+            return False
+        else:
+            return True
+
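Since the location checks above are explicitly placeholders, a deployment would subclass the agent, keep the role-consistency check, and add its own policy. A sketch of such an override (the base class name and the trusted_masters attribute are hypothetical; the profile fields follow the method above):

    class MyAgent(Agent):  # "Agent" stands for the package's agent class (assumed)
        def evaluate_profile(self, role: int, profile) -> bool:
            dyn = profile.get_dynamic_profile()

            # Keep the role-consistency check from the default implementation
            if self.ROLE_STR_TO_BITS[dyn['connections']['role']] != role:
                return False

            # Custom policy: e.g., only accept world masters from a known allow-list
            if role & 3 == self.ROLE_WORLD_MASTER:
                return dyn['peer_id'] in getattr(self, "trusted_masters", set())
            return True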
+    def accept_new_role(self, role: int):
+        """Set the agent's role and optionally load a default behaviour (private/world behaviour).
+
+        Args:
+            role: The integer role to assign to the agent (e.g., ROLE_PUBLIC, ROLE_WORLD_MASTER).
+        """
+        base_role_str = self.ROLE_BITS_TO_STR[(role >> 2) << 2]
+        full_role_str = self.ROLE_BITS_TO_STR[role]
+
+        self._node_profile.get_dynamic_profile()['connections']['role'] = full_role_str
+
+        base_role_to_behav = self.world_profile.get_dynamic_profile()['world_roles_fsm']
+        if base_role_str in base_role_to_behav:
+            default_behav = base_role_to_behav[base_role_str]
+        else:
+            default_behav = None  # A public role will not be found in the map
+
+        if default_behav is not None and len(default_behav) > 0:
+            default_behav_hsm = HybridStateMachine(self)
+            default_behav_hsm.load(default_behav)
+            self.behav = HybridStateMachine(self)
+            self.behav.include(default_behav_hsm, make_a_copy=True)
+
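The shift pair (role >> 2) << 2 simply clears the two low-order bits, which the surrounding code treats as flags (bit 0 is the in-world test used by in_world() below), leaving the base role used to look up a default behaviour. A quick check of that arithmetic (the concrete bit assignments here are illustrative):

    ROLE_BASE = 0b0100      # an example base-role bit
    FLAG_IN_WORLD = 0b0001  # example flag in the low two bits
    FLAG_OTHER = 0b0010     # example flag in the low two bits

    role = ROLE_BASE | FLAG_IN_WORLD | FLAG_OTHER   # 0b0111
    assert (role >> 2) << 2 == ROLE_BASE            # low two bits cleared
    assert role & 1 == 1                            # the in_world() bit test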
+    def in_world(self):
+        """Check if the agent is currently operating within a 'world'.
+
+        Returns:
+            True if the agent is in a world, False otherwise.
+        """
+        if self._node_profile is not None:
+            return self.ROLE_STR_TO_BITS[self._node_profile.get_dynamic_profile()['connections']['role']] & 1 == 1
+        else:
+            return False
+
+    def behaving_in_world(self):
+        """Check if the agent's world-specific behaviour state machine is currently active.
+
+        Returns:
+            True if the world behaviour is active, False otherwise.
+        """
+        return self.behav.is_enabled()
+
+    def get_stream_sample(self, net_hash: str, sample_dict: dict[str, dict[str, torch.Tensor | None | int | str]]):
+        """Receive and process stream samples that were provided by another agent.
+
+        Args:
+            net_hash: The network hash identifying the source of the stream samples.
+            sample_dict: A dictionary where keys are stream names and values are dictionaries
+                containing 'data', 'data_tag', and 'data_uuid' for each sample.
+
+        Returns:
+            True if the stream samples were successfully processed and stored, False otherwise
+            (e.g., if the stream is unknown, not compatible, or data is None/stale).
+        """
+
+        # Normalizing the net hash from the user-facing form to the internal one used by this code
+        net_hash = DataProps.normalize_net_hash(net_hash)
+
+ self.out(f"Got a stream sample from {net_hash}...")
1421
+
1422
+ if net_hash in self.known_streams:
1423
+ for name, data_and_tag_and_uuid in sample_dict.items():
1424
+ if AgentBasics.DEBUG:
1425
+ if net_hash in self.known_streams and name in self.known_streams[net_hash]:
1426
+ self.deb(f"[get_stream_sample] Local data stream {name} status: tag="
1427
+ f"{self.known_streams[net_hash][name].get_tag()}, uuid="
1428
+ f"{self.known_streams[net_hash][name].get_uuid(expected=False)}, uuid-expected="
1429
+ f"{self.known_streams[net_hash][name].get_uuid(expected=True)}")
1430
+
1431
+ data, data_tag, data_uuid = (data_and_tag_and_uuid['data'],
1432
+ data_and_tag_and_uuid['data_tag'],
1433
+ data_and_tag_and_uuid['data_uuid'])
1434
+
1435
+ # - data must be not None
1436
+ # - the stream name must be known
1437
+ # - if the UUID associated to our local stream is the same of the data, then we check tag order
1438
+ # - if the UUID associated to our local stream is the expected one, we don't check tag order
1439
+ skip = data is None
1440
+ skip = skip or net_hash not in self.known_streams
1441
+ skip = skip or name not in self.known_streams[net_hash]
1442
+ skip = (skip or (self.known_streams[net_hash][name].get_uuid(expected=True) is not None and
1443
+ data_uuid != self.known_streams[net_hash][name].get_uuid(expected=True)))
1444
+ skip = (skip or (self.known_streams[net_hash][name].get_uuid(expected=True) is None and
1445
+ self.known_streams[net_hash][name].get_uuid(expected=False) is not None and
1446
+ data_uuid != self.known_streams[net_hash][name].get_uuid(expected=False)))
1447
+ skip = (skip or (self.known_streams[net_hash][name].get_uuid(expected=True) is None and
1448
+ self.known_streams[net_hash][name].get_uuid(expected=False) is not None and
1449
+ data_uuid == self.known_streams[net_hash][name].get_uuid(expected=False) and
1450
+ data_tag <= self.known_streams[net_hash][name].get_tag()))
1451
+
1452
+                # If the sample can be accepted...
+                if not skip:
+                    self.out(f"Accepted sample named {name}: tag={data_tag}, uuid={data_uuid}")
+
+                    # Saving the data sample on the known stream objects
+                    self.known_streams[net_hash][name].set(data, data_tag)
+
+                    # If the local stream was expecting data with a certain UUID, and we got it ...
+                    # OR
+                    # if the local stream was not expecting anything and was also not set to any UUID, and we got
+                    # data with some UUID ...
+                    # THEN
+                    # we clear expectations and set the current UUID to the one of the data.
+                    # (the second part of the OR above is the case of data that arrives before an action request,
+                    # since action requests set expectations only)
+                    if ((self.known_streams[net_hash][name].get_uuid(expected=True) is not None and
+                            data_uuid == self.known_streams[net_hash][name].get_uuid(expected=True)) or
+                            (self.known_streams[net_hash][name].get_uuid(expected=True) is None and
+                             self.known_streams[net_hash][name].get_uuid(expected=False) is None and
+                             data_uuid is not None)):
+
+                        # Setting what was the expected UUID as the local UUID from now on
+                        self.known_streams[net_hash][name].set_uuid(data_uuid, expected=False)  # Setting current
+                        self.known_streams[net_hash][name].set_uuid(None, expected=True)  # Clearing expected
+
+                        if AgentBasics.DEBUG:
+                            self.deb("[get_stream_sample] Switched uuid of the local data stream!")
+                            self.deb(f"[get_stream_sample] New local data stream status: tag="
+                                     f"{self.known_streams[net_hash][name].get_tag()}, uuid="
+                                     f"{self.known_streams[net_hash][name].get_uuid(expected=False)}, uuid-expected="
+                                     f"{self.known_streams[net_hash][name].get_uuid(expected=True)}")
+
+                    # Buffering data, if it was requested and if this sample comes from somebody's processor
+                    if (self.buffer_generated_by_others != "none" and
+                            DataProps.name_or_group_from_net_hash(net_hash) == "processor"):
+                        self.deb("[get_stream_sample] Buffering others' processor generated data...")
+
+                        # Getting the streams of the processor of the source agent
+                        _processor_stream_dict = self.known_streams[net_hash]
+                        _peer_id = DataProps.peer_id_from_net_hash(net_hash)
+
+                        # Setting buffered stream counter
+                        clear = False
+                        if _peer_id in self.last_buffered_peer_id_to_info:
+                            if self.buffer_generated_by_others == "one":
+                                _buffered_uuid_to_id = self.last_buffered_peer_id_to_info[_peer_id]["uuid_to_id"]
+                                if data_uuid not in _buffered_uuid_to_id:
+                                    _id = next(iter(_buffered_uuid_to_id.values()))
+                                    _buffered_uuid_to_id.clear()
+                                    _buffered_uuid_to_id[data_uuid] = _id
+                                    clear = True
+                        else:
+                            self.last_buffered_peer_id_to_info[_peer_id] = {"uuid_to_id": {}, "net_hash": None}
+                        _buffered_uuid_to_id = self.last_buffered_peer_id_to_info[_peer_id]["uuid_to_id"]
+                        if data_uuid not in _buffered_uuid_to_id:
+                            _buffered_uuid_to_id[data_uuid] = sum(
+                                len(v["uuid_to_id"]) for v in self.last_buffered_peer_id_to_info.values()) + 1
+                        _buffered_id = _buffered_uuid_to_id[data_uuid]
+
+                        # Building net hash to retrieve the buffered stream
+                        _net_hash = DataProps.build_net_hash(
+                            _peer_id,
+                            pubsub=False,
+                            name_or_group=("buffered" + str(_buffered_id)))
+
+                        # If the buffered stream was not created before
+                        if _net_hash not in self.known_streams:
+                            self.deb(f"[get_stream_sample] Adding a new buffered stream to the list of known "
+                                     f"streams, hash: {_net_hash}")
+                            for stream_obj in _processor_stream_dict.values():
+
+                                # Same properties of the stream of the processor of the source agent
+                                props = stream_obj.get_props().clone()
+                                props.set_group("buffered" + str(_buffered_id))
+
+                                # Adding the newly created stream
+                                self.add_stream(BufferedDataStream(props=props, clock=self._node_clock),
+                                                owned=False,
+                                                net_hash=_net_hash)
+
+                            # Saving hash of the new buffered stream
+                            self.last_buffered_peer_id_to_info[_peer_id]["net_hash"] = _net_hash
+                        else:
+                            if clear:
+                                for stream_obj in self.known_streams[_net_hash].values():
+                                    stream_obj.clear_buffer()
+
+                        # Saving sample
+                        self.known_streams[_net_hash][name].set(data, data_tag)
+
+                        # Clearing all UUIDs of the locally buffered stream
+                        self.known_streams[_net_hash][name].set_uuid(None, expected=False)
+                        self.known_streams[_net_hash][name].set_uuid(None, expected=True)
+
+                # If we decided to skip this sample...
+                else:
+                    self.out(f"Skipping sample named {name}: tag={data_tag}, uuid={data_uuid}" +
+                             (", data is None!" if data is None else ""))
+            return True
+
+        # If this stream is not known at all...
+        else:
+            self.out(f"Skipping sample from {net_hash} (data stream is unknown)")
+            return False
+
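The skip chain above implements a single acceptance rule: a sample is taken only when its UUID matches what the local stream expects, or matches the current UUID with a strictly newer tag, or when the stream has no UUID state yet. Restated as a standalone predicate (a readable paraphrase, not the package API; the known-stream checks are omitted):

    def accept_sample(data, data_tag, data_uuid, local_tag, local_uuid, expected_uuid):
        if data is None:
            return False
        if expected_uuid is not None:
            return data_uuid == expected_uuid   # waiting for a specific generation
        if local_uuid is not None:
            return data_uuid == local_uuid and data_tag > local_tag  # enforce tag order
        return True                             # no expectations yet: adopt the UUID

    assert accept_sample("x", 5, "u1", local_tag=4, local_uuid="u1", expected_uuid=None)
    assert not accept_sample("x", 4, "u1", local_tag=4, local_uuid="u1", expected_uuid=None)
    assert not accept_sample("x", 9, "u2", local_tag=0, local_uuid=None, expected_uuid="u1")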
+    def send_stream_samples(self):
+        """Collect and send stream samples from all owned streams to appropriate recipients."""
+
+        # Get samples from all the owned streams
+        for net_hash, streams_dict in self.owned_streams.items():
+
+            # Preparing content to send
+            something_to_send = False
+            content = {name: {} for name in streams_dict.keys()}
+            for name, stream in streams_dict.items():
+                data = stream.get(requested_by="send_stream_samples")
+
+                if data is not None:
+                    something_to_send = True
+                    self.deb(f"[send_stream_samples] Preparing to send stream samples from {net_hash}, named {name} "
+                             f"(tag={stream.get_tag()}, uuid={stream.get_uuid()})")
+
+                    content[name] = {'data': data, 'data_tag': stream.get_tag(), 'data_uuid': stream.get_uuid()}
+
+                    stream.clear_uuid_if_marked_as_clearable()
+
+            # Checking if there is something valid in this group of streams
+            if not something_to_send:
+                continue
+
+            # Guessing recipient of direct message (if None, then PubSub)
+            recipient = self.recipients[net_hash]
+
+            # Debug: force pubsub to be sent as direct message to the first agent
+            # if self._recipients[net_hash] is None:
+            #     for peer_id in self.all_agents.keys():
+            #         recipient = peer_id
+            #         break
+
+            # If pubsub...
+            if recipient is None:
+                if DataStream.is_pubsub_from_net_hash(net_hash):
+                    self.deb(f"[send_stream_samples] Sending stream samples of the whole {net_hash} by pubsub")
+
+                    peer_id = DataStream.peer_id_from_net_hash(net_hash)  # Guessing agent peer ID from the net hash
+                    ret = self._node_conn.publish(peer_id, channel=net_hash,
+                                                  content_type=Msg.STREAM_SAMPLE,
+                                                  content=content)
+
+                    self.deb("[send_stream_samples] Sending returned: " + str(ret))
+
+            # If direct message...
+            else:
+                if not DataStream.is_pubsub_from_net_hash(net_hash):
+                    _recipients = recipient if isinstance(recipient, list) else [recipient]
+                    for i, _recipient in enumerate(_recipients):
+                        self.deb(f"[send_stream_samples] Sending samples by direct message, to {_recipient}")
+
+                        peer_id = _recipient  # Peer ID from the recipient information
+                        name_or_group = DataProps.name_or_group_from_net_hash(net_hash)
+                        ret = self._node_conn.send(peer_id, channel_trail=name_or_group,
+                                                   content_type=Msg.STREAM_SAMPLE,
+                                                   content=content)
+
+                        self.deb("[send_stream_samples] Sending returned: " + str(ret))
+                else:
+                    raise ValueError(f"Unexpected scenario: recipient set ({recipient}) and sending on a pubsub "
+                                     f"stream")
+
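Each message carries, per stream name, a small dict with the sample, its tag and its UUID; only groups with at least one non-None sample are sent, via pubsub when no recipient is set and via direct messages otherwise. The payload construction in isolation (a sketch over stream-like objects):

    def build_content(streams):
        # streams: name -> object exposing get(), get_tag() and get_uuid()
        content, something_to_send = {name: {} for name in streams}, False
        for name, stream in streams.items():
            data = stream.get()
            if data is not None:
                something_to_send = True
                content[name] = {'data': data,
                                 'data_tag': stream.get_tag(),
                                 'data_uuid': stream.get_uuid()}
        return content if something_to_send else None  # None: nothing to send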
+    def get_action_step(self):
+        """Retrieve the current action step from the agent's private/world behaviour.
+
+        Returns:
+            The current action step object from the HybridStateMachine's active action,
+            or None if there is no active action.
+        """
+        behav = self.behav if self.behav.is_enabled() else self.behav_lone_wolf
+        return behav.get_action_step()
+
+    def is_last_action_step(self):
+        """Check if the agent's current action (private/world behaviour) is on its last step.
+
+        Returns:
+            True if the current action has completed its last step, False otherwise.
+            Returns None if there is no active action.
+        """
+        behav = self.behav if self.behav.is_enabled() else self.behav_lone_wolf
+        action = behav.get_action()
+        if action is not None:
+            return action.was_last_step_done()
+        else:
+            return None
+
+    def is_multi_steps_action(self):
+        """Determine whether the current action is a multi-step action.
+
+        Returns:
+            True if the action is multi-step, False otherwise.
+        """
+        behav = self.behav if self.behav.is_enabled() else self.behav_lone_wolf
+        action = behav.get_action()
+        return action.is_multi_steps() if action is not None else False
+
+    def proc_callback_inputs(self, inputs):
+        """A callback method that saves the inputs to the processor right before execution.
+
+        Args:
+            inputs: The data inputs for the processor.
+
+        Returns:
+            The same inputs passed to the function.
+        """
+        self.proc_last_inputs = inputs
+        return inputs
+
+    def proc_callback_outputs(self, outputs):
+        """A callback method that saves the outputs from the processor right after execution.
+
+        Args:
+            outputs: The data outputs from the processor.
+
+        Returns:
+            The same outputs passed to the function.
+        """
+        self.proc_last_outputs = outputs
+        return outputs
+
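Because both callbacks record the data and pass it through unchanged, they double as hook points: a subclass can override them to log, filter, or transform whatever reaches or leaves the processor. For instance (a hypothetical subclass; "Agent" stands for the package's agent class):

    class LoggingAgent(Agent):
        def proc_callback_inputs(self, inputs):
            inputs = super().proc_callback_inputs(inputs)  # still records last inputs
            for x in inputs:
                if hasattr(x, "shape"):
                    print("input shape:", tuple(x.shape))
            return inputs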
+ def save(self, where: str = "output"):
1677
+ """Save the agent's state, including its processor and other attributes, to a specified location.
1678
+
1679
+ Args:
1680
+ where: The directory path where the agent's state should be saved. Defaults to "output".
1681
+
1682
+ Returns:
1683
+ The string "<SAVE_OK>" upon successful saving.
1684
+
1685
+ Raises:
1686
+ IOError: If there is an issue with file operations (e.g., directory creation, writing files).
1687
+ TypeError, ValueError, RuntimeError: For other potential issues during serialization or saving.
1688
+ """
1689
+
1690
+ # Saving the processor
1691
+ if self.proc is not None:
1692
+ torch.save(self.proc.state_dict(), os.path.join(where, f"{self._node_name}.pt"))
1693
+
1694
+ try:
1695
+
1696
+ # Creating output folder
1697
+ if not os.path.exists(where):
1698
+ os.makedirs(where)
1699
+
1700
+ # Saving the whole thing (excluding the processor)
1701
+ proc = self.proc
1702
+ self.proc = None
1703
+ with open(os.path.join(where, f"{self._node_name}.pkl"), "wb") as f:
1704
+ pickle.dump(self, f)
1705
+ self.proc = proc
1706
+ except (TypeError, ValueError, RuntimeError, IOError, FileNotFoundError) as e:
1707
+ self.out("Could not save " + ("agent" if not self.is_world else "world") + f" {self._node_name}: {e}")
1708
+ raise e
1709
+
1710
+ return "<SAVE_OK>" # This means OK
1711
+
1712
+ def load(self, where: str = "output"):
1713
+ """Load the agent's state from a specified location.
1714
+
1715
+ Args:
1716
+ where: The directory path from which the agent's state should be loaded. Defaults to "output".
1717
+
1718
+ Returns:
1719
+ The loaded AgentBasics object.
1720
+
1721
+ Raises:
1722
+ AssertionError: If the specified load path does not exist.
1723
+ IOError: If there is an issue with file operations (e.g., reading files).
1724
+ """
1725
+
1726
+ # Checking output folder
1727
+ if not os.path.exists(where):
1728
+ raise GenException(f"Invalid load path: {where}")
1729
+ load_proc = self.proc is not None
1730
+
1731
+ # Loading the whole object (no processor)
1732
+ with open(os.path.join(where, f"{self._node_name}.pkl"), "rb") as f:
1733
+ loaded = pickle.load(f)
1734
+
1735
+ # Update self's attributes with the loaded object's attributes
1736
+ self.__dict__.update(loaded.__dict__)
1737
+
1738
+ # Loading the processor
1739
+ if load_proc:
1740
+ self.proc.load_state_dict(torch.load(os.path.join(where, f"{self._node_name}.pt")))
1741
+
1742
+ return loaded
1743
+
1744
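State is therefore split across two files in the target folder: <node_name>.pt with the processor weights and <node_name>.pkl with everything else, the processor being detached before pickling. A round trip, assuming an agent built the usual way (sketch):

    agent.save("checkpoints")   # writes checkpoints/<node_name>.pt and .pkl

    # Later, on a freshly constructed agent whose self.proc has the same
    # architecture (load() only restores weights if a processor is present):
    agent.load("checkpoints")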
+    def __str__(self):
+        """String representation of an agent.
+
+        Returns:
+            A formatted string describing the agent's current state and relationships.
+        """
+        s = ("[" + ("Agent" if not self.is_world else "World") + "]"
+             + f" {self._node_name} (role: {self._node_profile.get_dynamic_profile()['connections']['role']})")
+        if len(self.world_masters) > 0:
+            s += "\n\t- known world masters:"
+            for _s in self.world_masters.keys():
+                s += "\n\t\t" + str(_s)
+        if len(self.world_agents) > 0:
+            s += "\n\t- known agents living in the same world (non-world-masters):"
+            for _s in self.world_agents.keys():
+                s += "\n\t\t" + str(_s)
+        if len(self.public_agents) > 0:
+            s += "\n\t- known lone wolves:"
+            for _s in self.public_agents.keys():
+                s += "\n\t\t" + str(_s)
+        if len(self.known_streams) > 0:
+            s += "\n\t- known_streams:"
+            for _s in self.known_streams:
+                s += "\n\t\t" + str(_s)
+        s += "\n\t- behaviour (public):"
+        s += "\n\t\t" + (str(self.behav_lone_wolf).replace("\n", "\n\t\t")
+                         if self.behav_lone_wolf is not None else "none")
+        s += "\n\t- behaviour (private):"
+        s += "\n\t\t" + (str(self.behav).replace("\n", "\n\t\t") if self.behav is not None else "none")
+        s += "\n\t- processor:"
+        s += "\n\t\t" + (str(self.proc).replace("\n", "\n\t\t") if self.proc is not None else "none")
+        return s
+
+    def __remove_all_world_related_agents(self):
+        """Remove all world-related agents (masters and regular agents) from the agent's known lists."""
+
+        to_remove = list(self.world_masters.keys())
+        for peer_id in to_remove:
+            self.remove_agent(peer_id)
+
+        to_remove = list(self.world_agents.keys())
+        for peer_id in to_remove:
+            self.remove_agent(peer_id)
+
+    def __remove_all_world_private_streams(self):
+        """Remove all known streams that are flagged as not-public and are not owned by this agent."""
+
+        # Find what to remove
+        to_remove = []
+        for net_hash, stream_dict in self.known_streams.items():
+            for name, stream_obj in stream_dict.items():
+                if not stream_obj.get_props().is_public() and net_hash not in self.owned_streams:
+                    to_remove.append((DataProps.peer_id_from_net_hash(net_hash), name))
+
+        # Remove it
+        for (peer_id, name) in to_remove:
+            self.remove_streams(peer_id, name)
+
+        # Clear recipients associated to these streams
+        recipient_net_hashes = list(self.recipients.keys())
+        for net_hash in recipient_net_hashes:
+            if net_hash not in self.known_streams:
+                del self.recipients[net_hash]
+
+    def remove_peer_from_agent_status_attrs(self, peer_id):
+        """Remove a peer ID from the status of the agent, assuming the status is represented by attributes whose
+        names start with '_'."""
+        for attr_name in dir(self):
+            if attr_name.startswith("_") and (not attr_name.startswith("__") and not attr_name.startswith("_Agent")
+                                              and not attr_name.startswith("_WAgent")):
+                try:
+                    value = getattr(self, attr_name)
+                    if isinstance(value, list):
+                        setattr(self, attr_name, [v for v in value if v != peer_id])
+                    elif isinstance(value, set):
+                        value.discard(peer_id)
+                    elif isinstance(value, dict):
+                        if peer_id in value:
+                            del value[peer_id]
+                except AttributeError:
+                    continue  # Skip read-only attributes
+
+    def reset_agent_status_attrs(self):
+        """Reset attributes that represent the status of the agent, assumed to be the ones whose names start
+        with '_'."""
+        for attr_name in dir(self):
+            if attr_name.startswith("_") and (not attr_name.startswith("__") and not attr_name.startswith("_Agent")
+                                              and not attr_name.startswith("_WAgent")):
+                try:
+                    value = getattr(self, attr_name)
+                    if isinstance(value, list):
+                        setattr(self, attr_name, [])
+                    elif isinstance(value, set):
+                        setattr(self, attr_name, set())
+                    elif isinstance(value, dict):
+                        setattr(self, attr_name, {})
+                    elif isinstance(value, bool):
+                        # bool must be tested before int: bool is a subclass of int in Python
+                        setattr(self, attr_name, False)
+                    elif isinstance(value, int):
+                        setattr(self, attr_name, 0)
+                    elif isinstance(value, float):
+                        setattr(self, attr_name, 0.)
+                except AttributeError:
+                    continue  # Skip read-only attributes
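One subtlety in the reset above: bool is a subclass of int in Python, so the bool branch must come before the int branch (as done here), otherwise boolean flags would be reset to 0 rather than False. Two lines make the point:

    flag = True
    assert isinstance(flag, int)   # bool passes the int check too
    assert type(flag) is bool      # hence bool must be dispatched first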