unaiverse 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unaiverse/__init__.py +19 -0
- unaiverse/agent.py +2226 -0
- unaiverse/agent_basics.py +2389 -0
- unaiverse/clock.py +234 -0
- unaiverse/dataprops.py +1282 -0
- unaiverse/hsm.py +2471 -0
- unaiverse/modules/__init__.py +18 -0
- unaiverse/modules/cnu/__init__.py +17 -0
- unaiverse/modules/cnu/cnus.py +536 -0
- unaiverse/modules/cnu/layers.py +261 -0
- unaiverse/modules/cnu/psi.py +60 -0
- unaiverse/modules/hl/__init__.py +15 -0
- unaiverse/modules/hl/hl_utils.py +411 -0
- unaiverse/modules/networks.py +1509 -0
- unaiverse/modules/utils.py +748 -0
- unaiverse/networking/__init__.py +16 -0
- unaiverse/networking/node/__init__.py +18 -0
- unaiverse/networking/node/connpool.py +1332 -0
- unaiverse/networking/node/node.py +2752 -0
- unaiverse/networking/node/profile.py +446 -0
- unaiverse/networking/node/tokens.py +79 -0
- unaiverse/networking/p2p/__init__.py +188 -0
- unaiverse/networking/p2p/go.mod +127 -0
- unaiverse/networking/p2p/go.sum +548 -0
- unaiverse/networking/p2p/golibp2p.py +18 -0
- unaiverse/networking/p2p/golibp2p.pyi +136 -0
- unaiverse/networking/p2p/lib.go +2765 -0
- unaiverse/networking/p2p/lib_types.py +311 -0
- unaiverse/networking/p2p/message_pb2.py +50 -0
- unaiverse/networking/p2p/messages.py +360 -0
- unaiverse/networking/p2p/mylogger.py +78 -0
- unaiverse/networking/p2p/p2p.py +900 -0
- unaiverse/networking/p2p/proto-go/message.pb.go +846 -0
- unaiverse/stats.py +1506 -0
- unaiverse/streamlib/__init__.py +15 -0
- unaiverse/streamlib/streamlib.py +210 -0
- unaiverse/streams.py +804 -0
- unaiverse/utils/__init__.py +16 -0
- unaiverse/utils/lone_wolf.json +28 -0
- unaiverse/utils/misc.py +441 -0
- unaiverse/utils/sandbox.py +292 -0
- unaiverse/world.py +384 -0
- unaiverse-0.1.12.dist-info/METADATA +366 -0
- unaiverse-0.1.12.dist-info/RECORD +47 -0
- unaiverse-0.1.12.dist-info/WHEEL +5 -0
- unaiverse-0.1.12.dist-info/licenses/LICENSE +177 -0
- unaiverse-0.1.12.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,292 @@
|
|
|
1
|
+
"""
|
|
2
|
+
█████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
|
|
3
|
+
░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
|
|
4
|
+
░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
|
|
5
|
+
░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
|
|
6
|
+
░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
|
|
7
|
+
░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
|
|
8
|
+
░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
|
|
9
|
+
░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
|
|
10
|
+
A Collectionless AI Project (https://collectionless.ai)
|
|
11
|
+
Registration/Login: https://unaiverse.io
|
|
12
|
+
Code Repositories: https://github.com/collectionlessai/
|
|
13
|
+
Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
|
|
14
|
+
"""
|
|
15
|
+
import os
|
|
16
|
+
import sys
|
|
17
|
+
import uuid
|
|
18
|
+
import argparse
|
|
19
|
+
import subprocess
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
|
|
22
|
+
# Configuration
# Name/tag of the throwaway sandbox image built (and removed) on every run.
DOCKER_IMAGE_NAME = "unaiverse-sandbox"
CONTAINER_NAME_BASE = "unaiverse-sandbox-container"
CONTAINER_NAME = f"{CONTAINER_NAME_BASE}-{uuid.uuid4().hex[:8]}"  # Append a short unique ID

# Dockerfile template written to disk by sandbox(); the '<create_requirements.txt>'
# placeholder is replaced with a shell command that recreates requirements.txt inside the image.
DOCKERFILE_CONTENT = """

# Debian image, automatically guessed architecture
FROM python:3.12-slim-bookworm

# Installing Go compiler
RUN apt-get update && apt-get install -y --no-install-recommends build-essential curl git
RUN rm -rf /var/lib/apt/lists/*
RUN ARCH=$(dpkg --print-architecture) && curl -LO https://go.dev/dl/go1.24.5.linux-${ARCH}.tar.gz
RUN ARCH=$(dpkg --print-architecture) && tar -C /usr/local -xzf go1.24.5.linux-${ARCH}.tar.gz
RUN ARCH=$(dpkg --print-architecture) && rm go1.24.5.linux-${ARCH}.tar.gz

# Set Go environment variables
ENV PATH="/usr/local/go/bin:${PATH}"
ENV GOPATH="/go"
RUN mkdir -p /go/bin /go/src /go/pkg

# Setting the working directory inside the container
WORKDIR /unaiverse

# Dependencies
RUN <create_requirements.txt>
RUN pip install --no-cache-dir -r requirements.txt --break-system-packages
"""
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def sandbox(file_to_run: str,
            read_only_paths: tuple[str] | list[str] | None = None,
            writable_paths: tuple[str] | list[str] | None = None) -> None:
    """Builds the sandbox Docker image and runs the given Python script inside it.

    The UNaIVERSE code folder is always mounted read-only; a few project sub-folders are
    always mounted writable. Exits the process (sys.exit(1)) if the build or the run fails.

    Args:
        file_to_run: Path of the Python script to execute inside the container.
        read_only_paths: Extra host folders to mount read-only.
        writable_paths: Extra host folders to mount read-write.
    """

    # Path of this file
    absolute_path_of_this_file = os.path.abspath(__file__)

    # Folders composing the path (and file name at the end)
    path_components = list(Path(absolute_path_of_this_file).parts)

    # Ensuring the folder/file structure was not manipulated
    assert path_components[-1] == 'sandbox.py', "Major security issue, stopping."
    assert path_components[-2] == 'utils', "Major security issue, stopping."
    assert path_components[-3] == 'unaiverse', "Major security issue, stopping."

    # Main folder of UNaIVERSE
    abspath_of_unaiverse_code = str(Path(*path_components[0:-3]))

    # Clean up any remnants from previous runs first (safety)
    cleanup_docker_artifacts(where=abspath_of_unaiverse_code)

    # Requirements: build a single shell 'printf' command that recreates requirements.txt
    # inside the image. Blank lines are skipped: the previous per-line loop appended the
    # closing quote and redirection on ANY empty line (not only the last one), so a blank
    # interior line produced a broken RUN command in the generated Dockerfile.
    with open(os.path.join(abspath_of_unaiverse_code, "requirements.txt"), 'r') as req_file:
        req_lines = [line.strip() for line in req_file if len(line.strip()) > 0]
    echoed_contents_of_requirements = 'printf "' + "\\n".join(req_lines) + "\\n\" > requirements.txt"

    # Create Dockerfile
    print("Creating Dockerfile...")
    with open(os.path.join(abspath_of_unaiverse_code, "Dockerfile"), "w") as f:
        f.write(DOCKERFILE_CONTENT.replace('<create_requirements.txt>', echoed_contents_of_requirements))

    # Building Docker image
    if not build_docker_image(where=abspath_of_unaiverse_code):
        print("Exiting due to Docker image build failure")
        cleanup_docker_artifacts(where=abspath_of_unaiverse_code)  # Try to clean up what was created (if any)
        sys.exit(1)

    # Read only folders from the host machine
    read_only_mount_paths = ([abspath_of_unaiverse_code] +
                             (list(read_only_paths) if read_only_paths is not None else []))

    # Writable folders in host machine
    writable_mount_paths = ([os.path.join(abspath_of_unaiverse_code, 'runners'),
                             os.path.join(abspath_of_unaiverse_code, 'unaiverse', 'library'),
                             os.path.join(abspath_of_unaiverse_code, 'unaiverse', 'networking', 'p2p')] +
                            (list(writable_paths) if writable_paths is not None else []))

    # Running
    if not run_in_docker(file_to_run=os.path.abspath(file_to_run),
                         read_only_host_paths=read_only_mount_paths,
                         writable_host_paths=writable_mount_paths):
        print("Exiting due to Docker container run failure")
        sys.exit(1)

    # Final cleanup
    cleanup_docker_artifacts(where=abspath_of_unaiverse_code)
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def build_docker_image(where: str):
    """Builds the Docker image.

    Args:
        where: Directory used as the Docker build context (must contain the generated Dockerfile).

    Returns:
        True when `docker build` succeeds, False when it exits with a non-zero status.
    """
    print(f"Building Docker image '{DOCKER_IMAGE_NAME}'...")

    # Build from the given directory, tagging the result with the sandbox image name
    build_cmd = ["docker", "build", "-t", DOCKER_IMAGE_NAME, where]
    try:
        subprocess.run(build_cmd, check=True)
    except subprocess.CalledProcessError as e:
        print(f"Error building Docker image: {e}")
        return False
    print(f"Docker image '{DOCKER_IMAGE_NAME}' built successfully.")
    return True
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def cleanup_docker_artifacts(where: str):
    """Cleans up the generated files and Docker image.

    Best-effort: each step logs its own failure and the function always runs to the end.

    Args:
        where: Directory containing the generated Dockerfile (if any).
    """
    print("Cleaning...")

    # Stop and remove container if it's still running (e.g., if previous run failed)
    try:
        print(f"Attempting to stop and remove container '{CONTAINER_NAME}' (if running)...")
        for docker_verb in ("stop", "rm"):
            # check=False: it is fine if the container does not exist; output is swallowed
            subprocess.run(["docker", docker_verb, CONTAINER_NAME],
                           check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        print(f"Error during preliminary container cleanup: {e}")

    # Remove the Docker image
    try:
        print(f"Removing Docker image '{DOCKER_IMAGE_NAME}'...")
        subprocess.run(["docker", "rmi", DOCKER_IMAGE_NAME], check=True)
        print("Docker image removed.")
    except subprocess.CalledProcessError as e:
        print(f"Error removing Docker image '{DOCKER_IMAGE_NAME}': {e}")

    # Remove the generated Dockerfile
    dockerfile_path = os.path.join(where, "Dockerfile")
    if os.path.exists(dockerfile_path):
        os.remove(dockerfile_path)
        print("Removed Dockerfile.")
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def run_in_docker(file_to_run: str, read_only_host_paths: list[str] | None = None,
                  writable_host_paths: list[str] | None = None):
    """Runs the code in a Docker container with optional mounts.

    Each host path is mounted at the same path inside the container ("/host/x" -> "/host/x"),
    either read-only (":ro") or read-write. Output of the container is streamed to stdout.

    Args:
        file_to_run: Absolute path of the Python script to execute inside the container.
        read_only_host_paths: Host directories to bind-mount read-only.
        writable_host_paths: Host directories to bind-mount read-write.

    Returns:
        True if the container ran (even when the script inside exited non-zero);
        False if a mount path is invalid or Docker could not be invoked.
    """
    print(f"\nRunning code in Docker container '{CONTAINER_NAME}'...")

    # Building command (it will continue below...)
    command = ["docker", "run",
               "--rm",  # Automatically remove the container when it exits
               "-e", "PYTHONUNBUFFERED=1",  # Ensure Python output is unbuffered
               "-e", "NODE_STARTING_PORT",  # Forward the port chosen by the caller (see __main__)
               "--name", CONTAINER_NAME]

    if sys.platform.startswith('linux'):

        # Linux: expose the host network (in macOS and Windows it is still a virtual host).
        # Fixed: a stray trailing comma after this call turned the statement into a discarded tuple.
        command.extend(["--net", "host"])
    else:

        # Not-linux: publish the 4 consecutive node ports explicitly (adding -p port:port)
        port_int = int(os.getenv("NODE_STARTING_PORT", "0"))
        if port_int > 0:
            command.extend(["-p", str(port_int) + ":" + str(port_int)])
            command.extend(["-p", str(port_int + 1) + ":" + str(port_int + 1) + "/udp"])
            command.extend(["-p", str(port_int + 2) + ":" + str(port_int + 2)])
            command.extend(["-p", str(port_int + 3) + ":" + str(port_int + 3) + "/udp"])

    # Add read-only mount if path is provided
    if read_only_host_paths is not None and len(read_only_host_paths) > 0:
        for path in read_only_host_paths:

            # Ensure the host path exists and is a directory
            if not os.path.isdir(path):
                print(
                    f"Error: Read-only host path '{path}' does not exist or is not a directory. Cannot mount.")
                return False
            else:

                # Augmenting command
                path = os.path.abspath(path)
                command.extend(["-v", f"{path}:{path}:ro"])
                print(f"Mounted host '{path}' as read-only to container")

    # Add writable mount if path is provided
    if writable_host_paths is not None and len(writable_host_paths) > 0:
        for path in writable_host_paths:

            # Ensure the host path exists and is a directory
            if not os.path.isdir(path):
                print(
                    f"Error: Writable host path '{path}' does not exist or is not a directory. Cannot mount.")
                return False
            else:

                # Augmenting command
                path = os.path.abspath(path)
                command.extend(["-v", f"{path}:{path}"])
                print(f"Mounted host '{path}' as writable to container")

    # Completing command
    command.append(DOCKER_IMAGE_NAME)

    try:

        # Running the prepared command... (using Popen to stream output in real-time)
        try:
            command.extend(["python3", file_to_run])
            process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
            for line in iter(process.stdout.readline, ''):
                sys.stdout.write(line)
            process.wait()  # Wait for the process to finish
            if process.returncode != 0:
                print(f"Container exited with non-zero status code: {process.returncode}")
        except KeyboardInterrupt:
            pass  # Ctrl-C: fall through, the container is cleaned up by the caller

        print(f"\nContainer '{CONTAINER_NAME}' finished execution.")
        return True
    except FileNotFoundError:
        print("Error: Docker command not found. Is Docker installed and in your PATH?")
        print("Please ensure Docker is installed and running.")
        return False
    except subprocess.CalledProcessError as e:
        # NOTE(review): Popen never raises CalledProcessError; kept as a defensive net only
        print(f"Error running Docker container: {e}")
        return False
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# Entry point
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Run a Python script adding customizable read-only and writable paths.",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="""
Examples:
python utils/sandbox.py my_script.py -r /home/user/data:/opt/app/data -p 1234
python utils/sandbox.py another_script.py -w /tmp/output:/mnt/results
python utils/sandbox.py script_with_both.py -r /input:/app/in -w /output:/app/out -p 8082
""")
    parser.add_argument(help="Path to the Python script to execute.", dest="script_to_run",
                        type=str)
    parser.add_argument("-p", "--port", dest="port",
                        help="The starting port of the node(s) (each node uses 4 ports, consecutive port numbers)",
                        type=str, required=True)
    parser.add_argument("-r", "--read-only", dest="read_only_folders",
                        help="One or multiple paths to mount as read-only. "
                             "Use a colon to separate multiple paths (e.g., /path/a:/path/b).",
                        type=str, default=None)
    parser.add_argument("-w", "--writable", dest="writable_folders",
                        help="One or multiple paths to mount as writable. "
                             "Use a colon to separate multiple paths (e.g., /path/c:/path/d).",
                        type=str, default=None)
    args = parser.parse_args()

    # Validating the script name
    if not args.script_to_run.endswith(".py"):
        parser.error(f"The script '{args.script_to_run}' must be a Python file (e.g., ending with .py)")
    script_to_run = args.script_to_run

    # Validating the port. Fixed: a non-numeric value previously crashed with a raw
    # ValueError traceback from int() instead of a clean argparse error message.
    try:
        valid_port = int(args.port) > 0
    except ValueError:
        valid_port = False
    if not valid_port:
        parser.error("Invalid port")

    # Splitting the colon-separated folder lists (None when the option was not given)
    read_only_folders = None
    if args.read_only_folders:
        read_only_folders = args.read_only_folders.split(':')
    writable_folders = None
    if args.writable_folders:
        writable_folders = args.writable_folders.split(':')

    print("\n Running in sandbox...")
    print(f"- Script to run: {script_to_run}")
    print(f"- Starting port (+0, +1, +2, +3): {args.port}")
    print(f"- Read only paths to mount (the UNaIVERSE code folder will be automatically mounted): {read_only_folders}")
    print(f"- Writable paths to mount: {writable_folders}\n")

    # Marking: exported so that run_in_docker can forward it into the container via -e
    os.environ["NODE_STARTING_PORT"] = args.port

    # Running the sandbox and the script
    sandbox(script_to_run, read_only_paths=read_only_folders, writable_paths=writable_folders)
|
unaiverse/world.py
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
1
|
+
"""
|
|
2
|
+
█████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
|
|
3
|
+
░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
|
|
4
|
+
░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
|
|
5
|
+
░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
|
|
6
|
+
░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
|
|
7
|
+
░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
|
|
8
|
+
░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
|
|
9
|
+
░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
|
|
10
|
+
A Collectionless AI Project (https://collectionless.ai)
|
|
11
|
+
Registration/Login: https://unaiverse.io
|
|
12
|
+
Code Repositories: https://github.com/collectionlessai/
|
|
13
|
+
Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
|
|
14
|
+
"""
|
|
15
|
+
from unaiverse.stats import Stats
|
|
16
|
+
from typing import List, Dict, Any
|
|
17
|
+
from unaiverse.agent import AgentBasics
|
|
18
|
+
from unaiverse.hsm import HybridStateMachine
|
|
19
|
+
from unaiverse.networking.p2p.messages import Msg
|
|
20
|
+
from unaiverse.networking.node.profile import NodeProfile
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class World(AgentBasics):
    """A World is a special agent with no processor and no behavior.

    It assigns roles to the agents that join it, keeps a record of their badges,
    maps public peer IDs to private ones, and aggregates the stats batches that
    agents send (including a live connectivity graph used for visualization).
    """

    def __init__(self, world_folder: str, merge_flat_stream_labels: bool = False, stats: Stats | None = None):
        """Initializes a World object, which acts as a special agent without a processor or behavior.

        Args:
            world_folder: The path of the world folder, with JSON files of the behaviors (per role) and agent.py.
            merge_flat_stream_labels: Forwarded to the AgentBasics constructor.
            stats: Optional pre-built Stats recorder; when None a default DB-backed one is created.
        """

        # Creating a "special" agent with no processor and no behavior, but with a "world_folder", which is our world
        super().__init__(proc=None, proc_inputs=None, proc_outputs=None, proc_opts=None, behav=None,
                         world_folder=world_folder, merge_flat_stream_labels=merge_flat_stream_labels)

        # Clearing processor (world must have no processor, and, maybe, a dummy processor was allocated when building
        # the agent in the init call above)
        self.proc = None
        self.proc_inputs = []  # Do not set it to None
        self.proc_outputs = []  # Do not set it to None
        self.compat_in_streams = None
        self.compat_out_streams = None

        # Map from public peer IDs to private peer IDs
        self.private_peer_of = {}

        # Stats
        if stats is not None:
            self.stats = stats
        else:
            # fallback to default Stats class
            self.stats = Stats(is_world=True, db_path=f"{self.world_folder}/stats/world_stats.db",
                               cache_window_hours=2.0)

    def assign_role(self, profile: NodeProfile, is_world_master: bool) -> str:
        """Assigns an initial role to a newly connected agent.

        In this basic implementation, the role is determined based on whether the agent is a world master or a regular
        world agent, ensuring there's only one master.

        Args:
            profile: The NodeProfile of the new agent.
            is_world_master: A boolean indicating if the new agent is attempting to be a master.

        Returns:
            A string representing the assigned role.
        """
        assert self.is_world, "Assigning a role is expected to be done by the world"

        # Placeholder example showing how profile data could drive role assignment (currently a no-op)
        if profile.get_dynamic_profile()['guessed_location'] == 'Some Dummy Location, Just An Example Here':
            pass

        # Currently, roles are only world masters and world agents
        if is_world_master:
            if len(self.world_masters) <= 1:
                return AgentBasics.ROLE_BITS_TO_STR[AgentBasics.ROLE_WORLD_MASTER]
            else:
                # A master already exists: demote the newcomer to plain world agent
                return AgentBasics.ROLE_BITS_TO_STR[AgentBasics.ROLE_WORLD_AGENT]
        else:
            return AgentBasics.ROLE_BITS_TO_STR[AgentBasics.ROLE_WORLD_AGENT]

    async def set_role(self, peer_id: str, role: int):
        """Sets a new role for a specific agent and broadcasts this change to the agent (async).

        It computes the new role and sends a message containing the new role and the corresponding default behavior
        for that role.

        Args:
            peer_id: The ID of the agent whose role is to be set.
            role: The new role to be assigned (as an integer).
        """
        assert self.is_world, "Setting the role is expected to be done by the world, which will broadcast such info"

        # Computing new role (keeping the first two bits as before)
        cur_role = self._node_conn.get_role(peer_id)
        new_role_without_base_int = (role >> 2) << 2
        new_role = (cur_role & 3) | new_role_without_base_int

        # NOTE(review): the change is only broadcast when the merged role differs from the
        # requested one (i.e., the low two bits were preserved) — confirm this is intended
        if new_role != role:
            self._node_conn.set_role(peer_id, new_role)
            self.out("Telling an agent that his role changed")
            if not (await self._node_conn.send(peer_id, channel_trail=None,
                                               content={'peer_id': peer_id, 'role': new_role,
                                                        'default_behav':
                                                            self.role_to_behav[
                                                                self.ROLE_BITS_TO_STR[new_role_without_base_int]]
                                                        if self.role_to_behav is not None else
                                                        str(HybridStateMachine(None))},
                                               content_type=Msg.ROLE_SUGGESTION)):
                self.err("Failed to send role change, removing (disconnecting) " + peer_id)
                await self._node_purge_fcn(peer_id)
            else:
                self.role_changed_by_world = True

    def set_addresses_in_profile(self, peer_id, addresses):
        """Updates the network addresses in an agent's profile.

        Args:
            peer_id: The ID of the agent whose profile is being updated.
            addresses: A list of new addresses to set.
        """
        if peer_id in self.all_agents:
            profile = self.all_agents[peer_id]
            addrs = profile.get_dynamic_profile()['private_peer_addresses']
            addrs.clear()  # Warning: do not allocate a new list, keep the current one (it is referenced by others)
            for _addrs in addresses:
                addrs.append(_addrs)
            self.received_address_update = True
        else:
            self.err(f"Cannot set addresses in profile, unknown peer_id {peer_id}")

    def add_badge(self, peer_id: str, score: float, badge_type: str, agent_token: str,
                  badge_description: str | None = None):
        """Requests a badge for a specific agent, which can be used to track and reward agent performance.
        It validates the score and badge type and stores the badge information in an internal dictionary.

        Args:
            peer_id: The ID of the agent for whom the badge is requested.
            score: The score associated with the badge (must be in [0, 1]).
            badge_type: The type of badge to be awarded.
            agent_token: The token of the agent receiving the badge.
            badge_description: An optional text description for the badge.

        Raises:
            ValueError: If the score is out of range or the badge type is unknown.
        """

        # Validate score
        if score < 0. or score > 1.:
            raise ValueError(f"Score must be in [0.0, 1.0], got {score}")

        # Validate badge_type
        if badge_type not in AgentBasics.BADGE_TYPES:
            raise ValueError(f"Invalid badge_type '{badge_type}'. Must be one of {AgentBasics.BADGE_TYPES}.")

        if badge_description is None:
            badge_description = ""

        # The world not necessarily knows the token of the agents, since they usually do not send messages to the world
        # NOTE(review): raises KeyError if peer_id is not in self.all_agents — confirm callers guarantee this
        badge = {
            'agent_node_id': self.all_agents[peer_id].get_static_profile()['node_id'],
            'agent_token': agent_token,
            'badge_type': badge_type,
            'score': score,
            'badge_description': badge_description,
            'last_edit_utc': self._node_clock.get_time_as_string(),
        }

        if peer_id not in self.agent_badges:
            self.agent_badges[peer_id] = [badge]
        else:
            self.agent_badges[peer_id].append(badge)

        # This will force the sending of the dynamic profile at the defined time instants
        self._node_profile.mark_change_in_connections()

    # Get all the badges requested by the world
    def get_all_badges(self):
        """Retrieves all badges that have been added to the world's record for all agents.
        This provides a central log of achievements or performance metrics.

        Returns:
            A dictionary where keys are agent peer IDs and values are lists of badge dictionaries.
        """
        return self.agent_badges

    def clear_badges(self):
        """Clears all badge records from the world's memory.
        This can be used to reset competition results or clean up state after a specific event.
        """
        self.agent_badges = {}

    async def add_agent(self, peer_id: str, profile: NodeProfile) -> bool:
        """Registers a new agent and records its public-to-private peer ID mapping (async).

        Args:
            peer_id: The (private) peer ID of the new agent.
            profile: The NodeProfile of the new agent.

        Returns:
            True if the agent was accepted by the parent implementation, False otherwise.
        """
        if await super().add_agent(peer_id, profile):
            public_peer_id = profile.get_dynamic_profile()["peer_id"]
            self.private_peer_of[public_peer_id] = peer_id
            return True
        else:
            return False

    async def remove_agent(self, peer_id: str):
        """Removes an agent and drops its public-to-private peer ID mapping (async).

        Args:
            peer_id: The (private) peer ID of the agent to remove.

        Returns:
            True if the agent was removed by the parent implementation, False otherwise.
        """
        # Grab the profile BEFORE removal, since super().remove_agent() may discard it
        profile = None
        if peer_id in self.all_agents:
            profile = self.all_agents[peer_id]
        if await super().remove_agent(peer_id):
            public_peer_id = profile.get_dynamic_profile()["peer_id"]
            if public_peer_id in self.private_peer_of:
                del self.private_peer_of[public_peer_id]
            return True
        else:
            return False

    def collect_and_store_own_stats(self):
        """Collects this world's own stats and pushes them to the stats recorder."""
        if self.stats is None:
            return

        t = self._node_clock.get_time_ms()
        _, own_private_pid = self.get_peer_ids()

        # Helper to add if value changed
        def store_if_changed(stat_name, new_value):
            last_value = self.stats.get_last_value(stat_name)
            if last_value != new_value:
                # Note: We pass the world's *own* peer_id for its *own* stats
                self.stats.store_stat(stat_name, new_value, peer_id=own_private_pid, timestamp=t)

        try:
            store_if_changed("world_masters", len(self.world_masters))
            store_if_changed("world_agents", len(self.world_agents))
            store_if_changed("human_agents", len(self.human_agents))
            store_if_changed("artificial_agents", len(self.artificial_agents))
        except Exception as e:
            self.err(f"[Stats] Error updating own world stats: {e}")

    def _process_custom_stat(self, stat_name, value, peer_id, timestamp) -> bool:
        """Hook for subclasses to intercept a stat. Return True if handled."""
        # Base implementation handles nothing; add_peer_stats falls through to default handling
        return False

    def _extract_graph_node_info(self, peer_id: str) -> Dict[str, Any]:
        """Helper to extract lightweight visualization data from NodeProfile.

        Args:
            peer_id: The peer whose display data is requested (may be the world itself).

        Returns:
            A dict of display fields for the graph view, or {} when the profile is unknown.
        """

        if peer_id == self.get_peer_ids()[1]:
            # this is the world itself
            profile = self._node_profile
        else:
            profile = self.all_agents.get(peer_id)
            if profile is None:
                return {}

        # Accessing the inner private dict of NodeProfile based on your class structure
        static_profile = profile.get_static_profile()
        dynamic_profile = profile.get_dynamic_profile()

        return {
            'Name': static_profile.get('node_name', '~'),
            'Owner': static_profile.get('email', '~'),
            'Role': dynamic_profile.get('connections', {}).get('role', 'unknown').split('~')[-1],
            'Type': static_profile.get('node_type', '~'),
            'Number of Badges': len(dynamic_profile.get('cv', [])),
            'Current Action': self.stats.get_last_value('action', peer_id=peer_id) or '~',
            'Current State': self.stats.get_last_value('state', peer_id=peer_id) or '~',
        }

    def _update_graph(self, peer_id: str, connected_peers_list: List[str], timestamp: int):
        """Updates both graph connectivity (edges) and node metadata.

        Args:
            peer_id: The peer whose reported connections are being recorded.
            connected_peers_list: The peers it claims to be connected to.
            timestamp: The time (ms) of the report; stored as the node's 'last_seen'.
        """

        # 1. initialize structure if missing (e.g. first run or after DB load)
        graph_stat = self.stats.get_stats().setdefault("graph", {'nodes': {}, 'edges': {}})

        nodes = graph_stat.setdefault('nodes', {})
        edges = graph_stat.setdefault('edges', {})

        # 2. Update Node Metadata
        # We update the sender's info
        node_data = self._extract_graph_node_info(peer_id)
        node_data['last_seen'] = timestamp
        nodes[peer_id] = node_data

        # We also ensure connected peers exist in 'nodes', even if we don't have their full profile yet
        connected_peers = set(connected_peers_list)
        for target_id in connected_peers:
            if target_id not in nodes:
                # Try to fetch profile if we have it, otherwise placeholder
                nodes[target_id] = self._extract_graph_node_info(target_id)

        # 3. Update Edges (Logic adapted from your previous code)
        prev_connected_peers = edges.setdefault(peer_id, set())

        # Add reverse connections (Undirected/Bidirectional logic)
        for _peer_id in connected_peers:
            edges.setdefault(_peer_id, set()).add(peer_id)

        # Remove dropped reverse connections
        to_remove = prev_connected_peers - connected_peers
        for _peer_id in to_remove:
            if _peer_id in edges and peer_id in edges[_peer_id]:
                edges[_peer_id].remove(peer_id)

        # Update peer's own forward connections
        edges[peer_id] = connected_peers

        # 4. Store
        world_peer_id = self.get_peer_ids()[1]
        self.stats.store_stat('graph', graph_stat, peer_id=world_peer_id, timestamp=timestamp)

    def _prune_graph(self):
        """Removes nodes that are no longer connected to the World."""
        graph_stat = self.stats.get_stats().get("graph")
        if not graph_stat:
            return

        nodes = graph_stat.get('nodes', {})
        edges = graph_stat.get('edges', {})

        # Get the active/inactive peers
        active_peers = set(self.all_agents.keys())
        # active_peers_in_world = set(self.world_agents.keys()) | set(self.world_masters.keys())
        active_peers.add(self.get_peer_ids()[1])  # the world itself is always "active"
        current_graph_nodes = set(nodes.keys())
        dead_peers = current_graph_nodes - active_peers

        if not dead_peers:
            return

        # 2. Kill Zombies
        for pid in dead_peers:
            nodes.pop(pid, None)  # Nodes
            edges.pop(pid, None)  # Outgoing edges

            # Remove incoming edges
            for other_pid in edges:
                edges[other_pid].discard(pid)

    def add_peer_stats(self, peer_stats_batch: List[Dict[str, Any]], sender_peer_id: str | None = None):
        """(World-only) Processes a batch of stats received from a peer.

        Args:
            peer_stats_batch: List of updates, each with 'peer_id', 'stat_name', 'timestamp', 'value'.
            sender_peer_id: The peer that sent the batch (may differ from the peer_id inside an update).
        """

        # 1. Update own stats (this logic is now in the World)
        self.collect_and_store_own_stats()

        # 2. Process peer stats
        connected_peers = []
        for update in peer_stats_batch:
            try:
                p_id = update['peer_id']
                if p_id != sender_peer_id:
                    # TODO: decide if we want to filter the stats
                    pass
                stat_name = update['stat_name']
                t = int(update['timestamp'])
                v = update['value']

                # Call the hook (which also lives in the World now)
                if self._process_custom_stat(stat_name, v, p_id, t):
                    continue  # The custom processor handled it

                # Generate the graph and handle the connected_peers stat
                if stat_name == 'connected_peers':
                    # We need to wait for all the info to arrive before updating the graph.
                    # Otherwise, _extract_graph_node_info may not find data yet.
                    connected_peers.append((p_id, v, t))
                    continue

                # 3. Push to the "dumb" Stats recorder
                if stat_name in self.stats.all_keys:
                    self.stats.store_stat(stat_name, v, peer_id=p_id, timestamp=t)
                else:
                    self.err(f"[World] Unknown stat received: {stat_name}")

            except Exception as e:
                self.err(f"[World] Error processing stats update {update}: {e}")

        # Now update the graph for all collected connected_peers stats
        for p_id, v, t in connected_peers:
            self._update_graph(p_id, v, t)

        # Clean the graph from potentially stale peers
        self._prune_graph()

    def debug_stats_dashboard(self):
        """Helper to verify the dashboard looks correct during development."""
        # Local import: plotly is a heavy, optional dependency, only needed for debugging
        import plotly.io as pio

        print("[DEBUG] Rendering Dashboard...")
        json_str = self.stats.plot()
        if json_str:
            pio.from_json(json_str).show()
|