unaiverse-0.1.8-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of unaiverse might be problematic.

Files changed (50)
  1. unaiverse/__init__.py +19 -0
  2. unaiverse/agent.py +2008 -0
  3. unaiverse/agent_basics.py +2041 -0
  4. unaiverse/clock.py +191 -0
  5. unaiverse/dataprops.py +1209 -0
  6. unaiverse/hsm.py +1889 -0
  7. unaiverse/modules/__init__.py +18 -0
  8. unaiverse/modules/cnu/__init__.py +17 -0
  9. unaiverse/modules/cnu/cnus.py +536 -0
  10. unaiverse/modules/cnu/layers.py +261 -0
  11. unaiverse/modules/cnu/psi.py +60 -0
  12. unaiverse/modules/hl/__init__.py +15 -0
  13. unaiverse/modules/hl/hl_utils.py +411 -0
  14. unaiverse/modules/networks.py +1509 -0
  15. unaiverse/modules/utils.py +710 -0
  16. unaiverse/networking/__init__.py +16 -0
  17. unaiverse/networking/node/__init__.py +18 -0
  18. unaiverse/networking/node/connpool.py +1261 -0
  19. unaiverse/networking/node/node.py +2299 -0
  20. unaiverse/networking/node/profile.py +447 -0
  21. unaiverse/networking/node/tokens.py +79 -0
  22. unaiverse/networking/p2p/__init__.py +188 -0
  23. unaiverse/networking/p2p/go.mod +127 -0
  24. unaiverse/networking/p2p/go.sum +548 -0
  25. unaiverse/networking/p2p/golibp2p.py +18 -0
  26. unaiverse/networking/p2p/golibp2p.pyi +135 -0
  27. unaiverse/networking/p2p/lib.go +2527 -0
  28. unaiverse/networking/p2p/lib.go.sha256 +1 -0
  29. unaiverse/networking/p2p/lib_types.py +312 -0
  30. unaiverse/networking/p2p/message_pb2.py +63 -0
  31. unaiverse/networking/p2p/messages.py +268 -0
  32. unaiverse/networking/p2p/mylogger.py +77 -0
  33. unaiverse/networking/p2p/p2p.py +929 -0
  34. unaiverse/networking/p2p/proto-go/message.pb.go +616 -0
  35. unaiverse/networking/p2p/unailib.cpython-312-aarch64-linux-gnu.so +0 -0
  36. unaiverse/streamlib/__init__.py +15 -0
  37. unaiverse/streamlib/streamlib.py +210 -0
  38. unaiverse/streams.py +770 -0
  39. unaiverse/utils/__init__.py +16 -0
  40. unaiverse/utils/ask_lone_wolf.json +27 -0
  41. unaiverse/utils/lone_wolf.json +19 -0
  42. unaiverse/utils/misc.py +492 -0
  43. unaiverse/utils/sandbox.py +293 -0
  44. unaiverse/utils/server.py +435 -0
  45. unaiverse/world.py +353 -0
  46. unaiverse-0.1.8.dist-info/METADATA +365 -0
  47. unaiverse-0.1.8.dist-info/RECORD +50 -0
  48. unaiverse-0.1.8.dist-info/WHEEL +7 -0
  49. unaiverse-0.1.8.dist-info/licenses/LICENSE +43 -0
  50. unaiverse-0.1.8.dist-info/top_level.txt +1 -0
@@ -0,0 +1,16 @@
+ """
+ █████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
+ ░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
+ ░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
+ ░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
+ ░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
+ ░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
+ ░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
+ ░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
+ A Collectionless AI Project (https://collectionless.ai)
+ Registration/Login: https://unaiverse.io
+ Code Repositories: https://github.com/collectionlessai/
+ Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
+ """
+ from . import misc
+ from . import sandbox
@@ -0,0 +1,27 @@
+ {
+     "initial_state": "ready",
+     "state": "ready",
+     "prev_state": null,
+     "limbo_state": null,
+     "state_actions": {
+         "found_lone_wolf": ["do_gen", {"samples": 1}, 0, false]
+     },
+     "transitions": {
+         "ready": {
+             "found_lone_wolf": [["find_agents", {"role": "public_agent", "engage": true}, true, 0]]
+         },
+         "found_lone_wolf": {
+             "asked": [["ask_gen", {"u_hashes": ["<agent>:processor"], "samples": 1}, true, 1]]
+         },
+         "asked": {
+             "lone_wolf_done": [
+                 ["done_gen", {}, false, 2],
+                 ["nop", {"delay": 30.0}, true, 3]
+             ]
+         },
+         "lone_wolf_done": {
+             "ready": [["disconnect_by_role", {"role": "public_agent"}, true, 4]]
+         }
+     },
+     "cur_action": null
+ }
@@ -0,0 +1,19 @@
+ {
+     "initial_state": "ready",
+     "state": "ready",
+     "prev_state": null,
+     "limbo_state": null,
+     "state_actions": {
+         "ready": [null, null, 0, false],
+         "done_gen": [null, null, 1, false]
+     },
+     "transitions": {
+         "ready": {
+             "done_gen": [["do_gen", {"timeout": 7.5}, false, 0]]
+         },
+         "done_gen": {
+             "ready": [["nop", {}, true, 3]]
+         }
+     },
+     "cur_action": null
+ }
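Both JSON files above serialize the same state-machine schema (the package's hsm module defines the real semantics): "transitions" maps a current state to candidate destination states, each guarded by a list of [action_name, action_args, ...] entries, while "state_actions" attaches actions to states. The sketch below is a minimal, illustrative reader of that table; the meaning of the trailing boolean/integer fields of each entry is inferred from the JSON alone, not from unaiverse/hsm.py.

    # Minimal, illustrative reader for the transition tables above.
    # NOTE: the trailing flag/index fields of each entry are an assumption here.
    import json

    def reachable(cfg: dict, state: str) -> dict:
        """Destination states reachable from `state`, with their guarding actions."""
        return cfg["transitions"].get(state, {})

    with open("unaiverse/utils/lone_wolf.json") as f:  # Path as shipped in the wheel
        cfg = json.load(f)

    print(reachable(cfg, cfg["initial_state"]))
    # {'done_gen': [['do_gen', {'timeout': 7.5}, False, 0]]}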
@@ -0,0 +1,492 @@
+ """
+ █████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
+ ░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
+ ░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
+ ░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
+ ░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
+ ░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
+ ░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
+ ░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
+ A Collectionless AI Project (https://collectionless.ai)
+ Registration/Login: https://unaiverse.io
+ Code Repositories: https://github.com/collectionlessai/
+ Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
+ """
+ import os
+ import ast
+ import sys
+ import time
+ import json
+ import math
+ import shutil
+ import threading
+ from tqdm import tqdm
+ from pathlib import Path
+ from datetime import datetime
+
+
+ class GenException(Exception):
+     """Base exception for this application (a simple wrapper around a generic Exception)."""
+     pass
+
+
+ def save_node_addresses_to_file(node, dir_path: str, public: bool,
+                                 filename: str = "addresses.txt", append: bool = False):
+     address_file = os.path.join(dir_path, filename)
+     with open(address_file, "w" if not append else "a") as file:
+         file.write(node.hosted.get_name() + ";" +
+                    str(node.get_public_addresses() if public else node.get_world_addresses()) + "\n")
+         file.flush()
+
+
+ def get_node_addresses_from_file(dir_path: str, filename: str = "addresses.txt") -> dict[str, list[str]]:
+     ret = {}
+     with open(os.path.join(dir_path, filename)) as file:
+         lines = file.readlines()
+
+     # Old file format: a plain list of multiaddresses, one per line
+     if lines[0].strip().startswith("/"):
+         addresses = []
+         for line in lines:
+             _line = line.strip()
+             if len(_line) > 0:
+                 addresses.append(_line)
+         ret["unk"] = addresses
+         return ret
+
+     # New file format: one "<node_name>;<list_of_addresses>" entry per line
+     for line in lines:
+         if line.strip().startswith("***"):  # Header marker
+             continue
+         semicolon_separated_values = [v.strip() for v in line.split(';')]
+         node_name, addresses_str = semicolon_separated_values
+         ret[node_name] = ast.literal_eval(addresses_str)  # If a name appears multiple times, the last entry wins
+
+     return ret
+
+
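For reference, a line in the "new" addresses file format parsed above looks like the following sketch (node name and multiaddresses invented for illustration):

    # Hypothetical addresses.txt line and how the parser above handles it:
    import ast

    line = "my_node;['/ip4/127.0.0.1/tcp/4001', '/ip4/10.0.0.5/tcp/4001']"
    node_name, addresses_str = [v.strip() for v in line.split(';')]
    print(node_name, ast.literal_eval(addresses_str))
    # my_node ['/ip4/127.0.0.1/tcp/4001', '/ip4/10.0.0.5/tcp/4001']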
+ class Silent:
+     def __init__(self, ignore: bool = False):
+         self.ignore = ignore
+
+     def __enter__(self):
+         if not self.ignore:
+             self._original_stdout = sys.stdout
+             sys.stdout = open(os.devnull, "w")
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if not self.ignore:
+             sys.stdout.close()
+             sys.stdout = self._original_stdout
+
+
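A context-manager usage sketch for Silent; the ignore flag turns it into a no-op, which works as a simple verbosity switch:

    # Usage sketch: suppress stdout inside the block, restore it afterwards.
    from unaiverse.utils.misc import Silent

    with Silent():
        print("discarded (goes to os.devnull)")
    print("visible again")

    with Silent(ignore=True):  # No-op variant
        print("printed normally")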
+ # The countdown function
+ def countdown_start(seconds: int, msg: str):
+     class TqdmPrintRedirector:
+         def __init__(self, tqdm_instance):
+             self.tqdm_instance = tqdm_instance
+             self.original_stdout = sys.__stdout__
+
+         def write(self, s):
+             if s.strip():  # Ignore empty lines (needed for the way tqdm works)
+                 self.tqdm_instance.write(s, file=self.original_stdout)
+
+         def flush(self):
+             pass  # Tqdm handles flushing
+
+     def drawing(secs: int, message: str):
+         with tqdm(total=secs, desc=message, file=sys.__stdout__) as t:
+             sys.stdout = TqdmPrintRedirector(t)  # Redirect prints to tqdm.write
+             for _ in range(secs):
+                 time.sleep(1)
+                 t.update(1)
+             sys.stdout = sys.__stdout__  # Restore original stdout
+
+     sys.stdout.flush()
+     handle = threading.Thread(target=drawing, args=(seconds, msg))
+     handle.start()
+     return handle
+
+
+ def countdown_wait(handle):
+     handle.join()
+
+
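A usage sketch for the non-blocking countdown pair:

    # Usage sketch: show a 5-second tqdm countdown in a background thread,
    # keep working, then join it.
    from unaiverse.utils.misc import countdown_start, countdown_wait

    handle = countdown_start(5, "Warming up")
    # ... do other work here; print() output is routed through tqdm.write ...
    countdown_wait(handle)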
+ def check_json_start(file: str, msg: str, delete_existing: bool = False):
+     from rich.json import JSON
+     from rich.console import Console
+     cons = Console(file=sys.__stdout__)
+
+     if delete_existing:
+         if os.path.exists(file):
+             os.remove(file)
+
+     def checking(file_path: str, console: Console):
+         print(msg)
+         prev_dict = {}
+         while True:
+             if os.path.exists(file_path):
+                 try:
+                     with open(file_path, "r") as f:
+                         json_dict = json.load(f)
+                     if json_dict != prev_dict:
+                         now = datetime.now()
+                         console.print("─" * 80)
+                         console.print("Printing updated file "
+                                       "(print time: " + now.strftime("%Y-%m-%d %H:%M:%S") + ")")
+                         console.print("─" * 80)
+                         console.print(JSON.from_data(json_dict))
+                         prev_dict = json_dict
+                 except KeyboardInterrupt:
+                     break
+                 except Exception:
+                     pass
+             time.sleep(1)
+
+     handle = threading.Thread(target=checking, args=(file, cons), daemon=True)
+     handle.start()
+     return handle
+
+
+ def check_json_start_wait(handle):
+     handle.join()
+
+
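A usage sketch for the JSON watcher (the daemon thread loops until the process exits or is interrupted):

    # Usage sketch: pretty-print stats.json (via rich) whenever it changes.
    # The file name is a placeholder.
    from unaiverse.utils.misc import check_json_start, check_json_start_wait

    handle = check_json_start("stats.json", "Watching stats.json ...", delete_existing=False)
    # ... a producer keeps rewriting stats.json ...
    check_json_start_wait(handle)  # Blocks on the watcher thread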
+ def show_images_grid(image_paths, max_cols=3):
+     import matplotlib.pyplot as plt
+     import matplotlib.image as mpimg
+
+     n = len(image_paths)
+     cols = min(max_cols, n)
+     rows = math.ceil(n / cols)
+
+     # Load images
+     images = [mpimg.imread(p) for p in image_paths]
+
+     # Determine figure size based on image sizes
+     widths, heights = zip(*[(img.shape[1], img.shape[0]) for img in images])
+
+     # Use average width/height for scaling
+     avg_width = sum(widths) / len(widths)
+     avg_height = sum(heights) / len(heights)
+
+     fig_width = cols * avg_width / 100
+     fig_height = rows * avg_height / 100
+
+     fig, axes = plt.subplots(rows, cols, figsize=(fig_width, fig_height))
+     axes = axes.flatten() if n > 1 else [axes]
+
+     fig.canvas.manager.set_window_title("Image Grid")
+
+     # Hide unused axes
+     for ax in axes[n:]:
+         ax.axis('off')
+
+     # Display images (a single pass draws each image once, with its index as title)
+     for idx, (ax, img) in enumerate(zip(axes, images)):
+         ax.imshow(img)
+         ax.axis('off')
+         ax.set_title(str(idx), fontsize=12, fontweight='bold')
+
+     plt.subplots_adjust(wspace=0, hspace=0)
+
+     # Turn on interactive mode
+     plt.ion()
+     plt.show()
+
+     fig.canvas.draw()
+     plt.pause(0.1)
+
+
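A usage sketch (file names are placeholders):

    # Usage sketch: display three images in a single row, titled "0", "1", "2",
    # in interactive (non-blocking) mode.
    from unaiverse.utils.misc import show_images_grid

    show_images_grid(["img0.png", "img1.png", "img2.png"], max_cols=3)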
+ class FileTracker:
+     def __init__(self, folder, ext=".json", prefix=None, skip=None):
+         self.folder = Path(folder)
+         self.ext = ext.lower()
+         self.skip = skip
+         self.prefix = prefix
+         self.last_state = self.__scan_files()
+
+     def __scan_files(self):
+         state = {}
+         for file in self.folder.iterdir():
+             if ((file.is_file() and file.suffix.lower() == self.ext and
+                  (self.skip is None or file.name != self.skip)) and
+                     (self.prefix is None or file.name.startswith(self.prefix))):
+                 state[file.name] = os.path.getmtime(file)
+         return state
+
+     def something_changed(self):
+         new_state = self.__scan_files()
+         created = [f for f in new_state if f not in self.last_state]
+         modified = [f for f in new_state
+                     if f in self.last_state and new_state[f] != self.last_state[f]]
+         self.last_state = new_state
+         return created or modified
+
+
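A polling usage sketch for FileTracker (folder and prefix are illustrative):

    # Usage sketch: poll a folder once per second for new or modified JSON
    # files whose names start with "stats_".
    import time
    from unaiverse.utils.misc import FileTracker

    tracker = FileTracker("./outputs", ext=".json", prefix="stats_")
    while True:
        if tracker.something_changed():
            print("New or updated stats files detected")
        time.sleep(1)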
+ def prepare_key_dir(app_name):
+     app_name = app_name.lower()
+     if os.name == "nt":  # Windows
+         if os.getenv("APPDATA") is not None:
+             key_dir = os.path.join(os.getenv("APPDATA"), "Local", app_name)  # Expected
+         else:
+             key_dir = os.path.join(str(Path.home()), f".{app_name}")  # Fallback
+     else:  # Linux/macOS
+         key_dir = os.path.join(str(Path.home()), f".{app_name}")
+     os.makedirs(key_dir, exist_ok=True)
+     return key_dir
+
+
+ def get_key_considering_multiple_sources(key_variable: str | None) -> str:
+
+     # Creating the folder (if needed) to store the key
+     try:
+         key_dir = prepare_key_dir(app_name="UNaIVERSE")
+     except Exception:
+         raise GenException("Cannot create folder to store the key file")
+     key_file = os.path.join(key_dir, "key")
+
+     # Getting the key from an existing file
+     key_from_file = None
+     if os.path.exists(key_file):
+         with open(key_file, "r") as f:
+             key_from_file = f.read().strip()
+
+     # Getting the key from an environment variable
+     key_from_env = os.getenv("NODE_KEY", None)
+
+     # Getting the key from a code-specified option
+     if key_variable is not None and len(key_variable.strip()) > 0:
+         key_from_var = key_variable.strip()
+         if key_from_var.startswith("<") and key_from_var.endswith(">"):  # Something like <UNAIVERSE_KEY_GOES_HERE>
+             key_from_var = None
+     else:
+         key_from_var = None
+
+     # Finding valid sources and checking whether multiple keys were provided
+     _keys = [key_from_var, key_from_env, key_from_file]
+     _source_names = ["your code", "env variable 'NODE_KEY'", f"cache file {key_file}"]
+     source_names = []
+     mismatching = False
+     multiple_source = False
+     first_key = None
+     first_source = None
+     _prev_key = None
+     for _key, _source_name in zip(_keys, _source_names):
+         if _key is not None:
+             source_names.append(_source_name)
+             if _prev_key is not None:
+                 if _key != _prev_key:
+                     mismatching = True
+                 multiple_source = True
+             else:
+                 _prev_key = _key
+                 first_key = _key
+                 first_source = _source_name
+
+     if len(source_names) > 0:
+         msg = ""
+         if multiple_source and not mismatching:
+             msg = "UNaIVERSE key (the exact same key) present in multiple locations: " + ", ".join(source_names)
+         if multiple_source and mismatching:
+             msg = "UNaIVERSE keys (different keys) present in multiple locations: " + ", ".join(source_names)
+             msg += "\nLoaded the one stored in " + first_source
+         if not multiple_source:
+             msg = f"UNaIVERSE key loaded from {first_source}"
+         print(msg)
+         return first_key
+     else:
+
+         # If no key is present, ask the user and save the answer to the cache file
+         print("UNaIVERSE key not present in " + ", ".join(_source_names))
+         print("If you did not already do it, go to https://unaiverse.io, login, and generate a key")
+         key = input("Enter your UNaIVERSE key (it will be saved to the cache file): ").strip()
+         with open(key_file, "w") as f:
+             f.write(key)
+         return key
+
+
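Resolution order is the code value first, then the NODE_KEY environment variable, then the cached key file; a usage sketch:

    # Usage sketch: resolve the UNaIVERSE key. An angle-bracket placeholder is
    # treated as "not provided", so the env variable or cache file wins instead.
    from unaiverse.utils.misc import get_key_considering_multiple_sources

    key = get_key_considering_multiple_sources("<UNAIVERSE_KEY_GOES_HERE>")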
+ class StatLoadedSaver:
+
+     def __init__(self, base_filename: str = "stats", save_dir: str = "./", max_size_mb: int = 5,
+                  dynamic_stats: set | list | tuple | None = None, static_stats: set | list | tuple | None = None,
+                  group_indexed_stats: set | list | tuple | None = None, group_key: str | None = None):
+         self.base_filename = base_filename
+         self.save_dir = save_dir
+         self.max_size_bytes = max_size_mb * 1024 * 1024
+
+         # Normalizing the None collections to empty sets avoids TypeErrors in the methods below
+         self.time_indexed_stats = set(dynamic_stats) if dynamic_stats is not None else set()
+         self.static_stats = set(static_stats) if static_stats is not None else set()
+         self.group_indexed_stats = set(group_indexed_stats) if group_indexed_stats is not None else set()
+         self.group_key = group_key
+
+         self.changed_stats = set()
+         self.last_saved = {}  # (group_id, stat_name) -> last_saved_timestamp
+
+         if not os.path.exists(self.save_dir) or not os.path.isdir(self.save_dir):
+             os.makedirs(self.save_dir, exist_ok=True)
+
+         assert len(self.group_indexed_stats) == 0 or group_key is not None, \
+             "Specify the group key (if you have group indexed stats)"
+
+         self.__ensure_current_file()
+
+     def mark_stat_as_changed(self, stat_name):
+         self.changed_stats.add(stat_name)
+
+     def load_existing_data(self):
+         """Load all existing CSV files and rebuild the last_saved timestamps."""
+         self.last_saved = {}  # Reset
+
+         # Find all files that match the pattern, to get the time indexed data
+         files = []
+         prefix = self.base_filename + "_"
+         for f_name in os.listdir(self.save_dir):
+             if f_name.startswith(prefix) and f_name.endswith(".csv"):
+                 try:
+                     idx = int(f_name.split("_")[-1].split(".")[0])
+                     files.append((idx, f_name))
+                 except ValueError:
+                     continue
+
+         # Sort by index to read in order (highest index first, i.e., from the oldest to the newest)
+         files.sort(reverse=True)
+         stats = {}
+
+         for _, f_name in files:
+             path = os.path.join(self.save_dir, f_name)
+             with open(path, "r") as f:
+                 lines = f.readlines()
+             for row in lines:
+                 row_tokens = row.split(',')
+                 group = row_tokens[0]
+                 if group == "group":  # Header row
+                     continue
+                 stat_name = row_tokens[1]
+                 ts = float(row_tokens[2])  # Timestamps are compared numerically, so parse them as floats
+                 val = float(row_tokens[3])
+                 last_ts = self.last_saved.get((group, stat_name), -1.0)
+                 if ts > last_ts:
+                     self.last_saved[(group, stat_name)] = ts
+                 stats.setdefault(self.group_key, {}).setdefault(group, {}).setdefault(stat_name, {})[ts] = val
+
+         # Make sure the newest (_000001) file exists
+         self.__ensure_current_file()
+         return stats
+
+     def save_incremental(self, stats):
+         """Save each static not-grouped stat to its own JSON file; save static grouped stats in a single,
+         shared CSV; save dynamic stats (grouped and not) to a shared CSV => only new data since the last call."""
+
+         # Static (and not group indexed) => <base_filename>_<stat_name>.json
+         for stat_name in self.static_stats:
+             if stat_name not in self.group_indexed_stats:
+                 if stat_name in self.changed_stats:  # Only re-save stats marked as changed
+                     data = stats.get(stat_name, {})
+                     with open(os.path.join(self.save_dir, f"{self.base_filename}_{stat_name}.json"), "w") as f:
+                         json.dump(data, f)
+
+         # Static and group indexed => <base_filename>_static.csv
+         shared_static_stats_changed = False
+         for stat_name in self.static_stats:
+             if stat_name in self.group_indexed_stats:
+                 if stat_name in self.changed_stats:
+                     shared_static_stats_changed = True
+         stats_list = [s for s in self.static_stats if s in self.group_indexed_stats]
+         if shared_static_stats_changed and len(stats_list) > 0:
+             header = ["group"] + stats_list
+             with open(os.path.join(self.save_dir, f"{self.base_filename}_static.csv"), "w") as f:
+                 f.write(",".join(header) + "\n")
+
+                 group_to_group_stats = stats[self.group_key]
+                 for group_name, group_stats in group_to_group_stats.items():
+                     row = [group_name]
+                     for stat_name in self.static_stats:
+                         if stat_name in self.group_indexed_stats and stat_name in group_stats:
+                             row.append(str(group_stats[stat_name]))
+                     f.write(",".join(row) + "\n")
+
+         # Dynamic (both group indexed and not group indexed) => <base_filename>_1.csv, <base_filename>_2.csv, ...
+         self.__ensure_current_file()
+         filename = self.__current_filename()
+
+         with open(filename, "a") as f:
+
+             # Dynamic and not group indexed (introducing a fake group to handle all of them the same way)
+             group_to_group_stats = {}
+             fake_group_for_not_grouped_stats = "<ungrouped>"
+             for stat_name in self.time_indexed_stats:
+                 if stat_name not in self.group_indexed_stats and stat_name in stats:
+                     if fake_group_for_not_grouped_stats not in group_to_group_stats:
+                         group_to_group_stats[fake_group_for_not_grouped_stats] = {}
+                     group_to_group_stats[fake_group_for_not_grouped_stats][stat_name] = stats[stat_name]
+
+             # Dynamic and group indexed
+             if self.group_key in stats:
+                 group_to_group_stats.update(stats[self.group_key])
+
+             # Dynamic (now they are all group indexed, thanks to the introduction of the fake group)
+             for group_name, group_stats in group_to_group_stats.items():
+                 for stat_name in self.time_indexed_stats:
+                     if stat_name in group_stats:
+                         timestamps = group_stats[stat_name].keys()
+                         last_ts = self.last_saved.get((group_name, stat_name), -1.0)
+
+                         for ts in timestamps:
+                             if ts > last_ts:
+                                 value = group_stats[stat_name][ts]
+                                 row = [group_name, stat_name, str(ts), str(value)]
+                                 f.write(",".join(row) + "\n")
+                                 self.last_saved[(group_name, stat_name)] = ts
+
+         # Clearing the markers
+         self.changed_stats = set()
+
+     def __current_filename(self):
+         """Always return the newest (index 1) file."""
+         return os.path.join(self.save_dir, f"{self.base_filename}_{1:06d}.csv")
+
+     def __ensure_current_file(self):
+         """Ensure the current newest file is <base_filename>_000001.csv. If rotation is needed, shift existing files."""
+         filename = self.__current_filename()  # This will return the file with suffix '_000001'
+         stats_list = [s for s in self.time_indexed_stats if s in self.group_indexed_stats]
+
+         # If the current file exists but is too large, rotate all existing ones upward
+         # (after rotation, no _000001 file exists anymore)
+         if len(stats_list) > 0 and os.path.exists(filename) and os.path.getsize(filename) >= self.max_size_bytes:
+             self.__rotate_files_up()
+
+         # Create a fresh _000001 file only if it is missing, so existing data is never truncated
+         if not os.path.exists(filename):
+             with open(filename, "w") as f:
+                 header = ["group"] + stats_list
+                 f.write(",".join(header) + "\n")
+
+     def __rotate_files_up(self):
+         """Shift existing files upward by 1 index (e.g. _1 -> _2, _2 -> _3, etc.)."""
+         prefix = self.base_filename + "_"
+         files = []
+         for f_name in os.listdir(self.save_dir):
+             if f_name.startswith(prefix) and f_name.endswith(".csv"):
+                 try:
+                     idx = int(f_name.split("_")[-1].split(".")[0])
+                     files.append((idx, f_name))
+                 except ValueError:
+                     continue
+
+         # Sort descending so renaming does not overwrite
+         files.sort(reverse=True)
+
+         for idx, f_name in files:
+             src = os.path.join(self.save_dir, f_name)
+             dst = os.path.join(self.save_dir, f"{self.base_filename}_{idx + 1:06d}.csv")
+             shutil.move(src, dst)
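To close, a usage sketch for StatLoadedSaver. The shape of the stats dictionary is inferred from save_incremental(): dynamic grouped stats live under the group key as {group: {stat: {timestamp: value}}}; the stat and group names below are invented for illustration:

    # Usage sketch (stat/group names are illustrative, not from the package):
    from unaiverse.utils.misc import StatLoadedSaver

    saver = StatLoadedSaver(base_filename="stats", save_dir="./outputs",
                            dynamic_stats={"loss"}, static_stats=set(),
                            group_indexed_stats={"loss"}, group_key="agents")
    stats = {"agents": {"agent_0": {"loss": {0.0: 1.25, 1.0: 0.97}}}}
    saver.save_incremental(stats)  # Appends only timestamps newer than the last save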