naeural-client 2.7.30__py3-none-any.whl → 2.7.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- naeural_client/_ver.py +1 -1
- naeural_client/const/evm_net.py +5 -1
- naeural_client/ipfs/__init__.py +1 -0
- naeural_client/ipfs/r1fs.py +555 -0
- naeural_client/logging/base_logger.py +1 -1
- naeural_client/utils/config.py +1 -1
- naeural_client/utils/oracle_sync/oracle_tester.py +56 -19
- {naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/METADATA +1 -1
- {naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/RECORD +12 -10
- {naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/WHEEL +0 -0
- {naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/entry_points.txt +0 -0
- {naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/licenses/LICENSE +0 -0
naeural_client/_ver.py
CHANGED
naeural_client/const/evm_net.py
CHANGED
@@ -8,7 +8,8 @@ class EvmNetData:
   EE_EPOCH_INTERVAL_SECONDS_KEY = 'EE_EPOCH_INTERVAL_SECONDS'
 
   EE_SUPERVISOR_MIN_AVAIL_PRC_KEY = 'EE_SUPERVISOR_MIN_AVAIL_PRC'
-
+
+  EE_ORACLE_API_URL_KEY = 'EE_ORACLE_API_URL'
 
 EVM_NET_DATA = {
   'mainnet': {
@@ -19,6 +20,7 @@ EVM_NET_DATA = {
     EvmNetData.EE_EPOCH_INTERVALS_KEY : 24,
     EvmNetData.EE_EPOCH_INTERVAL_SECONDS_KEY : 3600,
     EvmNetData.EE_SUPERVISOR_MIN_AVAIL_PRC_KEY : 0.98,
+    EvmNetData.EE_ORACLE_API_URL_KEY : "https://oracle.ratio1.ai",
   },
 
   'testnet': {
@@ -29,6 +31,7 @@ EVM_NET_DATA = {
     EvmNetData.EE_EPOCH_INTERVALS_KEY : 24,
     EvmNetData.EE_EPOCH_INTERVAL_SECONDS_KEY : 3600,
     EvmNetData.EE_SUPERVISOR_MIN_AVAIL_PRC_KEY : 0.6,
+    EvmNetData.EE_ORACLE_API_URL_KEY : "https://testnet-oracle.ratio1.ai",
   },
 
 
@@ -40,6 +43,7 @@ EVM_NET_DATA = {
     EvmNetData.EE_EPOCH_INTERVALS_KEY : 1,
     EvmNetData.EE_EPOCH_INTERVAL_SECONDS_KEY : 3600,
     EvmNetData.EE_SUPERVISOR_MIN_AVAIL_PRC_KEY : 0.6,
+    EvmNetData.EE_ORACLE_API_URL_KEY : "https://devnet-oracle.ratio1.ai",
   },
 
 }
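The net effect of the hunks above is a per-network oracle API URL in the EVM network configuration. A minimal illustrative sketch (the oracle_url_for helper is hypothetical; only EvmNetData and EVM_NET_DATA come from this diff):

from naeural_client.const.evm_net import EvmNetData, EVM_NET_DATA

def oracle_url_for(network: str):
  # Look up the network's config dict, then the newly added oracle-URL key;
  # returns None if the network or the key is missing.
  return EVM_NET_DATA.get(network, {}).get(EvmNetData.EE_ORACLE_API_URL_KEY)

print(oracle_url_for("testnet"))  # -> https://testnet-oracle.ratio1.ai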
naeural_client/ipfs/__init__.py
ADDED
@@ -0,0 +1 @@
+from .r1fs import R1FSEngine, log_info
naeural_client/ipfs/r1fs.py
ADDED
@@ -0,0 +1,555 @@
+"""
+Ratio1 base IPFS utility functions.
+
+"""
+import subprocess
+import json
+from datetime import datetime
+import base64
+import time
+import os
+import tempfile
+import uuid
+
+from threading import Lock
+
+__VER__ = "0.2.1"
+
+
+class IPFSCt:
+  EE_IPFS_RELAY_ENV_KEY = "EE_IPFS_RELAY"
+  EE_SWARM_KEY_CONTENT_BASE64_ENV_KEY = "EE_SWARM_KEY_CONTENT_BASE64"
+  R1FS_DOWNLOADS = "ipfs_downloads"
+  R1FS_UPLOADS = "ipfs_uploads"
+  TEMP_DOWNLOAD = os.path.join("./_local_cache/_output", R1FS_DOWNLOADS)
+  TEMP_UPLOAD = os.path.join("./_local_cache/_output", R1FS_UPLOADS)
+
+ERROR_TAG = "Unknown"
+
+COLOR_CODES = {
+  "g": "\033[92m",
+  "r": "\033[91m",
+  "b": "\033[94m",
+  "y": "\033[93m",
+  "m": "\033[95m",
+  'd': "\033[90m", # dark gray
+  "reset": "\033[0m"
+}
+
+def log_info(msg: str, color="reset", **kwargs):
+  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+  color_code = COLOR_CODES.get(color, COLOR_CODES["reset"])
+  reset_code = COLOR_CODES["reset"]
+  print(f"{color_code}[{timestamp}] {msg}{reset_code}", flush=True)
+  return
+
+class SimpleLogger:
+  def P(self, *args, **kwargs):
+    log_info(*args, **kwargs)
+    return
+
+def require_ipfs_started(method):
+  """
+  decorator to ensure the IPFS is started before executing the method.
+
+  parameters
+  ----------
+  method : callable
+    the method to be decorated.
+
+  returns
+  -------
+  callable
+    the wrapped method that checks the 'started' attribute.
+
+  raises
+  ------
+  RuntimeError
+    if the instance's 'started' attribute is False.
+  """
+  def wrapper(self, *args, **kwargs):
+    if not self.ipfs_started:
+      raise RuntimeError(f"{method.__name__} FAILED. R1FS.ipfs_started=={self.ipfs_started}")
+    return method(self, *args, **kwargs)
+  return wrapper
+
+
+
+class R1FSEngine:
+  _lock: Lock = Lock()
+  __instances = {}
+
+  def __new__(
+    cls,
+    name: str = "default",
+    logger: any = None,
+    downloads_dir: str = None,
+    uploads_dir: str = None,
+    base64_swarm_key: str = None,
+    ipfs_relay: str = None,
+    debug=False,
+  ):
+    with cls._lock:
+      if name not in cls.__instances:
+        instance = super(R1FSEngine, cls).__new__(cls)
+        instance._build(
+          name=name, logger=logger, downloads_dir=downloads_dir, uploads_dir=uploads_dir,
+          base64_swarm_key=base64_swarm_key, ipfs_relay=ipfs_relay, debug=debug,
+        )
+        cls.__instances[name] = instance
+      else:
+        instance = cls.__instances[name]
+    return instance
+
+  def _build(
+    self,
+    name: str = "default",
+    logger: any = None,
+    downloads_dir: str = None,
+    uploads_dir: str = None,
+    base64_swarm_key: str = None,
+    ipfs_relay: str = None,
+    debug=False,
+  ):
+    """
+    Initialize the IPFS wrapper with a given logger function.
+    By default, it uses the built-in print function for logging.
+    """
+    self.__name = name
+    if logger is None:
+      logger = SimpleLogger()
+
+    self.logger = logger
+
+    self.__ipfs_started = False
+    self.__ipfs_address = None
+    self.__ipfs_id = None
+    self.__ipfs_agent = None
+    self.__uploaded_files = {}
+    self.__downloaded_files = {}
+    self.__base64_swarm_key = base64_swarm_key
+    self.__ipfs_relay = ipfs_relay
+    self.__downloads_dir = downloads_dir
+    self.__uploads_dir = uploads_dir
+    self.__debug = debug
+
+    self.startup()
+    return
+
+  def startup(self):
+
+    if self.__downloads_dir is None:
+      if hasattr(self.logger, "get_output_folder"):
+        self.__downloads_dir = os.path.join(
+          self.logger.get_output_folder(),
+          IPFSCt.R1FS_DOWNLOADS
+        )
+      else:
+        self.__downloads_dir = IPFSCt.TEMP_DOWNLOAD
+    #end if downloads_dir
+    os.makedirs(self.__downloads_dir, exist_ok=True)
+
+    if self.__uploads_dir is None:
+      if hasattr(self.logger, "get_output_folder"):
+        self.__uploads_dir = os.path.join(
+          self.logger.get_output_folder(),
+          IPFSCt.R1FS_UPLOADS
+        )
+      else:
+        self.__uploads_dir = IPFSCt.TEMP_UPLOAD
+    os.makedirs(self.__uploads_dir, exist_ok=True)
+
+    self.maybe_start_ipfs(
+      base64_swarm_key=self.__base64_swarm_key,
+      ipfs_relay=self.__ipfs_relay,
+    )
+    return
+
+
+  def P(self, s, *args, **kwargs):
+    s = "[R1FS] " + s
+    self.logger.P(s, *args, **kwargs)
+    return
+
+  def Pd(self, s, *args, **kwargs):
+    if self.__debug:
+      s = "[R1FS][DEBUG] " + s
+      self.logger.P(s, *args, **kwargs)
+    return
+
+  @property
+  def ipfs_id(self):
+    return self.__ipfs_id
+
+  @property
+  def ipfs_address(self):
+    return self.__ipfs_address
+
+  @property
+  def ipfs_agent(self):
+    return self.__ipfs_agent
+
+  @property
+  def ipfs_started(self):
+    return self.__ipfs_started
+
+  @property
+  def uploaded_files(self):
+    return self.__uploaded_files
+
+  @property
+  def downloaded_files(self):
+    return self.__downloaded_files
+
+  def get_unique_name(self, prefix="r1fs", suffix=""):
+    str_id = str(uuid.uuid4()).replace("-", "")[:8]
+    return f"{prefix}_{str_id}{suffix}"
+
+  def get_unique_upload_name(self, prefix="r1fs", suffix=""):
+    return os.path.join(self.__uploads_dir, self.get_unique_name(prefix, suffix))
+
+  def get_unique_or_complete_upload_name(self, fn=None, prefix="r1fs", suffix=""):
+    if fn is not None and os.path.dirname(fn) == "":
+      return os.path.join(self.__uploads_dir, fn)
+    return self.get_unique_upload_name(prefix, suffix)
+
+
+  def __run_command(
+    self,
+    cmd_list: list,
+    raise_on_error=True,
+    timeout=60,
+    verbose=False,
+  ):
+    """
+    Run a shell command using subprocess.run with a timeout.
+    Logs the command and its result. If verbose is enabled,
+    prints command details. Raises an exception on error if raise_on_error is True.
+    """
+    failed = False
+    output = ""
+    cmd_str = " ".join(cmd_list)
+    self.Pd(f"Running command: {cmd_str}", color='d')
+    try:
+      result = subprocess.run(
+        cmd_list,
+        capture_output=True,
+        text=True,
+        timeout=timeout,
+      )
+    except subprocess.TimeoutExpired as e:
+      failed = True
+      self.P(f"Command timed out after {timeout} seconds: {cmd_str}", color='r')
+      if raise_on_error:
+        raise Exception(f"Timeout expired for '{cmd_str}'") from e
+
+    if result.returncode != 0:
+      failed = True
+      self.P(f"Command error: {result.stderr.strip()}", color='r')
+      if raise_on_error:
+        raise Exception(f"Error while running '{cmd_str}': {result.stderr.strip()}")
+
+    if not failed:
+      if verbose:
+        self.Pd(f"Command output: {result.stdout.strip()}")
+      output = result.stdout.strip()
+    return output
+
+
+  def __get_id(self) -> str:
+    """
+    Get the IPFS peer ID via 'ipfs id' (JSON output).
+    Returns the 'ID' field as a string.
+    """
+    output = self.__run_command(["ipfs", "id"])
+    try:
+      data = json.loads(output)
+      self.__ipfs_id = data.get("ID", ERROR_TAG)
+      self.__ipfs_address = data.get("Addresses", [ERROR_TAG,ERROR_TAG])[1]
+      self.__ipfs_agent = data.get("AgentVersion", ERROR_TAG)
+      return data.get("ID", ERROR_TAG)
+    except json.JSONDecodeError:
+      raise Exception("Failed to parse JSON from 'ipfs id' output.")
+
+  @require_ipfs_started
+  def __pin_add(self, cid: str) -> str:
+    """
+    Explicitly pin a CID (and fetch its data) so it appears in the local pinset.
+    """
+    res = self.__run_command(["ipfs", "pin", "add", cid])
+    self.Pd(f"{res}")
+    return res
+
+
+  # Public methods
+
+  def add_json(self, data, fn=None, tempfile=False) -> bool:
+    """
+    Add a JSON object to IPFS.
+    """
+    try:
+      json_data = json.dumps(data)
+      if tempfile:
+        self.Pd("Using tempfile for JSON")
+        with tempfile.NamedTemporaryFile(
+          mode='w', suffix='.json', delete=False
+        ) as f:
+          f.write(json_data)
+          fn = f.name
+      else:
+        fn = self.get_unique_or_complete_upload_name(fn=fn, suffix=".json")
+        self.Pd(f"Using unique name for JSON: {fn}")
+        with open(fn, "w") as f:
+          f.write(json_data)
+      #end if tempfile
+      cid = self.add_file(fn)
+      return cid
+    except Exception as e:
+      self.P(f"Error adding JSON to IPFS: {e}", color='r')
+      return None
+
+
+  def add_yaml(self, data, fn=None, tempfile=False) -> bool:
+    """
+    Add a YAML object to IPFS.
+    """
+    try:
+      import yaml
+      yaml_data = yaml.dump(data)
+      if tempfile:
+        self.Pd("Using tempfile for YAML")
+        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
+          f.write(yaml_data)
+          fn = f.name
+      else:
+        fn = self.get_unique_or_complete_upload_name(fn=fn, suffix=".yaml")
+        self.Pd(f"Using unique name for YAML: {fn}")
+        with open(fn, "w") as f:
+          f.write(yaml_data)
+      cid = self.add_file(fn)
+      return cid
+    except Exception as e:
+      self.P(f"Error adding YAML to IPFS: {e}", color='r')
+      return None
+
+
+  def add_pickle(self, data, fn=None, tempfile=False) -> bool:
+    """
+    Add a Pickle object to IPFS.
+    """
+    try:
+      import pickle
+      if tempfile:
+        self.Pd("Using tempfile for Pickle")
+        with tempfile.NamedTemporaryFile(mode='wb', suffix='.pkl', delete=False) as f:
+          pickle.dump(data, f)
+          fn = f.name
+      else:
+        fn = self.get_unique_or_complete_upload_name(fn=fn, suffix=".pkl")
+        self.Pd(f"Using unique name for pkl: {fn}")
+        with open(fn, "wb") as f:
+          pickle.dump(data, f)
+      cid = self.add_file(fn)
+      return cid
+    except Exception as e:
+      self.P(f"Error adding Pickle to IPFS: {e}", color='r')
+      return None
+
+
+  @require_ipfs_started
+  def add_file(self, file_path: str) -> str:
+    """
+    This method adds a file to IPFS and returns the CID of the wrapped folder.
+
+    Parameters
+    ----------
+    file_path : str
+      The path to the file to be added.
+
+    Returns
+    -------
+    str
+      The CID of the wrapped folder
+
+    """
+    assert os.path.isfile(file_path), f"File not found: {file_path}"
+
+    output = self.__run_command(["ipfs", "add", "-q", "-w", file_path])
+    # "ipfs add -w <file>" typically prints two lines:
+    #   added <hash_of_file> <filename>
+    #   added <hash_of_wrapped_folder> <foldername?>
+    # We want the *last* line's CID (the wrapped folder).
+    lines = output.strip().split("\n")
+    if not lines:
+      raise Exception("No output from 'ipfs add -w -q'")
+    folder_cid = lines[-1].strip()
+    self.__uploaded_files[folder_cid] = file_path
+    # now we pin the folder
+    res = self.__pin_add(folder_cid)
+    self.P(f"Added file {file_path} as <{folder_cid}>")
+    return folder_cid
+
+
+  @require_ipfs_started
+  def get_file(self, cid: str, local_folder: str = None, pin=True) -> str:
+    """
+    Get a file from IPFS by CID and save it to a local folder.
+    If no local folder is provided, the default downloads directory is used.
+    Returns the full path of the downloaded file.
+
+    Parameters
+    ----------
+    cid : str
+      The CID of the file to download.
+
+    local_folder : str
+      The local folder to save the
+
+    """
+    if pin:
+      pin_result = self.__pin_add(cid)
+
+    if local_folder is None:
+      local_folder = self.__downloads_dir # default downloads directory
+      os.makedirs(local_folder, exist_ok=True)
+      local_folder = os.path.join(local_folder, cid) # add the CID as a subfolder
+
+    self.Pd(f"Downloading file {cid} to {local_folder}")
+    self.__run_command(["ipfs", "get", cid, "-o", local_folder])
+    # now we need to get the file from the folder
+    folder_contents = os.listdir(local_folder)
+    if len(folder_contents) != 1:
+      raise Exception(f"Expected one file in {local_folder}, found {folder_contents}")
+    # get the full path of the file
+    out_local_filename = os.path.join(local_folder, folder_contents[0])
+    self.P(f"Downloaded <{cid}> to {out_local_filename}")
+    self.__downloaded_files[cid] = out_local_filename
+    return out_local_filename
+
+
+
+
+
+  @require_ipfs_started
+  def list_pins(self):
+    """
+    List pinned CIDs via 'ipfs pin ls --type=recursive'.
+    Returns a list of pinned CIDs.
+    """
+    output = self.__run_command(["ipfs", "pin", "ls", "--type=recursive"])
+    pinned_cids = []
+    for line in output.split("\n"):
+      line = line.strip()
+      if not line:
+        continue
+      parts = line.split()
+      if len(parts) > 0:
+        pinned_cids.append(parts[0])
+    return pinned_cids
+
+
+
+
+  def maybe_start_ipfs(
+    self,
+    base64_swarm_key: str = None,
+    ipfs_relay: str = None
+  ) -> bool:
+    """
+    This method initializes the IPFS repository if needed, connects to a relay, and starts the daemon.
+    """
+    if self.ipfs_started:
+      return
+
+    if base64_swarm_key is None:
+      base64_swarm_key = os.getenv(IPFSCt.EE_SWARM_KEY_CONTENT_BASE64_ENV_KEY)
+      if base64_swarm_key is not None:
+        self.P("Found IPFS swarm key in environment variable.", color='d')
+
+    if ipfs_relay is None:
+      ipfs_relay = os.getenv(IPFSCt.EE_IPFS_RELAY_ENV_KEY)
+      if ipfs_relay is not None:
+        self.P("Found IPFS relay in environment variable.", color='d')
+
+
+    if not base64_swarm_key or not ipfs_relay:
+      self.P("Missing env values EE_SWARM_KEY_CONTENT_BASE64 and EE_IPFS_RELAY.", color='r')
+      return False
+
+    self.__base64_swarm_key = base64_swarm_key
+    self.__ipfs_relay = ipfs_relay
+    hidden_base64_swarm_key = base64_swarm_key[:8] + "..." + base64_swarm_key[-8:]
+    msg = f"Starting R1FS <{self.__name}>:"
+    msg += f"\n Relay: {self.__ipfs_relay}"
+    msg += f"\n Download: {self.__downloads_dir}"
+    msg += f"\n Upload: {self.__uploads_dir}"
+    msg += f"\n SwarmKey: {hidden_base64_swarm_key}"
+    self.P(msg, color='m')
+
+    ipfs_repo = os.path.expanduser("~/.ipfs")
+    os.makedirs(ipfs_repo, exist_ok=True)
+    config_path = os.path.join(ipfs_repo, "config")
+    swarm_key_path = os.path.join(ipfs_repo, "swarm.key")
+
+    if not os.path.isfile(config_path):
+      # Repository is not initialized; write the swarm key and init.
+      try:
+        decoded_key = base64.b64decode(base64_swarm_key)
+        with open(swarm_key_path, "wb") as f:
+          f.write(decoded_key)
+        os.chmod(swarm_key_path, 0o600)
+        self.P("Swarm key written successfully.", color='g')
+      except Exception as e:
+        self.P(f"Error writing swarm.key: {e}", color='r')
+        return False
+
+      try:
+        self.P("Initializing IPFS repository...")
+        self.__run_command(["ipfs", "init"])
+      except Exception as e:
+        self.P(f"Error during IPFS init: {e}", color='r')
+        return False
+    else:
+      self.P(f"IPFS repository already initialized in {config_path}.", color='g')
+
+    try:
+      self.P("Removing public IPFS bootstrap nodes...")
+      self.__run_command(["ipfs", "bootstrap", "rm", "--all"])
+    except Exception as e:
+      self.P(f"Error removing bootstrap nodes: {e}", color='r')
+
+    # Check if daemon is already running by attempting to get the node id.
+    try:
+      # explicit run no get_id
+      result = self.__run_command(["ipfs", "id"])
+      self.__ipfs_id = json.loads(result)["ID"]
+      self.__ipfs_address = json.loads(result)["Addresses"][1]
+      self.__ipfs_agent = json.loads(result)["AgentVersion"]
+      self.P("IPFS daemon running", color='g')
+
+    except Exception:
+      try:
+        self.P("Starting IPFS daemon in background...")
+        subprocess.Popen(["ipfs", "daemon"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+        time.sleep(5)
+      except Exception as e:
+        self.P(f"Error starting IPFS daemon: {e}", color='r')
+        return
+
+    try:
+      my_id = self.__get_id()
+      assert my_id != ERROR_TAG, "Failed to get IPFS ID."
+      self.P("IPFS ID: " + my_id, color='g')
+      self.P(f"Connecting to relay: {ipfs_relay}")
+      result = self.__run_command(["ipfs", "swarm", "connect", ipfs_relay])
+      relay_ip = ipfs_relay.split("/")[2]
+      if "connect" in result.lower() and "success" in result.lower():
+        self.P(f"R1FS connected to: {relay_ip}", color='g', boxed=True)
+        self.__ipfs_started = True
+      else:
+        self.P("Relay connection result did not indicate success.", color='r')
+    except Exception as e:
+      self.P(f"Error connecting to relay: {e}", color='r')
+
+    return self.ipfs_started
+
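R1FSEngine above is a named singleton (one instance per name, guarded by a class-level lock) that shells out to a local ipfs binary. A hypothetical usage sketch, assuming the ipfs CLI is on PATH and the EE_SWARM_KEY_CONTENT_BASE64 and EE_IPFS_RELAY environment variables are set as the code expects:

from naeural_client.ipfs import R1FSEngine

r1fs = R1FSEngine(name="default", debug=True)  # repeated calls with the same name return the same instance
if r1fs.ipfs_started:
  cid = r1fs.add_json({"hello": "world"})  # CID of the wrapped folder, or None on error
  local_path = r1fs.get_file(cid)          # pins, downloads and returns the local file path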
naeural_client/logging/base_logger.py
CHANGED
@@ -1122,7 +1122,7 @@ class BaseLogger(object):
   def _maybe_migrate_folder(self):
     user_base_folder = BaseLogger.get_user_folder(as_str=True, include_sdk_home=True)
     if user_base_folder in self._base_folder:
-      BaseLogger.maybe_migrate_user_folder(
+      BaseLogger.maybe_migrate_user_folder()
     return
 
   def _configure_data_and_dirs(self, config_file, config_file_encoding=None):
naeural_client/utils/config.py
CHANGED
@@ -327,7 +327,7 @@ def maybe_init_config():
   """
   config_file = get_user_config_file()
   if not config_file.exists():
-    BaseLogger.maybe_migrate_user_folder(
+    BaseLogger.maybe_migrate_user_folder()
   if not config_file.exists():
     log_with_color(f"No configuration file found at {config_file}. Initializing configuration...", color="y")
     reset_config(keep_existing=True)
naeural_client/utils/oracle_sync/oracle_tester.py
CHANGED
@@ -11,11 +11,10 @@ from collections import defaultdict
 from naeural_client import Logger
 from naeural_client.bc import DefaultBlockEngine
 from naeural_client.utils.config import log_with_color, get_user_folder
-
+from naeural_client.const.evm_net import EvmNetData
 
 
 class OracleTesterConstants:
-  BASE_URL = "https://naeural-oracle.ngrok.app"
   TEST_ENDPOINT = "/node_epochs_range"
   CURRENT_EPOCH_ENDPOINT = "/current_epoch"
   ACTIVE_NODES_ENDPOINT = "/active_nodes_list"
@@ -38,7 +37,6 @@ class OracleTester:
   ):
     self.bc = bce
     self.log = log
-    self.BASE_URL = ct.BASE_URL
     self.TEST_ENDPOINT = ct.TEST_ENDPOINT
     self.CURRENT_EPOCH_ENDPOINT = ct.CURRENT_EPOCH_ENDPOINT
     self.ACTIVE_NODES_ENDPOINT = ct.ACTIVE_NODES_ENDPOINT
@@ -56,6 +54,27 @@ class OracleTester:
 
   """UTILS"""
   if True:
+    def get_base_url(self, network=None):
+      """
+      Get the base URL for the oracle API server.
+      Parameters
+      ----------
+      network : str or None
+        The network for which to get the base URL. Default None.
+        If None, the network from the user config will be used.
+
+      Returns
+      -------
+      str
+        The base URL for the oracle API server.
+      """
+      network = network or self.bc.evm_network
+      res = self.bc.get_network_data(network=network).get(EvmNetData.EE_ORACLE_API_URL_KEY)
+      if res is None:
+        msg_end = f" for the network: {network}" if network is not None else ""
+        raise ValueError(f"Failed to get the base URL{msg_end}.")
+      return res
+
     def maybe_register_node(self, node_addr: str, eth_address: str, alias: str = None):
       if node_addr is None:
         return
@@ -239,7 +258,7 @@ class OracleTester:
       return stats_dict
   """END RESPONSE HANDLING"""
 
-  def gather(self, nodes, request_kwargs=None, debug=False, rounds=None):
+  def gather(self, nodes, request_kwargs=None, debug=False, rounds=None, network=None):
     """
     Gather data from the oracle server for the given nodes.
 
@@ -253,6 +272,12 @@ class OracleTester:
       The request kwargs to be used for the request. Default None.
     debug : bool
       Whether to enable debug mode or not. If enabled the function will exit after one request round.
+    rounds : int
+      The number of rounds to be executed. Default None. If None, self.max_request_rounds will be used.
+    network : str
+      The network for which to gather data. Default None.
+      If None, the network from the user config will be used.
+      In case the network is not found in the user config, the testnet will be used.
 
     Returns
     -------
@@ -266,7 +291,7 @@ class OracleTester:
     while not self.done(rounds):
       try:
         self.P(f'Starting request round {self.request_rounds + 1} for {len(nodes)} nodes...')
-        current_url = self.
+        current_url = self.get_base_url(network=network) + self.TEST_ENDPOINT
         # TODO: maybe shuffle the nodes list in order to avoid
         # the same order of requests in each round
         # relevant if the number of nodes is divisible by the number of oracles.
@@ -309,7 +334,7 @@ class OracleTester:
     self.P(f'Finished gathering data for {len(nodes)} nodes and {self.max_request_rounds}.')
     return responses, stats_dict
 
-  def gather_and_compare(self, nodes, request_kwargs=None, debug=False, rounds=None):
+  def gather_and_compare(self, nodes, request_kwargs=None, debug=False, rounds=None, network=None):
     """
     Gather data from the oracle server for the given nodes and compare the results between oracles.
 
@@ -323,6 +348,12 @@ class OracleTester:
       The request kwargs to be used for the request. Default None.
     debug : bool
      Whether to enable debug mode or not. If enabled the function will exit after one request round.
+    rounds : int
+      The number of rounds to be executed. Default None. If None, self.max_request_rounds will be used.
+    network : str
+      The network for which to gather data. Default None.
+      If None, the network from the user config will be used.
+      In case the network is not found in the user config, the testnet will be used.
 
     Returns
     -------
@@ -333,7 +364,8 @@ class OracleTester:
       nodes=nodes,
       request_kwargs=request_kwargs,
       debug=debug,
-      rounds=rounds
+      rounds=rounds,
+      network=network
     )
     # Statistics for each node of each epoch
    epochs_nodes_stats = {}
@@ -381,15 +413,15 @@ class OracleTester:
 
     return responses, stats_dict
 
-  def get_current_epoch(self):
-    epoch_url = self.
+  def get_current_epoch(self, network=None):
+    epoch_url = self.get_base_url(network=network) + self.CURRENT_EPOCH_ENDPOINT
     response = self.make_request(epoch_url)
     if response:
      return response.get("result", {}).get("current_epoch", 1)
     return None
 
-  def get_active_nodes(self):
-    active_nodes_url = self.
+  def get_active_nodes(self, network=None):
+    active_nodes_url = self.get_base_url(network=network) + self.ACTIVE_NODES_ENDPOINT
     response = self.make_request(active_nodes_url)
     result = []
     if response:
@@ -589,10 +621,15 @@ def oracle_tester_init(silent=True, **kwargs):
   return tester
 
 def test_commands():
-
-
-
-
+  from naeural_client.utils.config import load_user_defined_config
+  load_user_defined_config()
+  tester = oracle_tester_init(max_requests_rounds=30)
+  start = 10
+  end = 10
+  # node_eth_addr = "0x7C07758C23DF14c2fF4b016F0ad58F2D4aF329a7" # r1s-ssj-1
+  # node_eth_addr = "0xdc4fDFd5B86aeA7BaB17d4742B7c39A2728Ff59B" # r1s-02
+  # node_eth_addr = "0x93B04EF1152D81A0847C2272860a8a5C70280E14" # tr1s-aid02
+  node_eth_addr = '0x37379B80c7657620E5631832c4437B51D67A88cB' # dr1s-db-1
 
   # Single round
   tester.P(f'Test single round: Epochs {start} to {end}', show=True)
@@ -601,12 +638,12 @@
 
   # Multiple rounds
   tester.P(f'Test multiple rounds: Epochs {start} to {end}', show=True)
-  res = tester.execute_command(node_eth_addr=node_eth_addr, start=start, end=end, rounds=
+  res = tester.execute_command(node_eth_addr=node_eth_addr, start=start, end=end, rounds=3)
   handle_command_results(res)
 
   # Debug mode
   tester.P(f'Test debug mode: Epochs {start} to {end}', show=True)
-  res = tester.execute_command(node_eth_addr=node_eth_addr, start=
+  res = tester.execute_command(node_eth_addr=node_eth_addr, start=5, end=7, debug=True)
   handle_command_results(res)
   return
 
@@ -694,8 +731,8 @@
 
 # Main loop
 def main():
-  TEST_COMMANDS =
-  TEST_ORACLE =
+  TEST_COMMANDS = True
+  TEST_ORACLE = False
   if TEST_COMMANDS:
     test_commands()
 
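With these hunks the oracle base URL is no longer the hard-coded ngrok BASE_URL; it is resolved per network via get_base_url(), which reads the new EE_ORACLE_API_URL_KEY entry. A hypothetical call sketch (the setup mirrors test_commands() above):

from naeural_client.utils.config import load_user_defined_config
from naeural_client.utils.oracle_sync.oracle_tester import oracle_tester_init

load_user_defined_config()
tester = oracle_tester_init()
print(tester.get_base_url(network="testnet"))       # -> https://testnet-oracle.ratio1.ai
print(tester.get_current_epoch(network="testnet"))  # queries /current_epoch on that URL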
{naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: naeural_client
-Version: 2.7.30
+Version: 2.7.40
 Summary: `naeural_client` is the Python SDK required for client app development for the Naeural Edge Protocol Edge Protocol framework
 Project-URL: Homepage, https://github.com/NaeuralEdgeProtocol/naeural_client
 Project-URL: Bug Tracker, https://github.com/NaeuralEdgeProtocol/naeural_client/issues
{naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 naeural_client/__init__.py,sha256=YimqgDbjLuywsf8zCWE0EaUXH4MBUrqLxt0TDV558hQ,632
-naeural_client/_ver.py,sha256=
+naeural_client/_ver.py,sha256=w-s-yz0eHjAKD9mUqSByv0yLn__Zw5dFOhJPHU7DI3w,331
 naeural_client/base_decentra_object.py,sha256=C4iwZTkhKNBS4VHlJs5DfElRYLo4Q9l1V1DNVSk1fyQ,4412
 naeural_client/plugins_manager_mixin.py,sha256=X1JdGLDz0gN1rPnTN_5mJXR8JmqoBFQISJXmPR9yvCo,11106
 naeural_client/base/__init__.py,sha256=hACh83_cIv7-PwYMM3bQm2IBmNqiHw-3PAfDfAEKz9A,259
@@ -38,7 +38,7 @@ naeural_client/const/apps.py,sha256=ePBiJXLuPfFOKuw-LJrT9OWbaodU7QApfDurIPNDoB4,
 naeural_client/const/base.py,sha256=zfJyGGdrNz_OA5i_6cQxH2lEeJ4PO3092NrwM_gZ_U8,5670
 naeural_client/const/comms.py,sha256=La6JXWHexH8CfcBCKyT4fCIoeaoZlcm7KtZ57ab4ZgU,2201
 naeural_client/const/environment.py,sha256=RpdDhDgB8NgRoFTk28eODigf9y0WcT9lul6mBOD029w,879
-naeural_client/const/evm_net.py,sha256=
+naeural_client/const/evm_net.py,sha256=IEX8t7OcDwONfYcqooZjmgsbFeWy6VynKHK-7nZD0KM,2891
 naeural_client/const/formatter.py,sha256=AW3bWlqf39uaqV4BBUuW95qKYfF2OkkU4f9hy3kSVhM,200
 naeural_client/const/heartbeat.py,sha256=xHZBX_NzHTklwA2_AEKR0SGdlbavMT4nirqjQg8WlTU,2550
 naeural_client/const/misc.py,sha256=VDCwwpf5bl9ltx9rzT2WPVP8B3mZFRufU1tSS5MO240,413
@@ -60,8 +60,10 @@ naeural_client/io_formatter/default/__init__.py,sha256=zOm2tsOk6fXvyCXxsXDnsNs6B
 naeural_client/io_formatter/default/a_dummy.py,sha256=qr9eUizQ-NN5jdXVzkaZKMaf9KS41MpPN_iDoTN_Qd0,1148
 naeural_client/io_formatter/default/aixp1.py,sha256=MX0TeUR4APA-qN3vUC6uzcz8Pssz5lgrQWo7td5Ri1A,3052
 naeural_client/io_formatter/default/default.py,sha256=gEy78cP2D5s0y8vQh4aHuxqz7D10gGfuiKF311QhrpE,494
+naeural_client/ipfs/__init__.py,sha256=vXEDLUNUO6lOTMGa8iQ9Zf7ajIQq9GZuvYraAHt3meE,38
+naeural_client/ipfs/r1fs.py,sha256=u_Dy05wH_Xt4GTvxjLwSoz5hzRz0Tc2Tr-0c2BxwXtg,16503
 naeural_client/logging/__init__.py,sha256=b79X45VC6c37u32flKB2GAK9f-RR0ocwP0JDCy0t7QQ,33
-naeural_client/logging/base_logger.py,sha256=
+naeural_client/logging/base_logger.py,sha256=qqqMX30Vmh5Dz8YYaeL_ainQPTP5FsX1Y4QMbsIG5Rg,69599
 naeural_client/logging/small_logger.py,sha256=m12hCb_H4XifJYYfgCAOUDkcXm-h4pSODnFf277OFVI,2937
 naeural_client/logging/logger_mixins/__init__.py,sha256=yQO7umlRvz63FeWpi-F9GRmC_MOHcNW6R6pwvZZBy3A,600
 naeural_client/logging/logger_mixins/class_instance_mixin.py,sha256=xUXE2VZgmrlrSrvw0f6GF1jlTnVLeVkIiG0bhlBfq3o,2741
@@ -83,11 +85,11 @@ naeural_client/logging/tzlocal/win32.py,sha256=zBoj0vFVrGhnCm_f7xmYzGym4-fV-4Ij2
 naeural_client/logging/tzlocal/windows_tz.py,sha256=Sv9okktjZJfRGGUOOppsvQuX_eXyXUxkSKCAFmWT9Hw,34203
 naeural_client/utils/__init__.py,sha256=mAnke3-MeRzz3nhQvhuHqLnpaaCSmDxicd7Ck9uwpmI,77
 naeural_client/utils/comm_utils.py,sha256=4cS9llRr_pK_3rNgDcRMCQwYPO0kcNU7AdWy_LtMyCY,1072
-naeural_client/utils/config.py,sha256=
+naeural_client/utils/config.py,sha256=lAbWe3UMi40BOdsAIZIb-fYtb4LwG3MIYg0EOA1ITr8,10340
 naeural_client/utils/dotenv.py,sha256=_AgSo35n7EnQv5yDyu7C7i0kHragLJoCGydHjvOkrYY,2008
-naeural_client/utils/oracle_sync/oracle_tester.py,sha256=
-naeural_client-2.7.
-naeural_client-2.7.
-naeural_client-2.7.
-naeural_client-2.7.
-naeural_client-2.7.
+naeural_client/utils/oracle_sync/oracle_tester.py,sha256=X-923ccjkr6_kzbbiuAAcWSIhMtBDOH2VURjTh55apQ,27235
+naeural_client-2.7.40.dist-info/METADATA,sha256=cRiEi7AbNAdspHTFOmzlUxQwk6MzIoKtTYwudP65_fo,12354
+naeural_client-2.7.40.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+naeural_client-2.7.40.dist-info/entry_points.txt,sha256=CTua17GUrRa4aXeafezGC9TiWKGKQzwTjQmB2jyj22g,91
+naeural_client-2.7.40.dist-info/licenses/LICENSE,sha256=cvOsJVslde4oIaTCadabXnPqZmzcBO2f2zwXZRmJEbE,11311
+naeural_client-2.7.40.dist-info/RECORD,,
{naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/WHEEL
File without changes
{naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/entry_points.txt
File without changes
{naeural_client-2.7.30.dist-info → naeural_client-2.7.40.dist-info}/licenses/LICENSE
File without changes