naeural-client 2.7.31__py3-none-any.whl → 2.7.41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- naeural_client/_ver.py +1 -1
- naeural_client/ipfs/__init__.py +1 -0
- naeural_client/ipfs/r1fs.py +576 -0
- {naeural_client-2.7.31.dist-info → naeural_client-2.7.41.dist-info}/METADATA +1 -1
- {naeural_client-2.7.31.dist-info → naeural_client-2.7.41.dist-info}/RECORD +8 -6
- {naeural_client-2.7.31.dist-info → naeural_client-2.7.41.dist-info}/WHEEL +0 -0
- {naeural_client-2.7.31.dist-info → naeural_client-2.7.41.dist-info}/entry_points.txt +0 -0
- {naeural_client-2.7.31.dist-info → naeural_client-2.7.41.dist-info}/licenses/LICENSE +0 -0
naeural_client/_ver.py
CHANGED
@@ -0,0 +1 @@
|
|
1
|
+
from .r1fs import R1FSEngine, log_info
|
@@ -0,0 +1,576 @@
|
|
1
|
+
"""
|
2
|
+
Ratio1 base IPFS utility functions.
|
3
|
+
|
4
|
+
"""
|
5
|
+
import base64
import functools
import json
import os
import subprocess
import tempfile
import time
import uuid

from datetime import datetime
from threading import Lock
|
15
|
+
|
16
|
+
__VER__ = "0.2.1"
|
17
|
+
|
18
|
+
|
19
|
+
class IPFSCt:
  # Names of the environment variables the engine falls back to when the
  # swarm key / relay address are not passed in explicitly.
  EE_IPFS_RELAY_ENV_KEY = "EE_IPFS_RELAY"
  EE_SWARM_KEY_CONTENT_BASE64_ENV_KEY = "EE_SWARM_KEY_CONTENT_BASE64"
  # Subfolder names for transfers (placed under the logger's output folder
  # when available, else under the TEMP_* fallbacks below).
  R1FS_DOWNLOADS = "ipfs_downloads"
  R1FS_UPLOADS = "ipfs_uploads"
  TEMP_DOWNLOAD = os.path.join("./_local_cache/_output", R1FS_DOWNLOADS)
  TEMP_UPLOAD = os.path.join("./_local_cache/_output", R1FS_UPLOADS)

  # Per-command subprocess timeout used by __run_command.
  TIMEOUT = 90  # seconds
  # Value written to the IPFS Reprovider.Interval config key.
  REPROVIDER = "1m"
|
29
|
+
|
30
|
+
|
31
|
+
# Sentinel returned/stored when a field cannot be read from the IPFS CLI output.
ERROR_TAG = "Unknown"

# ANSI escape sequences used by log_info for colored terminal output.
COLOR_CODES = {
  "g": "\033[92m",
  "r": "\033[91m",
  "b": "\033[94m",
  "y": "\033[93m",
  "m": "\033[95m",
  'd': "\033[90m",  # dark gray
  "reset": "\033[0m"
}

def log_info(msg: str, color="reset", **kwargs):
  """
  Print a timestamped message to stdout, optionally wrapped in an ANSI color.

  Unknown color keys fall back to the reset (uncolored) code. Extra keyword
  arguments are accepted and ignored so callers may pass logger-style flags.
  """
  prefix = COLOR_CODES.get(color, COLOR_CODES["reset"])
  stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
  print(f"{prefix}[{stamp}] {msg}{COLOR_CODES['reset']}", flush=True)
  return
|
49
|
+
|
50
|
+
class SimpleLogger:
  """Fallback logger used when no SDK logger is supplied; mimics `Logger.P`."""

  def P(self, *args, **kwargs):
    # Delegate straight to the module-level colored print helper.
    log_info(*args, **kwargs)
    return
|
54
|
+
|
55
|
+
def require_ipfs_started(method):
  """
  Decorator to ensure the IPFS daemon is started before executing the method.

  Parameters
  ----------
  method : callable
    The method to be decorated.

  Returns
  -------
  callable
    The wrapped method that checks the instance's `ipfs_started` property.

  Raises
  ------
  RuntimeError
    If the instance's `ipfs_started` property is False at call time.
  """
  # functools.wraps preserves the wrapped method's __name__/__doc__ so
  # logging and introspection report the real method, not "wrapper".
  @functools.wraps(method)
  def wrapper(self, *args, **kwargs):
    if not self.ipfs_started:
      raise RuntimeError(f"{method.__name__} FAILED. R1FS.ipfs_started=={self.ipfs_started}")
    return method(self, *args, **kwargs)
  return wrapper
|
79
|
+
|
80
|
+
|
81
|
+
|
82
|
+
class R1FSEngine:
|
83
|
+
_lock: Lock = Lock()
|
84
|
+
__instances = {}
|
85
|
+
|
86
|
+
def __new__(
  cls,
  name: str = "default",
  logger: any = None,
  downloads_dir: str = None,
  uploads_dir: str = None,
  base64_swarm_key: str = None,
  ipfs_relay: str = None,
  debug=False,
):
  """
  Named-singleton constructor: at most one R1FSEngine exists per `name`.

  The first call for a given name builds and starts the engine via `_build`;
  subsequent calls return the cached instance (their other arguments are
  ignored). The class-level lock makes instance creation thread-safe.
  """
  with cls._lock:
    if name not in cls.__instances:
      instance = super(R1FSEngine, cls).__new__(cls)
      # _build does the real initialization; __init__ is intentionally
      # not used so repeated constructor calls don't re-initialize.
      instance._build(
        name=name, logger=logger, downloads_dir=downloads_dir, uploads_dir=uploads_dir,
        base64_swarm_key=base64_swarm_key, ipfs_relay=ipfs_relay, debug=debug,
      )
      cls.__instances[name] = instance
    else:
      instance = cls.__instances[name]
  return instance
|
107
|
+
|
108
|
+
def _build(
  self,
  name: str = "default",
  logger: any = None,
  downloads_dir: str = None,
  uploads_dir: str = None,
  base64_swarm_key: str = None,
  ipfs_relay: str = None,
  debug=False,
):
  """
  Initialize the IPFS wrapper with a given logger function.
  By default, it uses the built-in print function for logging.

  Called once per named instance from `__new__`; ends by invoking
  `startup()`, which resolves directories and attempts to start IPFS.
  """
  self.__name = name
  if logger is None:
    # No SDK logger supplied: fall back to the print-based SimpleLogger.
    logger = SimpleLogger()

  self.logger = logger

  # Connection state; populated by maybe_start_ipfs / __get_id.
  self.__ipfs_started = False
  self.__ipfs_address = None
  self.__ipfs_id = None
  self.__ipfs_agent = None
  # Transfer bookkeeping: CID -> local path.
  self.__uploaded_files = {}
  self.__downloaded_files = {}
  # May be None here; startup()/maybe_start_ipfs fall back to env vars.
  self.__base64_swarm_key = base64_swarm_key
  self.__ipfs_relay = ipfs_relay
  self.__downloads_dir = downloads_dir
  self.__uploads_dir = uploads_dir
  self.__debug = debug

  self.startup()
  return
|
142
|
+
|
143
|
+
def startup(self):
  """
  Resolve the download/upload directories and attempt to start IPFS.

  Directory resolution prefers the logger's output folder (SDK `Logger`
  exposes `get_output_folder`); otherwise local fallback paths are used.
  """
  if self.__downloads_dir is None:
    if hasattr(self.logger, "get_output_folder"):
      self.__downloads_dir = os.path.join(
        self.logger.get_output_folder(),
        IPFSCt.R1FS_DOWNLOADS
      )
    else:
      self.__downloads_dir = IPFSCt.TEMP_DOWNLOAD
  #end if downloads_dir
  os.makedirs(self.__downloads_dir, exist_ok=True)

  if self.__uploads_dir is None:
    if hasattr(self.logger, "get_output_folder"):
      self.__uploads_dir = os.path.join(
        self.logger.get_output_folder(),
        IPFSCt.R1FS_UPLOADS
      )
    else:
      self.__uploads_dir = IPFSCt.TEMP_UPLOAD
  os.makedirs(self.__uploads_dir, exist_ok=True)

  # Swarm key / relay may still be None; maybe_start_ipfs falls back to
  # environment variables and is a no-op if IPFS is already started.
  self.maybe_start_ipfs(
    base64_swarm_key=self.__base64_swarm_key,
    ipfs_relay=self.__ipfs_relay,
  )
  return
|
171
|
+
|
172
|
+
|
173
|
+
def P(self, s, *args, **kwargs):
  """Forward a log message to the attached logger, prefixed with the R1FS tag."""
  self.logger.P("[R1FS] " + s, *args, **kwargs)
  return
|
177
|
+
|
178
|
+
def Pd(self, s, *args, **kwargs):
  """Debug-level log: forwarded to the logger only when debug mode is enabled."""
  if not self.__debug:
    return
  self.logger.P("[R1FS][DEBUG] " + s, *args, **kwargs)
  return
|
183
|
+
|
184
|
+
@property
def ipfs_id(self):
  # Peer ID reported by `ipfs id` (None until the daemon has been queried).
  return self.__ipfs_id

@property
def ipfs_address(self):
  # Second entry of the `ipfs id` "Addresses" list (None until queried).
  return self.__ipfs_address

@property
def ipfs_agent(self):
  # "AgentVersion" string from `ipfs id` (None until queried).
  return self.__ipfs_agent

@property
def ipfs_started(self):
  # True only after a successful relay connection in maybe_start_ipfs().
  return self.__ipfs_started

@property
def uploaded_files(self):
  # Mapping of wrapped-folder CID -> local file path added via add_file().
  return self.__uploaded_files

@property
def downloaded_files(self):
  # Mapping of CID -> local file path fetched via get_file().
  return self.__downloaded_files
|
207
|
+
|
208
|
+
def _get_unique_name(self, prefix="r1fs", suffix=""):
  # Collision-unlikely random name of the form "<prefix>_<8 uuid chars><suffix>".
  str_id = str(uuid.uuid4()).replace("-", "")[:8]
  return f"{prefix}_{str_id}{suffix}"

def _get_unique_upload_name(self, prefix="r1fs", suffix=""):
  # Unique name anchored inside the configured uploads directory.
  return os.path.join(self.__uploads_dir, self._get_unique_name(prefix, suffix))

def _get_unique_or_complete_upload_name(self, fn=None, prefix="r1fs", suffix=""):
  # A bare filename (no directory component) is anchored in the uploads dir;
  # anything else falls back to a generated unique name.
  # NOTE(review): an `fn` that already contains a directory is silently
  # discarded rather than used as-is — confirm this is intended.
  if fn is not None and os.path.dirname(fn) == "":
    return os.path.join(self.__uploads_dir, fn)
  return self._get_unique_upload_name(prefix, suffix)
|
219
|
+
|
220
|
+
def __set_reprovider_interval(self):
  # Command to set the Reprovider.Interval to 1 minute
  # (IPFSCt.REPROVIDER == "1m") so new content is announced quickly.
  cmd = ["ipfs", "config", "--json", "Reprovider.Interval", f'"{IPFSCt.REPROVIDER}"']
  # Return value intentionally unused; __run_command raises on failure.
  result = self.__run_command(cmd)
  return
|
225
|
+
|
226
|
+
|
227
|
+
def __set_relay(self):
  # Enable relayed connections (Swarm.DisableRelay = false) so the node can
  # communicate through the private relay.
  result = self.__run_command(
    ["ipfs", "config", "--json", "Swarm.DisableRelay", "false"]
  )
  return
|
232
|
+
|
233
|
+
|
234
|
+
def __run_command(
  self,
  cmd_list: list,
  raise_on_error=True,
  timeout=IPFSCt.TIMEOUT,
  verbose=False,
):
  """
  Run a shell command using subprocess.run with a timeout.
  Logs the command and its result. If verbose is enabled,
  prints command details. Raises an exception on error if raise_on_error is True.

  Returns
  -------
  str
    The command's stripped stdout on success, "" on failure when
    `raise_on_error` is False.
  """
  failed = False
  output = ""
  cmd_str = " ".join(cmd_list)
  self.Pd(f"Running command: {cmd_str}", color='d')
  try:
    result = subprocess.run(
      cmd_list,
      capture_output=True,
      text=True,
      timeout=timeout,
    )
  except subprocess.TimeoutExpired as e:
    failed = True
    self.P(f"Command timed out after {timeout} seconds: {cmd_str}", color='r')
    if raise_on_error:
      raise Exception(f"Timeout expired for '{cmd_str}'") from e
    # BUGFIX: `result` is unbound after a timeout; returning here avoids
    # the NameError the original code hit at the returncode check below
    # when raise_on_error was False.
    return output

  if result.returncode != 0:
    failed = True
    self.P(f"Command error: {result.stderr.strip()}", color='r')
    if raise_on_error:
      raise Exception(f"Error while running '{cmd_str}': {result.stderr.strip()}")

  if not failed:
    if verbose:
      self.Pd(f"Command output: {result.stdout.strip()}")
    output = result.stdout.strip()
  return output
|
274
|
+
|
275
|
+
|
276
|
+
def __get_id(self) -> str:
  """
  Get the IPFS peer ID via 'ipfs id' (JSON output).
  Returns the 'ID' field as a string.

  Side effect: caches the peer ID, one multiaddr and the agent version on
  the instance (exposed through the ipfs_id/ipfs_address/ipfs_agent
  properties).
  """
  output = self.__run_command(["ipfs", "id"])
  try:
    data = json.loads(output)
    self.__ipfs_id = data.get("ID", ERROR_TAG)
    # NOTE(review): index [1] assumes `ipfs id` reports at least two
    # addresses — confirm for nodes with a single listen address.
    self.__ipfs_address = data.get("Addresses", [ERROR_TAG,ERROR_TAG])[1]
    self.__ipfs_agent = data.get("AgentVersion", ERROR_TAG)
    return data.get("ID", ERROR_TAG)
  except json.JSONDecodeError:
    raise Exception("Failed to parse JSON from 'ipfs id' output.")
|
290
|
+
|
291
|
+
@require_ipfs_started
def __pin_add(self, cid: str) -> str:
  """
  Explicitly pin a CID (and fetch its data) so it appears in the local pinset.

  Returns the raw output of `ipfs pin add`.
  """
  res = self.__run_command(["ipfs", "pin", "add", cid])
  # Log the raw pin output at debug level only.
  self.Pd(f"{res}")
  return res
|
299
|
+
|
300
|
+
|
301
|
+
# Public methods
|
302
|
+
|
303
|
+
def add_json(self, data, fn=None, tempfile=False) -> str:
  """
  Serialize `data` as JSON, write it to a file and add it to IPFS.

  Parameters
  ----------
  data : object
    Any JSON-serializable object.
  fn : str, optional
    Bare file name to use inside the uploads dir; auto-generated if None.
  tempfile : bool, optional
    If True, write to an OS temporary file instead of the uploads dir.

  Returns
  -------
  str
    The CID of the wrapped folder, or None on any error.
  """
  try:
    json_data = json.dumps(data)
    if tempfile:
      # BUGFIX: the boolean `tempfile` parameter shadows the stdlib
      # `tempfile` module, so the original `tempfile.NamedTemporaryFile`
      # raised AttributeError on the bool and this branch always
      # returned None. Import the class directly instead.
      from tempfile import NamedTemporaryFile
      self.Pd("Using tempfile for JSON")
      with NamedTemporaryFile(
        mode='w', suffix='.json', delete=False
      ) as f:
        f.write(json_data)
        fn = f.name
    else:
      fn = self._get_unique_or_complete_upload_name(fn=fn, suffix=".json")
      self.Pd(f"Using unique name for JSON: {fn}")
      with open(fn, "w") as f:
        f.write(json_data)
    #end if tempfile
    cid = self.add_file(fn)
    return cid
  except Exception as e:
    self.P(f"Error adding JSON to IPFS: {e}", color='r')
    return None
|
327
|
+
|
328
|
+
|
329
|
+
def add_yaml(self, data, fn=None, tempfile=False) -> str:
  """
  Serialize `data` as YAML, write it to a file and add it to IPFS.

  Parameters
  ----------
  data : object
    Any YAML-serializable object.
  fn : str, optional
    Bare file name to use inside the uploads dir; auto-generated if None.
  tempfile : bool, optional
    If True, write to an OS temporary file instead of the uploads dir.

  Returns
  -------
  str
    The CID of the wrapped folder, or None on any error.
  """
  try:
    import yaml
    yaml_data = yaml.dump(data)
    if tempfile:
      # BUGFIX: the boolean `tempfile` parameter shadows the stdlib
      # `tempfile` module; the original `tempfile.NamedTemporaryFile`
      # raised AttributeError here and always returned None.
      from tempfile import NamedTemporaryFile
      self.Pd("Using tempfile for YAML")
      with NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
        f.write(yaml_data)
        fn = f.name
    else:
      fn = self._get_unique_or_complete_upload_name(fn=fn, suffix=".yaml")
      self.Pd(f"Using unique name for YAML: {fn}")
      with open(fn, "w") as f:
        f.write(yaml_data)
    cid = self.add_file(fn)
    return cid
  except Exception as e:
    self.P(f"Error adding YAML to IPFS: {e}", color='r')
    return None
|
351
|
+
|
352
|
+
|
353
|
+
def add_pickle(self, data, fn=None, tempfile=False) -> str:
  """
  Pickle `data`, write it to a file and add it to IPFS.

  Parameters
  ----------
  data : object
    Any picklable object.
  fn : str, optional
    Bare file name to use inside the uploads dir; auto-generated if None.
  tempfile : bool, optional
    If True, write to an OS temporary file instead of the uploads dir.

  Returns
  -------
  str
    The CID of the wrapped folder, or None on any error.
  """
  try:
    import pickle
    if tempfile:
      # BUGFIX: the boolean `tempfile` parameter shadows the stdlib
      # `tempfile` module; the original `tempfile.NamedTemporaryFile`
      # raised AttributeError here and always returned None.
      from tempfile import NamedTemporaryFile
      self.Pd("Using tempfile for Pickle")
      with NamedTemporaryFile(mode='wb', suffix='.pkl', delete=False) as f:
        pickle.dump(data, f)
        fn = f.name
    else:
      fn = self._get_unique_or_complete_upload_name(fn=fn, suffix=".pkl")
      self.Pd(f"Using unique name for pkl: {fn}")
      with open(fn, "wb") as f:
        pickle.dump(data, f)
    cid = self.add_file(fn)
    return cid
  except Exception as e:
    self.P(f"Error adding Pickle to IPFS: {e}", color='r')
    return None
|
374
|
+
|
375
|
+
|
376
|
+
@require_ipfs_started
def add_file(self, file_path: str) -> str:
  """
  This method adds a file to IPFS and returns the CID of the wrapped folder.

  Parameters
  ----------
  file_path : str
    The path to the file to be added.

  Returns
  -------
  str
    The CID of the wrapped folder

  Raises
  ------
  AssertionError
    If `file_path` does not point to an existing file.
  """
  assert os.path.isfile(file_path), f"File not found: {file_path}"

  output = self.__run_command(["ipfs", "add", "-q", "-w", file_path])
  # "ipfs add -w <file>" typically prints two lines:
  #   added <hash_of_file> <filename>
  #   added <hash_of_wrapped_folder> <foldername?>
  # We want the *last* line's CID (the wrapped folder).
  # NOTE(review): "".split("\n") yields [''], so `lines` is never truly
  # empty — the guard below only fires if split behavior changes.
  lines = output.strip().split("\n")
  if not lines:
    raise Exception("No output from 'ipfs add -w -q'")
  folder_cid = lines[-1].strip()
  self.__uploaded_files[folder_cid] = file_path
  # now we pin the folder so it stays in the local pinset
  res = self.__pin_add(folder_cid)
  self.P(f"Added file {file_path} as <{folder_cid}>")
  return folder_cid
|
408
|
+
|
409
|
+
|
410
|
+
@require_ipfs_started
def get_file(self, cid: str, local_folder: str = None, pin=True) -> str:
  """
  Get a file from IPFS by CID and save it to a local folder.
  If no local folder is provided, the default downloads directory is used.
  Returns the full path of the downloaded file.

  Parameters
  ----------
  cid : str
    The CID of the file to download.

  local_folder : str
    The local folder to save the downloaded content in; defaults to the
    engine's downloads directory, with the CID added as a subfolder.

  pin : bool
    If True, pin the CID locally before downloading.

  Raises
  ------
  Exception
    If the downloaded folder does not contain exactly one file.
  """
  if pin:
    # Pin first so the data is fetched into the local pinset.
    pin_result = self.__pin_add(cid)

  if local_folder is None:
    local_folder = self.__downloads_dir  # default downloads directory
    os.makedirs(local_folder, exist_ok=True)
    local_folder = os.path.join(local_folder, cid)  # add the CID as a subfolder

  self.Pd(f"Downloading file {cid} to {local_folder}")
  start_time = time.time()
  self.__run_command(["ipfs", "get", cid, "-o", local_folder])
  elapsed_time = time.time() - start_time
  # now we need to get the file from the folder (add_file wraps uploads in
  # a single-file folder, so exactly one entry is expected).
  folder_contents = os.listdir(local_folder)
  if len(folder_contents) != 1:
    raise Exception(f"Expected one file in {local_folder}, found {folder_contents}")
  # get the full path of the file
  out_local_filename = os.path.join(local_folder, folder_contents[0])
  self.P(f"Downloaded in {elapsed_time:.1f}s <{cid}> to {out_local_filename}")
  self.__downloaded_files[cid] = out_local_filename
  return out_local_filename
|
447
|
+
|
448
|
+
|
449
|
+
|
450
|
+
|
451
|
+
|
452
|
+
@require_ipfs_started
def list_pins(self):
  """
  List pinned CIDs via 'ipfs pin ls --type=recursive'.
  Returns a list of pinned CIDs.
  """
  output = self.__run_command(["ipfs", "pin", "ls", "--type=recursive"])
  # The first whitespace-separated token of each non-empty line is the CID.
  return [entry.split()[0] for entry in output.split("\n") if entry.strip()]
|
468
|
+
|
469
|
+
|
470
|
+
|
471
|
+
|
472
|
+
def maybe_start_ipfs(
  self,
  base64_swarm_key: str = None,
  ipfs_relay: str = None
) -> bool:
  """
  This method initializes the IPFS repository if needed, connects to a relay, and starts the daemon.

  Idempotent: returns immediately when the engine is already started.
  Missing arguments fall back to the EE_SWARM_KEY_CONTENT_BASE64 and
  EE_IPFS_RELAY environment variables; with neither available the method
  logs an error and returns False. Returns the final `ipfs_started` state.
  """
  if self.ipfs_started:
    # Already connected — nothing to do.
    # NOTE(review): returns None here rather than True — confirm callers
    # only use the result as truthy/falsy.
    return

  if base64_swarm_key is None:
    base64_swarm_key = os.getenv(IPFSCt.EE_SWARM_KEY_CONTENT_BASE64_ENV_KEY)
    if base64_swarm_key is not None:
      self.P("Found IPFS swarm key in environment variable.", color='d')

  if ipfs_relay is None:
    ipfs_relay = os.getenv(IPFSCt.EE_IPFS_RELAY_ENV_KEY)
    if ipfs_relay is not None:
      self.P("Found IPFS relay in environment variable.", color='d')

  if not base64_swarm_key or not ipfs_relay:
    self.P("Missing env values EE_SWARM_KEY_CONTENT_BASE64 and EE_IPFS_RELAY.", color='r')
    return False

  self.__base64_swarm_key = base64_swarm_key
  self.__ipfs_relay = ipfs_relay
  # Only the edges of the swarm key are logged, never the full secret.
  hidden_base64_swarm_key = base64_swarm_key[:8] + "..." + base64_swarm_key[-8:]
  msg = f"Starting R1FS <{self.__name}>:"
  msg += f"\n Relay: {self.__ipfs_relay}"
  msg += f"\n Download: {self.__downloads_dir}"
  msg += f"\n Upload: {self.__uploads_dir}"
  msg += f"\n SwarmKey: {hidden_base64_swarm_key}"
  self.P(msg, color='m')

  ipfs_repo = os.path.expanduser("~/.ipfs")
  os.makedirs(ipfs_repo, exist_ok=True)
  config_path = os.path.join(ipfs_repo, "config")
  swarm_key_path = os.path.join(ipfs_repo, "swarm.key")

  if not os.path.isfile(config_path):
    # Repository is not initialized; write the swarm key and init.
    try:
      decoded_key = base64.b64decode(base64_swarm_key)
      with open(swarm_key_path, "wb") as f:
        f.write(decoded_key)
      # Restrict the private swarm key to owner read/write.
      os.chmod(swarm_key_path, 0o600)
      self.P("Swarm key written successfully.", color='g')
    except Exception as e:
      self.P(f"Error writing swarm.key: {e}", color='r')
      return False

    try:
      self.P("Initializing IPFS repository...")
      self.__run_command(["ipfs", "init"])
    except Exception as e:
      self.P(f"Error during IPFS init: {e}", color='r')
      return False
  else:
    self.P(f"IPFS repository already initialized in {config_path}.", color='g')

  try:
    # Private swarm: drop the default public bootstrap nodes.
    self.P("Removing public IPFS bootstrap nodes...")
    self.__run_command(["ipfs", "bootstrap", "rm", "--all"])
  except Exception as e:
    self.P(f"Error removing bootstrap nodes: {e}", color='r')

  # Check if daemon is already running by attempting to get the node id.
  try:
    # explicit run no get_id
    result = self.__run_command(["ipfs", "id"])
    self.__ipfs_id = json.loads(result)["ID"]
    # NOTE(review): assumes at least two entries in "Addresses" — confirm.
    self.__ipfs_address = json.loads(result)["Addresses"][1]
    self.__ipfs_agent = json.loads(result)["AgentVersion"]
    self.P("IPFS daemon running", color='g')

  except Exception:
    # Daemon not reachable: configure and launch it in the background.
    try:
      self.__set_reprovider_interval()
      self.__set_relay()
      self.P("Starting IPFS daemon in background...")
      subprocess.Popen(["ipfs", "daemon"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
      # Fixed grace period for the daemon to come up before connecting.
      time.sleep(5)
    except Exception as e:
      self.P(f"Error starting IPFS daemon: {e}", color='r')
      # NOTE(review): returns None (not False) on daemon-start failure.
      return

  try:
    my_id = self.__get_id()
    assert my_id != ERROR_TAG, "Failed to get IPFS ID."
    self.P("IPFS ID: " + my_id, color='g')
    self.P(f"Connecting to relay: {ipfs_relay}")
    result = self.__run_command(["ipfs", "swarm", "connect", ipfs_relay])
    # Multiaddr format /<proto>/<host>/... — element [2] is the host part.
    relay_ip = ipfs_relay.split("/")[2]
    if "connect" in result.lower() and "success" in result.lower():
      self.P(f"R1FS connected to: {relay_ip}", color='g', boxed=True)
      # The engine is considered started only after a successful relay
      # connection; the @require_ipfs_started methods gate on this flag.
      self.__ipfs_started = True
    else:
      self.P("Relay connection result did not indicate success.", color='r')
  except Exception as e:
    self.P(f"Error connecting to relay: {e}", color='r')

  return self.ipfs_started
|
576
|
+
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: naeural_client
|
3
|
-
Version: 2.7.
|
3
|
+
Version: 2.7.41
|
4
4
|
Summary: `naeural_client` is the Python SDK required for client app development for the Naeural Edge Protocol Edge Protocol framework
|
5
5
|
Project-URL: Homepage, https://github.com/NaeuralEdgeProtocol/naeural_client
|
6
6
|
Project-URL: Bug Tracker, https://github.com/NaeuralEdgeProtocol/naeural_client/issues
|
@@ -1,5 +1,5 @@
|
|
1
1
|
naeural_client/__init__.py,sha256=YimqgDbjLuywsf8zCWE0EaUXH4MBUrqLxt0TDV558hQ,632
|
2
|
-
naeural_client/_ver.py,sha256=
|
2
|
+
naeural_client/_ver.py,sha256=dXiGaEP6ulHafTdmgC2ixakRYRX243ddJ6MtPH035Fw,331
|
3
3
|
naeural_client/base_decentra_object.py,sha256=C4iwZTkhKNBS4VHlJs5DfElRYLo4Q9l1V1DNVSk1fyQ,4412
|
4
4
|
naeural_client/plugins_manager_mixin.py,sha256=X1JdGLDz0gN1rPnTN_5mJXR8JmqoBFQISJXmPR9yvCo,11106
|
5
5
|
naeural_client/base/__init__.py,sha256=hACh83_cIv7-PwYMM3bQm2IBmNqiHw-3PAfDfAEKz9A,259
|
@@ -60,6 +60,8 @@ naeural_client/io_formatter/default/__init__.py,sha256=zOm2tsOk6fXvyCXxsXDnsNs6B
|
|
60
60
|
naeural_client/io_formatter/default/a_dummy.py,sha256=qr9eUizQ-NN5jdXVzkaZKMaf9KS41MpPN_iDoTN_Qd0,1148
|
61
61
|
naeural_client/io_formatter/default/aixp1.py,sha256=MX0TeUR4APA-qN3vUC6uzcz8Pssz5lgrQWo7td5Ri1A,3052
|
62
62
|
naeural_client/io_formatter/default/default.py,sha256=gEy78cP2D5s0y8vQh4aHuxqz7D10gGfuiKF311QhrpE,494
|
63
|
+
naeural_client/ipfs/__init__.py,sha256=vXEDLUNUO6lOTMGa8iQ9Zf7ajIQq9GZuvYraAHt3meE,38
|
64
|
+
naeural_client/ipfs/r1fs.py,sha256=JJHWBxRIOYv-LNHp9SLgjsfEAaTM1CPascHQC3AaYdM,17122
|
63
65
|
naeural_client/logging/__init__.py,sha256=b79X45VC6c37u32flKB2GAK9f-RR0ocwP0JDCy0t7QQ,33
|
64
66
|
naeural_client/logging/base_logger.py,sha256=qqqMX30Vmh5Dz8YYaeL_ainQPTP5FsX1Y4QMbsIG5Rg,69599
|
65
67
|
naeural_client/logging/small_logger.py,sha256=m12hCb_H4XifJYYfgCAOUDkcXm-h4pSODnFf277OFVI,2937
|
@@ -86,8 +88,8 @@ naeural_client/utils/comm_utils.py,sha256=4cS9llRr_pK_3rNgDcRMCQwYPO0kcNU7AdWy_L
|
|
86
88
|
naeural_client/utils/config.py,sha256=lAbWe3UMi40BOdsAIZIb-fYtb4LwG3MIYg0EOA1ITr8,10340
|
87
89
|
naeural_client/utils/dotenv.py,sha256=_AgSo35n7EnQv5yDyu7C7i0kHragLJoCGydHjvOkrYY,2008
|
88
90
|
naeural_client/utils/oracle_sync/oracle_tester.py,sha256=X-923ccjkr6_kzbbiuAAcWSIhMtBDOH2VURjTh55apQ,27235
|
89
|
-
naeural_client-2.7.
|
90
|
-
naeural_client-2.7.
|
91
|
-
naeural_client-2.7.
|
92
|
-
naeural_client-2.7.
|
93
|
-
naeural_client-2.7.
|
91
|
+
naeural_client-2.7.41.dist-info/METADATA,sha256=LLdQeOsrFGHWOH8sRONDE7B7ivP3sroWHjtRWHMevY0,12354
|
92
|
+
naeural_client-2.7.41.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
93
|
+
naeural_client-2.7.41.dist-info/entry_points.txt,sha256=CTua17GUrRa4aXeafezGC9TiWKGKQzwTjQmB2jyj22g,91
|
94
|
+
naeural_client-2.7.41.dist-info/licenses/LICENSE,sha256=cvOsJVslde4oIaTCadabXnPqZmzcBO2f2zwXZRmJEbE,11311
|
95
|
+
naeural_client-2.7.41.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|