hippius 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hippius-0.1.6.dist-info → hippius-0.1.7.dist-info}/METADATA +274 -4
- hippius-0.1.7.dist-info/RECORD +10 -0
- hippius_sdk/__init__.py +45 -1
- hippius_sdk/cli.py +1269 -36
- hippius_sdk/client.py +53 -12
- hippius_sdk/config.py +744 -0
- hippius_sdk/ipfs.py +178 -87
- hippius_sdk/substrate.py +130 -68
- hippius-0.1.6.dist-info/RECORD +0 -9
- {hippius-0.1.6.dist-info → hippius-0.1.7.dist-info}/WHEEL +0 -0
- {hippius-0.1.6.dist-info → hippius-0.1.7.dist-info}/entry_points.txt +0 -0
hippius_sdk/ipfs.py
CHANGED
@@ -13,6 +13,10 @@ import uuid
 from typing import Dict, Any, Optional, Union, List, Tuple
 import ipfshttpclient
 from dotenv import load_dotenv
+from hippius_sdk.config import (
+    get_config_value,
+    get_encryption_key,
+)
 
 # Import PyNaCl for encryption
 try:
@@ -37,8 +41,8 @@ class IPFSClient:
 
     def __init__(
         self,
-        gateway: str = "https://ipfs.io",
-        api_url: Optional[str] = "https://relay-fr.hippius.network",
+        gateway: Optional[str] = None,
+        api_url: Optional[str] = None,
         encrypt_by_default: Optional[bool] = None,
        encryption_key: Optional[bytes] = None,
     ):
@@ -46,12 +50,25 @@ class IPFSClient:
         Initialize the IPFS client.
 
         Args:
-            gateway: IPFS gateway URL for downloading content
-            api_url: IPFS API URL for uploading content
+            gateway: IPFS gateway URL for downloading content (from config if None)
+            api_url: IPFS API URL for uploading content (from config if None)
                 Set to None to try to connect to a local IPFS daemon.
-            encrypt_by_default: Whether to encrypt files by default (from .env if None)
-            encryption_key: Encryption key for NaCl secretbox (from .env if None)
+            encrypt_by_default: Whether to encrypt files by default (from config if None)
+            encryption_key: Encryption key for NaCl secretbox (from config if None)
         """
+        # Load configuration values if not explicitly provided
+        if gateway is None:
+            gateway = get_config_value("ipfs", "gateway", "https://ipfs.io")
+
+        if api_url is None:
+            api_url = get_config_value(
+                "ipfs", "api_url", "https://relay-fr.hippius.network"
+            )
+
+        # Check if local IPFS is enabled in config
+        if get_config_value("ipfs", "local_ipfs", False):
+            api_url = "http://localhost:5001"
+
         self.gateway = gateway.rstrip("/")
         self.api_url = api_url
         self.client = None
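The constructor now defers unset arguments to `get_config_value`, falling back to a built-in default when the key is absent. `hippius_sdk/config.py` itself is not part of this hunk, so the following is only a sketch of the likely resolution order (explicit argument, then config file, then default), with an assumed `~/.hippius/config.json` layout:

```python
# Hypothetical sketch of the lookup order suggested by the code above:
# explicit argument > value stored in a config file > built-in default.
# The ~/.hippius/config.json path and layout are assumptions, not the
# SDK's documented format.
import json
import os
from typing import Any

CONFIG_PATH = os.path.expanduser("~/.hippius/config.json")


def get_config_value(section: str, key: str, default: Any = None) -> Any:
    """Return config[section][key] if present, else the given default."""
    try:
        with open(CONFIG_PATH) as f:
            config = json.load(f)
        return config.get(section, {}).get(key, default)
    except (FileNotFoundError, json.JSONDecodeError):
        return default


# Mirrors the constructor logic: a None argument defers to config.
gateway = None
if gateway is None:
    gateway = get_config_value("ipfs", "gateway", "https://ipfs.io")
print(gateway)
```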
@@ -92,7 +109,7 @@ class IPFSClient:
     def _initialize_encryption(
         self, encrypt_by_default: Optional[bool], encryption_key: Optional[bytes]
     ):
-        """Initialize encryption settings from parameters or .env."""
+        """Initialize encryption settings from parameters or configuration."""
         # Check if encryption is available
         if not ENCRYPTION_AVAILABLE:
             self.encryption_available = False
@@ -100,31 +117,19 @@ class IPFSClient:
             self.encryption_key = None
             return
 
-        #
-
-
-
-
-
-
-            self.encrypt_by_default = env_default in ("true", "1", "yes")
+        # Set up encryption default from parameter or config
+        if encrypt_by_default is None:
+            self.encrypt_by_default = get_config_value(
+                "encryption", "encrypt_by_default", False
+            )
+        else:
+            self.encrypt_by_default = encrypt_by_default
 
-        # Set up encryption key from parameter or .env
-
-
-
-        try:
-            self.encryption_key = base64.b64decode(env_key)
-            # Validate key length
-            if len(self.encryption_key) != nacl.secret.SecretBox.KEY_SIZE:
-                print(
-                    f"Warning: Encryption key from .env has incorrect length. Expected {nacl.secret.SecretBox.KEY_SIZE} bytes, got {len(self.encryption_key)} bytes."
-                )
-                self.encryption_key = None
-        except Exception as e:
-            print(f"Warning: Failed to decode encryption key from .env: {e}")
-            self.encryption_key = None
+        # Set up encryption key from parameter or config
+        if encryption_key is None:
+            self.encryption_key = get_encryption_key()
+        else:
+            self.encryption_key = encryption_key
 
         # Check if we have a valid key and can encrypt
         self.encryption_available = (
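The removed block decoded a base64 key from `.env` and validated it against `nacl.secret.SecretBox.KEY_SIZE`; that logic now lives behind `get_encryption_key()`. A self-contained round trip of the underlying PyNaCl primitives (the key generation and storage shown here are illustrative, not the SDK's flow):

```python
# Round trip with PyNaCl's SecretBox, the primitive the SDK wraps.
import base64

import nacl.secret
import nacl.utils

# A valid key is exactly SecretBox.KEY_SIZE (32) random bytes,
# typically stored base64-encoded in config or .env.
key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
encoded = base64.b64encode(key).decode()

# Decode and validate, as the removed .env path did.
decoded = base64.b64decode(encoded)
assert len(decoded) == nacl.secret.SecretBox.KEY_SIZE

box = nacl.secret.SecretBox(decoded)
ciphertext = box.encrypt(b"hello hippius")  # nonce is prepended automatically
assert box.decrypt(ciphertext) == b"hello hippius"
```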
@@ -726,7 +731,11 @@ class IPFSClient:
         return cid
 
     def download_file(
-        self, cid: str, output_path: str, decrypt: Optional[bool] = None
+        self,
+        cid: str,
+        output_path: str,
+        decrypt: Optional[bool] = None,
+        max_retries: int = 3,
     ) -> Dict[str, Any]:
         """
         Download a file from IPFS with optional decryption.
@@ -735,6 +744,7 @@ class IPFSClient:
             cid: Content Identifier (CID) of the file to download
             output_path: Path where the downloaded file will be saved
             decrypt: Whether to decrypt the file (overrides default)
+            max_retries: Maximum number of retry attempts (default: 3)
 
         Returns:
             Dict[str, Any]: Dictionary containing download results:
@@ -772,16 +782,41 @@ class IPFSClient:
         else:
             download_path = output_path
 
-        # Download the file
-        url = f"{self.gateway}/ipfs/{cid}"
-        response = requests.get(url, stream=True)
-        response.raise_for_status()
+        # Download the file with retry logic
+        retries = 0
+        last_error = None
+
+        while retries < max_retries:
+            try:
+                # Download the file
+                url = f"{self.gateway}/ipfs/{cid}"
+                response = requests.get(url, stream=True)
+                response.raise_for_status()
+
+                os.makedirs(
+                    os.path.dirname(os.path.abspath(download_path)), exist_ok=True
+                )
+
+                with open(download_path, "wb") as f:
+                    for chunk in response.iter_content(chunk_size=8192):
+                        f.write(chunk)
 
-        os.makedirs(os.path.dirname(os.path.abspath(download_path)), exist_ok=True)
+                # If we reach here, download was successful
+                break
 
-        with open(download_path, "wb") as f:
-            for chunk in response.iter_content(chunk_size=8192):
-                f.write(chunk)
+            except (requests.exceptions.RequestException, IOError) as e:
+                # Save the error and retry
+                last_error = e
+                retries += 1
+
+                if retries < max_retries:
+                    wait_time = 2**retries  # Exponential backoff: 2, 4, 8 seconds
+                    print(f"Download attempt {retries} failed: {str(e)}")
+                    print(f"Retrying in {wait_time} seconds...")
+                    time.sleep(wait_time)
+                else:
+                    # Raise the last error if we've exhausted all retries
+                    raise
 
         # Decrypt if needed
         if should_decrypt:
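From the caller's side, the new retry loop is controlled entirely by `max_retries`, with waits of 2, 4, 8, ... seconds between attempts (`wait_time = 2 ** retries`). A usage sketch with placeholder CID and path:

```python
# Caller-side sketch: the CID and output path are placeholders.
from hippius_sdk.ipfs import IPFSClient

client = IPFSClient()  # gateway/api_url resolved from config

# Up to 5 attempts, sleeping 2, 4, 8, 16 seconds after failures,
# then re-raising the last error if all attempts fail.
result = client.download_file(
    "QmExampleCid...",  # placeholder CID
    "downloads/file.bin",
    max_retries=5,
)
print(result)
```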
@@ -1096,39 +1131,41 @@ class IPFSClient:
             print(
                 f"Warning: File has fewer chunks ({len(chunks)}) than k={k}. Adjusting parameters."
             )
-
+
             # If we have a very small file, we'll just use a single chunk
             # but will still split it into k sub-blocks during encoding
             if len(chunks) == 1:
                 if verbose:
-                    print(
+                    print(
+                        f"Small file (single chunk): will split into {k} sub-blocks for encoding"
+                    )
             else:
                 # If we have multiple chunks but fewer than k, adjust k to match
                 old_k = k
                 k = max(1, len(chunks))
                 if verbose:
                     print(f"Adjusting k from {old_k} to {k} to match available chunks")
-
+
             # Ensure m is greater than k for redundancy
             if m <= k:
                 old_m = m
                 m = k + 2  # Ensure we have at least 2 redundant chunks
                 if verbose:
                     print(f"Adjusting m from {old_m} to {m} to ensure redundancy")
-
+
             if verbose:
                 print(f"New parameters: k={k}, m={m}")
-
+
         # Ensure we have at least one chunk to process
         if not chunks:
             raise ValueError("File is empty or too small to process")
-
+
         # For k=1 case, ensure we have proper sized input for zfec
         if k == 1 and len(chunks) == 1:
             # zfec expects the input to be exactly chunk_size for k=1
             # So we need to pad if shorter or truncate if longer
             if len(chunks[0]) != chunk_size:
-                chunks[0] = chunks[0].ljust(chunk_size, b"\0")
+                chunks[0] = chunks[0].ljust(chunk_size, b"\0")[:chunk_size]
 
         # Create metadata
         metadata = {
|
|
1156
1193
|
for i, chunk in enumerate(chunks):
|
1157
1194
|
try:
|
1158
1195
|
# For zfec encoder.encode(), we must provide exactly k blocks
|
1159
|
-
|
1196
|
+
|
1160
1197
|
# Calculate how many bytes each sub-block should have
|
1161
|
-
sub_block_size = (
|
1162
|
-
|
1198
|
+
sub_block_size = (
|
1199
|
+
len(chunk) + k - 1
|
1200
|
+
) // k # ceiling division for even distribution
|
1201
|
+
|
1163
1202
|
# Split the chunk into exactly k sub-blocks of equal size (padding as needed)
|
1164
1203
|
sub_blocks = []
|
1165
1204
|
for j in range(k):
|
1166
1205
|
start = j * sub_block_size
|
1167
1206
|
end = min(start + sub_block_size, len(chunk))
|
1168
1207
|
sub_block = chunk[start:end]
|
1169
|
-
|
1208
|
+
|
1170
1209
|
# Pad if needed to make all sub-blocks the same size
|
1171
1210
|
if len(sub_block) < sub_block_size:
|
1172
|
-
sub_block = sub_block.ljust(sub_block_size, b
|
1173
|
-
|
1211
|
+
sub_block = sub_block.ljust(sub_block_size, b"\0")
|
1212
|
+
|
1174
1213
|
sub_blocks.append(sub_block)
|
1175
|
-
|
1214
|
+
|
1176
1215
|
# Verify we have exactly k sub-blocks
|
1177
1216
|
if len(sub_blocks) != k:
|
1178
|
-
raise ValueError(
|
1179
|
-
|
1217
|
+
raise ValueError(
|
1218
|
+
f"Expected {k} sub-blocks but got {len(sub_blocks)}"
|
1219
|
+
)
|
1220
|
+
|
1180
1221
|
# Encode the k sub-blocks to create m encoded blocks
|
1181
1222
|
encoder = zfec.Encoder(k, m)
|
1182
1223
|
encoded_chunks = encoder.encode(sub_blocks)
|
1183
|
-
|
1224
|
+
|
1184
1225
|
# Add to our collection
|
1185
1226
|
all_encoded_chunks.append(encoded_chunks)
|
1186
|
-
|
1227
|
+
|
1187
1228
|
if verbose and (i + 1) % 10 == 0:
|
1188
1229
|
print(f" Encoded {i+1}/{len(chunks)} chunks")
|
1189
1230
|
except Exception as e:
|
1190
1231
|
# If encoding fails, provide more helpful error message
|
1191
1232
|
error_msg = f"Error encoding chunk {i}: {str(e)}"
|
1192
1233
|
print(f"Error details: chunk size={len(chunk)}, k={k}, m={m}")
|
1193
|
-
print(
|
1234
|
+
print(
|
1235
|
+
f"Sub-blocks created: {len(sub_blocks) if 'sub_blocks' in locals() else 'None'}"
|
1236
|
+
)
|
1194
1237
|
raise RuntimeError(f"{error_msg}")
|
1195
1238
|
|
1196
1239
|
# Step 4: Upload all chunks to IPFS
|
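For readers unfamiliar with zfec, here is a self-contained k-of-m round trip mirroring the sub-block scheme above, with deliberately small parameters:

```python
# Self-contained k-of-m round trip with zfec (pip install zfec).
# Small illustrative parameters; the SDK derives these from the file size.
import zfec

k, m = 3, 5
data = b"hello erasure coding!"

# Split into k equal-sized sub-blocks, padding the tail with zero bytes
# (ceiling division, as in the encoder above).
sub_block_size = (len(data) + k - 1) // k
sub_blocks = [
    data[j * sub_block_size:(j + 1) * sub_block_size].ljust(sub_block_size, b"\0")
    for j in range(k)
]

# Encode k sub-blocks into m shares; any k of them can rebuild the data.
shares = zfec.Encoder(k, m).encode(sub_blocks)

# Simulate losing two shares and decode from the survivors.
surviving_indexes = [0, 2, 4]
surviving_shares = [shares[i] for i in surviving_indexes]
recovered = zfec.Decoder(k, m).decode(surviving_shares, surviving_indexes)

assert b"".join(recovered).rstrip(b"\0") == data
print("round trip OK")
```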
@@ -1249,10 +1292,12 @@ class IPFSClient:
             print(f"Uploading metadata file...")
 
         # Upload the metadata file to IPFS
-        metadata_cid_result = self.upload_file(
-
+        metadata_cid_result = self.upload_file(
+            metadata_path, max_retries=max_retries
+        )
+
         # Extract just the CID string from the result dictionary
-        metadata_cid = metadata_cid_result[
+        metadata_cid = metadata_cid_result["cid"]
         metadata["metadata_cid"] = metadata_cid
 
         if verbose:
@@ -1294,6 +1339,9 @@ class IPFSClient:
                 "Erasure coding is not available. Install zfec: pip install zfec"
             )
 
+        # Start timing the reconstruction process
+        start_time = time.time()
+
         # Create a temporary directory if not provided
         if temp_dir is None:
             temp_dir_obj = tempfile.TemporaryDirectory()
@@ -1309,6 +1357,10 @@ class IPFSClient:
         metadata_path = os.path.join(temp_dir, "metadata.json")
         self.download_file(metadata_cid, metadata_path, max_retries=max_retries)
 
+        if verbose:
+            metadata_download_time = time.time() - start_time
+            print(f"Metadata downloaded in {metadata_download_time:.2f} seconds")
+
         with open(metadata_path, "r") as f:
             metadata = json.load(f)
 
@@ -1326,8 +1378,11 @@ class IPFSClient:
             print(
                 f"File: {original_file['name']} ({original_file['size']/1024/1024:.2f} MB)"
             )
-            print(
-
+            print(
+                f"Erasure coding parameters: k={k}, m={m} (need {k} of {m} chunks to reconstruct)"
+            )
+            if is_encrypted:
+                print(f"Encrypted: Yes")
 
         # Step 3: Group chunks by their original chunk index
         chunks_by_original = {}
@@ -1339,9 +1394,15 @@ class IPFSClient:
 
         # Step 4: For each original chunk, download at least k shares
         if verbose:
-
+            total_original_chunks = len(chunks_by_original)
+            total_chunks_to_download = total_original_chunks * k
+            print(
+                f"Downloading and reconstructing {total_chunks_to_download} chunks..."
+            )
 
         reconstructed_chunks = []
+        chunks_downloaded = 0
+        chunks_failed = 0
 
         for orig_idx in sorted(chunks_by_original.keys()):
             available_chunks = chunks_by_original[orig_idx]
@@ -1363,10 +1424,15 @@ class IPFSClient:
                 chunk_path = os.path.join(temp_dir, chunk["name"])
                 try:
                     # Extract the CID string from the chunk's cid dictionary
-                    chunk_cid =
+                    chunk_cid = (
+                        chunk["cid"]["cid"]
+                        if isinstance(chunk["cid"], dict) and "cid" in chunk["cid"]
+                        else chunk["cid"]
+                    )
                     self.download_file(
                         chunk_cid, chunk_path, max_retries=max_retries
                     )
+                    chunks_downloaded += 1
 
                     # Read the chunk data
                     with open(chunk_path, "rb") as f:
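A chunk's `cid` field can be either a bare string or a dict produced by `upload_file`; the conditional expression above normalizes both shapes. As a standalone helper (the extra dict key is illustrative):

```python
# Normalizing the two shapes a chunk's "cid" field can take:
# a bare string, or an upload_file-style result dict.
from typing import Union


def extract_cid(value: Union[str, dict]) -> str:
    """Return the CID string whether value is a str or a {'cid': ...} dict."""
    if isinstance(value, dict) and "cid" in value:
        return value["cid"]
    return value


assert extract_cid("QmAbc") == "QmAbc"
assert extract_cid({"cid": "QmAbc", "size_bytes": 123}) == "QmAbc"
```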
@@ -1378,6 +1444,7 @@ class IPFSClient:
                 except Exception as e:
                     if verbose:
                         print(f"Error downloading chunk {chunk['name']}: {str(e)}")
+                    chunks_failed += 1
                     # Continue to the next chunk
 
         # If we don't have enough chunks, try to download more
@@ -1390,20 +1457,32 @@ class IPFSClient:
             # Reconstruct this chunk
             decoder = zfec.Decoder(k, m)
             reconstructed_data = decoder.decode(downloaded_shares, share_indexes)
-
+
             # If we used the sub-block approach during encoding, we need to recombine the sub-blocks
             if isinstance(reconstructed_data, list):
                 # Combine the sub-blocks back into a single chunk
-                reconstructed_chunk = b"".join(reconstructed_data)
+                reconstructed_chunk = b"".join(reconstructed_data)
             else:
                 # The simple case where we didn't use sub-blocks
                 reconstructed_chunk = reconstructed_data
-
+
             reconstructed_chunks.append(reconstructed_chunk)
 
-
+            # Print progress
+            if verbose:
+                progress_pct = (orig_idx + 1) / total_original_chunks * 100
                 print(
-                    f"
+                    f"  Progress: {orig_idx + 1}/{total_original_chunks} chunks ({progress_pct:.1f}%)"
+                )
+
+        if verbose:
+            download_time = time.time() - start_time
+            print(
+                f"Downloaded {chunks_downloaded} chunks in {download_time:.2f} seconds"
+            )
+            if chunks_failed > 0:
+                print(
+                    f"Failed to download {chunks_failed} chunks (not needed for reconstruction)"
                 )
 
         # Step 5: Combine the reconstructed chunks into a file
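The decoder accepts any k distinct shares along with their indexes, which is what makes losing up to m - k chunks tolerable. A quick property check under the same small parameters as the earlier sketch:

```python
# Property check: every k-subset of the m shares reconstructs the chunk.
from itertools import combinations

import zfec

k, m = 3, 5
chunk = bytes(range(30))  # 30 illustrative bytes
size = (len(chunk) + k - 1) // k
blocks = [chunk[i * size:(i + 1) * size].ljust(size, b"\0") for i in range(k)]
shares = zfec.Encoder(k, m).encode(blocks)

for indexes in combinations(range(m), k):
    subset = [shares[i] for i in indexes]
    decoded = zfec.Decoder(k, m).decode(subset, list(indexes))
    assert b"".join(decoded)[: len(chunk)] == chunk
print("all 10 k-subsets reconstruct the chunk")
```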
@@ -1443,9 +1522,12 @@ class IPFSClient:
                 print(f"Warning: File hash mismatch!")
                 print(f"  Expected: {expected_hash}")
                 print(f"  Actual: {actual_hash}")
+            elif verbose:
+                print(f"Hash verification successful!")
 
+        total_time = time.time() - start_time
         if verbose:
-            print(f"Reconstruction complete!")
+            print(f"Reconstruction complete in {total_time:.2f} seconds!")
             print(f"File saved to: {output_file}")
 
         return output_file
@@ -1453,7 +1535,7 @@ class IPFSClient:
         finally:
             # Clean up temporary directory if we created it
             if temp_dir_obj is not None:
-                temp_dir_obj.
+                temp_dir_obj.cleanup()
 
     def store_erasure_coded_file(
         self,
@@ -1512,42 +1594,49 @@ class IPFSClient:
 
         original_file = metadata["original_file"]
         metadata_cid = metadata["metadata_cid"]
-
+
         # Create a list to hold all the file inputs (metadata + all chunks)
         all_file_inputs = []
 
         # Step 3: Prepare metadata file for storage
         if verbose:
-            print(
+            print(
+                f"Preparing to store metadata and {len(metadata['chunks'])} chunks in the Hippius marketplace..."
+            )
 
         # Create a file input for the metadata file
         metadata_file_input = FileInput(
             file_hash=metadata_cid, file_name=f"{original_file['name']}.ec_metadata"
         )
         all_file_inputs.append(metadata_file_input)
-
+
         # Step 4: Add all chunks to the storage request
         if verbose:
             print(f"Adding all chunks to storage request...")
-
+
         for i, chunk in enumerate(metadata["chunks"]):
             # Extract the CID string from the chunk's cid dictionary
-            chunk_cid =
-
-
-
+            chunk_cid = (
+                chunk["cid"]["cid"]
+                if isinstance(chunk["cid"], dict) and "cid" in chunk["cid"]
+                else chunk["cid"]
             )
+            chunk_file_input = FileInput(file_hash=chunk_cid, file_name=chunk["name"])
             all_file_inputs.append(chunk_file_input)
-
+
             # Print progress for large numbers of chunks
             if verbose and (i + 1) % 50 == 0:
-                print(
+                print(
+                    f"  Prepared {i + 1}/{len(metadata['chunks'])} chunks for storage"
+                )
 
         # Step 5: Submit the storage request for all files
         try:
             if verbose:
-                print(
-
+                print(
+                    f"Submitting storage request for 1 metadata file and {len(metadata['chunks'])} chunks..."
+                )
+
             tx_hash = substrate_client.storage_request(
                 files=all_file_inputs, miner_ids=miner_ids
             )
@@ -1556,13 +1645,15 @@ class IPFSClient:
                 print(f"Successfully stored all files in marketplace!")
                 print(f"Transaction hash: {tx_hash}")
                 print(f"Metadata CID: {metadata_cid}")
-                print(
+                print(
+                    f"Total files stored: {len(all_file_inputs)} (1 metadata + {len(metadata['chunks'])} chunks)"
+                )
 
             return {
                 "metadata": metadata,
                 "metadata_cid": metadata_cid,
                 "transaction_hash": tx_hash,
-                "total_files_stored": len(all_file_inputs)
+                "total_files_stored": len(all_file_inputs),
             }
 
         except Exception as e:
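Putting the marketplace pieces together: the pattern above builds one `FileInput` for the metadata file plus one per chunk, then submits them in a single `storage_request` call. A caller-side sketch; the `FileInput` fields and `storage_request` keywords follow this diff, but the import path, the `SubstrateClient` name, and the metadata literal are assumptions:

```python
# Sketch of the batching pattern above: one FileInput for the metadata
# file plus one per chunk, submitted in a single storage_request call.
# Import path and SubstrateClient name are assumed; the metadata dict
# literal below is illustrative.
from hippius_sdk.substrate import FileInput, SubstrateClient

metadata = {
    "original_file": {"name": "video.mp4"},
    "metadata_cid": "QmMetaCid...",  # placeholder
    "chunks": [
        {"name": "video.mp4.chunk_0_0", "cid": {"cid": "QmChunk0..."}},
        {"name": "video.mp4.chunk_0_1", "cid": "QmChunk1..."},  # bare-string form
    ],
}

all_file_inputs = [
    FileInput(
        file_hash=metadata["metadata_cid"],
        file_name=f"{metadata['original_file']['name']}.ec_metadata",
    )
]
for chunk in metadata["chunks"]:
    cid = chunk["cid"]["cid"] if isinstance(chunk["cid"], dict) else chunk["cid"]
    all_file_inputs.append(FileInput(file_hash=cid, file_name=chunk["name"]))

substrate_client = SubstrateClient()  # constructor args assumed
tx_hash = substrate_client.storage_request(files=all_file_inputs, miner_ids=[])
print(tx_hash)
```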