hippius 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hippius-0.2.4.dist-info → hippius-0.2.6.dist-info}/METADATA +1 -1
- hippius-0.2.6.dist-info/RECORD +17 -0
- hippius_sdk/__init__.py +21 -10
- hippius_sdk/cli.py +12 -0
- hippius_sdk/cli_handlers.py +413 -57
- hippius_sdk/cli_parser.py +20 -0
- hippius_sdk/cli_rich.py +8 -2
- hippius_sdk/client.py +5 -3
- hippius_sdk/errors.py +77 -0
- hippius_sdk/ipfs.py +249 -298
- hippius_sdk/ipfs_core.py +216 -10
- hippius_sdk/substrate.py +101 -14
- hippius-0.2.4.dist-info/RECORD +0 -16
- {hippius-0.2.4.dist-info → hippius-0.2.6.dist-info}/WHEEL +0 -0
- {hippius-0.2.4.dist-info → hippius-0.2.6.dist-info}/entry_points.txt +0 -0
hippius_sdk/ipfs.py
CHANGED
@@ -16,6 +16,13 @@ import httpx
 import requests
 
 from hippius_sdk.config import get_config_value, get_encryption_key
+from hippius_sdk.errors import (
+    HippiusAlreadyDeletedError,
+    HippiusFailedIPFSUnpin,
+    HippiusFailedSubstrateDelete,
+    HippiusIPFSConnectionError,
+    HippiusMetadataError,
+)
 from hippius_sdk.ipfs_core import AsyncIPFSClient
 from hippius_sdk.substrate import FileInput, SubstrateClient
 from hippius_sdk.utils import format_cid, format_size
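Note: 0.2.6 replaces the string sentinels used in 0.2.4 (e.g. `no-blockchain-cancellation-available`, `ipfs-unpinned-only-...`, visible in the removed code below) with typed exceptions from the new `hippius_sdk.errors` module. A minimal caller-side sketch; which call raises which error is an assumption based on the comments in the delete paths further down in this diff:

```python
# Sketch only: the exception mapping is inferred from comments later
# in this diff, not from documented behavior.
from hippius_sdk.errors import (
    HippiusAlreadyDeletedError,
    HippiusFailedSubstrateDelete,
)
from hippius_sdk.substrate import SubstrateClient


async def cancel_storage(cid: str) -> bool:
    substrate_client = SubstrateClient()
    try:
        await substrate_client.cancel_storage_request(cid)
        return True
    except HippiusAlreadyDeletedError:
        # Nothing left to cancel on-chain.
        return True
    except HippiusFailedSubstrateDelete:
        # Unpinning may have succeeded, but the transaction failed.
        return False
```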
@@ -436,15 +443,18 @@ class IPFSClient:
         output_path: str,
         decrypt: Optional[bool] = None,
         max_retries: int = 3,
+        skip_directory_check: bool = False,
     ) -> Dict[str, Any]:
         """
         Download a file from IPFS with optional decryption.
+        Supports downloading directories - in that case, a directory structure will be created.
 
         Args:
             cid: Content Identifier (CID) of the file to download
-            output_path: Path where the downloaded file will be saved
+            output_path: Path where the downloaded file/directory will be saved
             decrypt: Whether to decrypt the file (overrides default)
             max_retries: Maximum number of retry attempts (default: 3)
+            skip_directory_check: If True, skips directory check (treats as file)
 
         Returns:
             Dict[str, Any]: Dictionary containing download results:
@@ -454,6 +464,7 @@ class IPFSClient:
             - size_formatted: Human-readable file size
             - elapsed_seconds: Time taken for the download in seconds
             - decrypted: Whether the file was decrypted
+            - is_directory: Whether the download was a directory
 
         Raises:
             requests.RequestException: If the download fails
@@ -461,6 +472,47 @@ class IPFSClient:
         """
         start_time = time.time()
 
+        # Skip directory check if requested (important for erasure code chunks)
+        is_directory = False
+        if not skip_directory_check:
+            # Use the improved ls function to properly detect directories
+            try:
+                # The ls function now properly detects directories
+                ls_result = await self.client.ls(cid)
+                is_directory = ls_result.get("is_directory", False)
+            except Exception:
+                # If ls fails, we'll proceed as if it's a file
+                pass
+
+        # If it's a directory, handle it differently
+        if is_directory:
+            # For directories, we don't need to decrypt each file during the initial download
+            # We'll use the AsyncIPFSClient's download_directory method directly
+            try:
+                await self.client.download_directory(cid, output_path)
+
+                # Calculate the total size of the directory
+                total_size = 0
+                for root, _, files in os.walk(output_path):
+                    for file in files:
+                        file_path = os.path.join(root, file)
+                        total_size += os.path.getsize(file_path)
+
+                elapsed_time = time.time() - start_time
+
+                return {
+                    "success": True,
+                    "output_path": output_path,
+                    "size_bytes": total_size,
+                    "size_formatted": self.format_size(total_size),
+                    "elapsed_seconds": round(elapsed_time, 2),
+                    "decrypted": False,  # Directories aren't decrypted as a whole
+                    "is_directory": True,
+                }
+            except Exception as e:
+                raise RuntimeError(f"Failed to download directory: {str(e)}")
+
+        # For regular files, use the existing logic
         # Determine if we should decrypt
         should_decrypt = self.encrypt_by_default if decrypt is None else decrypt
 
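With the new flag, one call handles both files and directories; `skip_directory_check=True` bypasses the `ls` probe when the caller already knows the CID is a plain file, as the erasure-coding path below does. A usage sketch (the CIDs and paths are illustrative):

```python
# Inside an async context; "client" is an IPFSClient instance.
result = await client.download_file(
    "QmExampleCid",          # hypothetical CID
    "downloads/my-content",  # file path, or directory root for directory CIDs
)
if result["is_directory"]:
    print(f"Directory restored at {result['output_path']}")
else:
    print(f"File saved ({result['size_formatted']})")

# When the CID is known to be a raw chunk, skip the extra ls round-trip:
await client.download_file("QmChunkCid", "tmp/chunk-0", skip_directory_check=True)
```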
@@ -482,38 +534,14 @@ class IPFSClient:
         else:
             download_path = output_path
 
-                #
-
-
-
-
-                url = f"{self.gateway}/ipfs/{cid}"
-                response = requests.get(url, stream=True)
-                response.raise_for_status()
-
-                os.makedirs(
-                    os.path.dirname(os.path.abspath(download_path)), exist_ok=True
-                )
-
-                with open(download_path, "wb") as f:
-                    for chunk in response.iter_content(chunk_size=8192):
-                        f.write(chunk)
-
-                # If we reach here, download was successful
-                break
-
-            except (requests.exceptions.RequestException, IOError) as e:
-                # Save the error and retry
-                retries += 1
+        # Pass the skip_directory_check parameter to the core client
+        await self.client.download_file(
+            cid, download_path, skip_directory_check=skip_directory_check
+        )
+        download_success = True
 
-
-
-                print(f"Download attempt {retries} failed: {str(e)}")
-                print(f"Retrying in {wait_time} seconds...")
-                time.sleep(wait_time)
-            else:
-                # Raise the last error if we've exhausted all retries
-                raise
+        if not download_success:
+            raise RuntimeError("Failed to download file after multiple attempts")
 
         # Decrypt if needed
         if should_decrypt:
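Note that retry handling has moved out of this method: the manual `requests` loop is gone and errors from `AsyncIPFSClient.download_file` now propagate directly (`download_success` is set unconditionally, so the `RuntimeError` branch is only a safety net). Callers that still want 0.2.4-style retries can wrap the call themselves; a hypothetical sketch, not part of the SDK:

```python
import asyncio


async def download_with_retries(client, cid, path, attempts=3, base_delay=2.0):
    """Retry wrapper with linear backoff (hypothetical helper)."""
    for attempt in range(1, attempts + 1):
        try:
            return await client.download_file(cid, path)
        except Exception as exc:
            if attempt == attempts:
                raise  # exhausted all attempts; surface the last error
            delay = base_delay * attempt
            print(f"Download attempt {attempt} failed: {exc}; retrying in {delay}s")
            await asyncio.sleep(delay)
```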
@@ -548,6 +576,7 @@ class IPFSClient:
                 "size_formatted": self.format_size(file_size_bytes),
                 "elapsed_seconds": round(elapsed_time, 2),
                 "decrypted": should_decrypt,
+                "is_directory": False,
             }
 
         finally:
@@ -1031,11 +1060,20 @@ class IPFSClient:
 
             # Step 5: Create and upload the metadata file
             metadata_path = os.path.join(temp_dir, f"{file_id}_metadata.json")
-
-
+
+            # Use binary mode to avoid any platform-specific text encoding issues
+            with open(metadata_path, "wb") as f:
+                # Encode the JSON with UTF-8 encoding explicitly
+                metadata_json = json.dumps(metadata, indent=2, ensure_ascii=False)
+                f.write(metadata_json.encode("utf-8"))
+
+            # Verify file was written correctly
+            if os.path.getsize(metadata_path) == 0:
+                raise ValueError("Failed to write metadata file (file size is 0)")
 
             if verbose:
                 print("Uploading metadata file...")
+                print(f"Metadata file size: {os.path.getsize(metadata_path)} bytes")
 
             # Upload the metadata file to IPFS
             metadata_cid_result = await self.upload_file(
@@ -1108,9 +1146,12 @@ class IPFSClient:
         if verbose:
             metadata_download_time = time.time() - start_time
             print(f"Metadata downloaded in {metadata_download_time:.2f} seconds")
+            print(f"Metadata file size: {os.path.getsize(metadata_path)} bytes")
 
-
-
+        # Read using binary mode to avoid any encoding issues
+        with open(metadata_path, "rb") as f:
+            metadata_content = f.read().decode("utf-8")
+            metadata = json.loads(metadata_content)
 
         # Step 2: Extract key information
         original_file = metadata["original_file"]
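Both sides of the metadata round trip now use binary I/O with an explicit UTF-8 encode/decode, which sidesteps platform-default encodings (e.g. cp1252 on Windows) and newline translation. The same pattern in isolation, as a standalone illustration:

```python
import json

metadata = {"original_file": {"name": "café.bin", "size": 123}}

# Write: encode explicitly rather than relying on the platform default.
payload = json.dumps(metadata, indent=2, ensure_ascii=False).encode("utf-8")
with open("metadata.json", "wb") as f:
    f.write(payload)

# Read: decode explicitly; the content round-trips byte-for-byte anywhere.
with open("metadata.json", "rb") as f:
    restored = json.loads(f.read().decode("utf-8"))

assert restored == metadata
```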
@@ -1191,8 +1232,12 @@ class IPFSClient:
         async def download_chunk(cid, path, chunk_info):
             async with encoded_chunks_semaphore:
                 try:
+                    # Always skip directory check for erasure code chunks
                     await self.download_file(
-                        cid,
+                        cid,
+                        path,
+                        max_retries=max_retries,
+                        skip_directory_check=True,
                     )
 
                     # Read chunk data
@@ -1522,10 +1567,11 @@ class IPFSClient:
         self, cid: str, cancel_from_blockchain: bool = True
     ) -> Dict[str, Any]:
         """
-        Delete a file from IPFS and optionally cancel its storage on the blockchain.
+        Delete a file or directory from IPFS and optionally cancel its storage on the blockchain.
+        If deleting a directory, all files within the directory will be unpinned recursively.
 
         Args:
-            cid: Content Identifier (CID) of the file to delete
+            cid: Content Identifier (CID) of the file/directory to delete
             cancel_from_blockchain: Whether to also cancel the storage request from the blockchain
 
         Returns:
@@ -1540,77 +1586,117 @@ class IPFSClient:
                 "end_time": None,
                 "duration_seconds": None,
             },
+            "is_directory": False,
+            "child_files": [],
         }
 
-        # First
+        # First check if this is a directory
        try:
-
-
-
-
-
-
+            ls_result = await self.client.ls(cid)
+            is_directory = ls_result.get("is_directory", False)
+            result["is_directory"] = is_directory
+
+            # If it's a directory, recursively unpin all contained files first
+            if is_directory:
+                print(f"Detected directory: {cid}")
+                links = []
+
+                # Extract all links from the directory listing
+                if "Objects" in ls_result and len(ls_result["Objects"]) > 0:
+                    for obj in ls_result["Objects"]:
+                        if "Links" in obj:
+                            links.extend(obj["Links"])
+
+                child_files = []
+                # Unpin each item in the directory
+                for link in links:
+                    link_hash = link.get("Hash")
+                    link_name = link.get("Name", "unknown")
+                    if link_hash:
+                        child_files.append({"cid": link_hash, "name": link_name})
+                        try:
+                            # Recursively delete if it's a subdirectory
+                            link_type = link.get("Type")
+                            if (
+                                link_type == 1
+                                or str(link_type) == "1"
+                                or link_type == "dir"
+                            ):
+                                # Recursive delete, but don't cancel from blockchain (we'll do that for parent)
+                                await self.delete_file(
+                                    link_hash, cancel_from_blockchain=False
+                                )
+                            else:
+                                # Regular file unpin
+                                try:
+                                    await self.client.unpin(link_hash)
+                                    print(
+                                        f"Unpinned file: {link_name} (CID: {link_hash})"
+                                    )
+                                except Exception as unpin_error:
+                                    # Just note the error but don't let it stop the whole process
+                                    # This is common with IPFS servers that may return 500 errors for
+                                    # unpinning content that was never explicitly pinned
+                                    print(
+                                        f"Note: Could not unpin {link_name}: {str(unpin_error).split('For more information')[0]}"
+                                    )
+                        except Exception as e:
+                            print(
+                                f"Warning: Problem processing child item {link_name}: {str(e).split('For more information')[0]}"
+                            )
 
+                # Record the child files that were processed
+                result["child_files"] = child_files
+        except Exception as e:
+            print(f"Warning: Failed to check if CID is a directory: {e}")
+            # Continue with regular file unpin
+
+        # Now unpin the main file/directory
+        try:
+            print(f"Unpinning from IPFS: {cid}")
             unpin_result = await self.client.unpin(cid)
             result["unpin_result"] = unpin_result
+            result["success"] = True
             print("Successfully unpinned from IPFS")
         except Exception as e:
-
-
+            # Handle 500 errors from IPFS server gracefully - they often occur
+            # when the content wasn't explicitly pinned or was already unpinned
+            error_str = str(e)
+            if "500 Internal Server Error" in error_str:
+                print(
+                    f"Note: IPFS server reported content may already be unpinned: {cid}"
+                )
+                result["unpin_result"] = {"Pins": [cid]}  # Simulate successful unpin
+                result["success"] = True
+            else:
+                print(
+                    f"Warning: Failed to unpin from IPFS: {error_str.split('For more information')[0]}"
+                )
+                result["success"] = False
 
         # Then, if requested, cancel from blockchain
         if cancel_from_blockchain:
             try:
-                # Create a substrate client
-                print(f"DEBUG: Creating SubstrateClient for blockchain cancellation...")
                 substrate_client = SubstrateClient()
-
-
-
-
-
-
-
-
-                # Check the return value - special cases for when blockchain cancellation isn't available
-                if tx_hash == "no-blockchain-cancellation-available":
-                    print(
-                        "Blockchain cancellation not available, but IPFS unpinning was successful"
-                    )
-                    result["blockchain_result"] = {
-                        "status": "not_available",
-                        "message": "Blockchain cancellation not available, but IPFS unpinning was successful",
-                    }
-                elif tx_hash.startswith("ipfs-unpinned-only"):
-                    error_msg = tx_hash.replace("ipfs-unpinned-only-", "")
+                await substrate_client.cancel_storage_request(cid)
+                print("Successfully cancelled storage from blockchain")
+                result["blockchain_result"] = {"success": True}
+            except Exception as e:
+                # Handle the case where the CID is not in storage requests
+                error_str = str(e)
+                if "not found in storage requests" in error_str:
                     print(
-
+                        "Note: Content was not found in blockchain storage requests (may already be deleted)"
                     )
                     result["blockchain_result"] = {
-                        "
-                        "
-                        "message": "IPFS unpinning successful, but blockchain cancellation failed",
+                        "success": True,
+                        "already_deleted": True,
                     }
                 else:
-
-                    result["blockchain_result"] = {
-                        "transaction_hash": tx_hash,
-                        "status": "success",
-                    }
-                    print(f"Successfully canceled storage request from blockchain")
-                    print(
-                        f"DEBUG: Blockchain cancellation succeeded with transaction hash: {tx_hash}"
-                    )
-            except Exception as e:
-                print(f"Warning: Failed to cancel storage from blockchain: {e}")
-                print(
-                    f"DEBUG: Blockchain cancellation exception: {type(e).__name__}: {str(e)}"
-                )
-                if hasattr(e, "__dict__"):
-                    print(f"DEBUG: Exception attributes: {e.__dict__}")
-                result["blockchain_error"] = str(e)
+                    print(f"Warning: Error cancelling from blockchain: {error_str}")
+                    result["blockchain_result"] = {"success": False, "error": error_str}
 
-        #
+        # Update timing information
         result["timing"]["end_time"] = time.time()
         result["timing"]["duration_seconds"] = (
             result["timing"]["end_time"] - result["timing"]["start_time"]
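With the `is_directory` and `child_files` fields added above, callers can see exactly what a deletion touched. A usage sketch (the CID is illustrative):

```python
# Inside an async context; "client" is an IPFSClient instance.
result = await client.delete_file("QmExampleDirCid")  # hypothetical CID
if result["is_directory"]:
    for child in result["child_files"]:
        print(f"Unpinned child {child['name']} ({child['cid']})")
print(
    f"success={result['success']}, "
    f"took {result['timing']['duration_seconds']:.2f}s"
)
```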
@@ -1623,7 +1709,7 @@ class IPFSClient:
         metadata_cid: str,
         cancel_from_blockchain: bool = True,
         parallel_limit: int = 20,
-    ) ->
+    ) -> bool:
         """
         Delete an erasure-coded file, including all its chunks in parallel.
 
@@ -1633,219 +1719,84 @@ class IPFSClient:
             parallel_limit: Maximum number of concurrent deletion operations
 
         Returns:
-
+            bool: True if the deletion was successful, False otherwise
         """
-        result = {
-            "metadata_cid": metadata_cid,
-            "deleted_chunks": [],
-            "failed_chunks": [],
-            "blockchain_result": None,
-            "timing": {
-                "start_time": time.time(),
-                "end_time": None,
-                "duration_seconds": None,
-            },
-        }
 
-        #
-
-
+        # Try to download and process metadata file and chunks
+        ipfs_failure = False
+        metadata_error = False
 
-        # First, get the metadata to find all chunks
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            # First download the metadata to get chunk CIDs
+            try:
+                metadata_result = await self.cat(metadata_cid)
+                metadata_json = json.loads(metadata_result["content"].decode("utf-8"))
+                chunks = metadata_json.get("chunks", [])
+            except json.JSONDecodeError:
+                # If we can't parse the metadata JSON, record the error but continue
+                metadata_error = True
+                # Continue with empty chunks so we can at least try to unpin the metadata file
+                chunks = []
+            except Exception:
+                # Any other metadata error
+                metadata_error = True
+                chunks = []
+
+            # Extract all chunk CIDs
+            chunk_cids = []
+            for chunk in chunks:
+                chunk_cid = chunk.get("cid", {})
+                if isinstance(chunk_cid, dict) and "cid" in chunk_cid:
+                    chunk_cids.append(chunk_cid["cid"])
+                elif isinstance(chunk_cid, str):
+                    chunk_cids.append(chunk_cid)
 
             # Create a semaphore to limit concurrent operations
-
+            semaphore = asyncio.Semaphore(parallel_limit)
 
-            # Define the chunk
-            async def
-                async with
+            # Define the unpin task for each chunk with error handling
+            async def unpin_chunk(cid):
+                async with semaphore:
                     try:
-
-
-
-                        # Record
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                                    "status": "not_available",
-                                    "message": "Blockchain cancellation not available",
-                                }
-                            )
-                        elif tx_hash.startswith("ipfs-unpinned-only"):
-                            error_msg = tx_hash.replace(
-                                "ipfs-unpinned-only-", ""
-                            )
-                            result["blockchain_result"]["chunk_results"].append(
-                                {
-                                    "cid": chunk_cid,
-                                    "status": "failed",
-                                    "error": error_msg,
-                                }
-                            )
-                        else:
-                            # Standard successful transaction
-                            result["blockchain_result"]["chunk_results"].append(
-                                {
-                                    "cid": chunk_cid,
-                                    "transaction_hash": tx_hash,
-                                    "status": "success",
-                                }
-                            )
-                    except Exception as e:
-                        print(
-                            f"Warning: Failed to cancel blockchain storage for chunk {chunk_cid}: {e}"
-                        )
-
-                        if "chunk_results" not in result["blockchain_result"]:
-                            result["blockchain_result"] = {}
-                            result["blockchain_result"]["chunk_results"] = []
-
-                        result["blockchain_result"]["chunk_results"].append(
-                            {
-                                "cid": chunk_cid,
-                                "error": str(e),
-                                "status": "failed",
-                            }
-                        )
-
-                    return True
-                except Exception as e:
-                    error_msg = f"Failed to delete chunk {chunk_cid}: {e}"
-                    print(f"Warning: {error_msg}")
-
-                    # Record failure
-                    async with failed_chunks_lock:
-                        result["failed_chunks"].append(
-                            {"cid": chunk_cid, "error": str(e)}
-                        )
-
-                    return False
-
-        # Start deleting chunks in parallel
-        print(
-            f"Starting parallel deletion of {total_chunks} chunks with max {parallel_limit} concurrent operations"
-        )
-        delete_tasks = [delete_chunk(cid) for cid in chunks]
-        await asyncio.gather(*delete_tasks)
-
-        # Delete the metadata file itself
-        print(f"Unpinning metadata file: {metadata_cid}")
-        response = await self.client.unpin(metadata_cid)
-
-        print(">>>", response)
-        raise SystemExit
-
-        # Cancel metadata from blockchain if requested
-        if cancel_from_blockchain:
-            try:
-                print(f"Canceling blockchain storage request for metadata file...")
-                substrate_client = SubstrateClient()
-                tx_hash = await substrate_client.cancel_storage_request(
-                    metadata_cid
-                )
-
-                # Handle special return values from cancel_storage_request
-                if tx_hash == "no-blockchain-cancellation-available":
-                    print(
-                        "Blockchain cancellation not available for metadata, but IPFS unpinning was successful"
-                    )
-                    result["blockchain_result"] = {
-                        "status": "not_available",
-                        "message": "Blockchain cancellation not available, but IPFS unpinning was successful",
-                    }
-                elif tx_hash.startswith("ipfs-unpinned-only"):
-                    error_msg = tx_hash.replace("ipfs-unpinned-only-", "")
-                    print(
-                        f"IPFS unpinning successful, but blockchain cancellation failed for metadata: {error_msg}"
-                    )
-                    result["blockchain_result"] = {
-                        "status": "failed",
-                        "error": error_msg,
-                        "message": "IPFS unpinning successful, but blockchain cancellation failed",
-                    }
-                else:
-                    # Standard successful transaction
-                    result["blockchain_result"] = {
-                        "metadata_transaction_hash": tx_hash,
-                        "status": "success",
-                    }
-                    print(
-                        f"Successfully canceled blockchain storage for metadata file"
-                    )
-            except Exception as e:
-                print(
-                    f"Warning: Failed to cancel blockchain storage for metadata file: {e}"
-                )
-
-                if not result["blockchain_result"]:
-                    result["blockchain_result"] = {}
-
-                result["blockchain_result"]["metadata_error"] = str(e)
-                result["blockchain_result"]["status"] = "failed"
-
-        # Calculate and record timing information
-        end_time = time.time()
-        duration = end_time - result["timing"]["start_time"]
-
-        result["timing"]["end_time"] = end_time
-        result["timing"]["duration_seconds"] = duration
-
-        deleted_count = len(result["deleted_chunks"])
-        failed_count = len(result["failed_chunks"])
-
-        print(f"Deletion complete in {duration:.2f} seconds!")
-        print(f"Successfully deleted: {deleted_count}/{total_chunks} chunks")
-
-        if failed_count > 0:
-            print(f"Failed to delete: {failed_count}/{total_chunks} chunks")
+                        await self.client.unpin(cid)
+                        return {"success": True, "cid": cid}
+                    except Exception:
+                        # Record failure but continue with other chunks
+                        return {"success": False, "cid": cid}
+
+            # Unpin all chunks in parallel
+            if chunk_cids:
+                unpin_tasks = [unpin_chunk(cid) for cid in chunk_cids]
+                results = await asyncio.gather(*unpin_tasks)
+
+                # Count failures
+                failures = [r for r in results if not r["success"]]
+                if failures:
+                    ipfs_failure = True
+        except Exception:
+            # If we can't process chunks at all, record the failure
+            ipfs_failure = True
+
+        # Unpin the metadata file itself, regardless of whether we could process chunks
+        try:
+            await self.client.unpin(metadata_cid)
+        except Exception:
+            # Record the failure but continue with blockchain cancellation
+            ipfs_failure = True
 
-
-
-        #
-
-        result["timing"]["duration_seconds"] = (
-            result["timing"]["end_time"] - result["timing"]["start_time"]
-        )
+        # Handle blockchain cancellation if requested
+        if cancel_from_blockchain:
+            # Create a substrate client
+            substrate_client = SubstrateClient()
 
-
-
-
+            # This will raise appropriate exceptions if it fails:
+            # - HippiusAlreadyDeletedError if already deleted
+            # - HippiusFailedSubstrateDelete if transaction fails
+            # - Other exceptions for other failures
+            await substrate_client.cancel_storage_request(metadata_cid)
+
+        # If we get here, either:
+        # 1. Blockchain cancellation succeeded (if requested)
+        # 2. We weren't doing blockchain cancellation
+        # In either case, we report success
+        return True