hippius 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hippius-0.2.4.dist-info → hippius-0.2.5.dist-info}/METADATA +1 -1
- hippius-0.2.5.dist-info/RECORD +17 -0
- hippius_sdk/__init__.py +21 -10
- hippius_sdk/cli.py +11 -0
- hippius_sdk/cli_handlers.py +256 -48
- hippius_sdk/cli_parser.py +20 -0
- hippius_sdk/cli_rich.py +8 -2
- hippius_sdk/client.py +5 -3
- hippius_sdk/errors.py +77 -0
- hippius_sdk/ipfs.py +237 -297
- hippius_sdk/ipfs_core.py +209 -9
- hippius_sdk/substrate.py +101 -14
- hippius-0.2.4.dist-info/RECORD +0 -16
- {hippius-0.2.4.dist-info → hippius-0.2.5.dist-info}/WHEEL +0 -0
- {hippius-0.2.4.dist-info → hippius-0.2.5.dist-info}/entry_points.txt +0 -0
hippius_sdk/ipfs.py
CHANGED
```diff
@@ -16,6 +16,13 @@ import httpx
 import requests
 
 from hippius_sdk.config import get_config_value, get_encryption_key
+from hippius_sdk.errors import (
+    HippiusAlreadyDeletedError,
+    HippiusFailedIPFSUnpin,
+    HippiusFailedSubstrateDelete,
+    HippiusIPFSConnectionError,
+    HippiusMetadataError,
+)
 from hippius_sdk.ipfs_core import AsyncIPFSClient
 from hippius_sdk.substrate import FileInput, SubstrateClient
 from hippius_sdk.utils import format_cid, format_size
```
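The new `hippius_sdk.errors` module gives callers typed exceptions to branch on instead of parsing message strings. A minimal sketch of catching them around a blockchain cancellation; the raise behavior is taken from the comments in the delete hunks further down, and the surrounding setup is assumed:

```python
from hippius_sdk.errors import (
    HippiusAlreadyDeletedError,
    HippiusFailedSubstrateDelete,
)
from hippius_sdk.substrate import SubstrateClient

async def cancel_storage(cid: str) -> bool:
    """Cancel a storage request, treating 'already deleted' as success."""
    client = SubstrateClient()  # assumes default configuration is sufficient
    try:
        await client.cancel_storage_request(cid)
        return True
    except HippiusAlreadyDeletedError:
        # Nothing left to cancel on-chain
        return True
    except HippiusFailedSubstrateDelete as exc:
        print(f"Substrate cancellation failed: {exc}")
        return False
```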
```diff
@@ -439,10 +446,11 @@ class IPFSClient:
     ) -> Dict[str, Any]:
         """
         Download a file from IPFS with optional decryption.
+        Supports downloading directories - in that case, a directory structure will be created.
 
         Args:
             cid: Content Identifier (CID) of the file to download
-            output_path: Path where the downloaded file will be saved
+            output_path: Path where the downloaded file/directory will be saved
             decrypt: Whether to decrypt the file (overrides default)
             max_retries: Maximum number of retry attempts (default: 3)
 
```
```diff
@@ -454,6 +462,7 @@ class IPFSClient:
             - size_formatted: Human-readable file size
             - elapsed_seconds: Time taken for the download in seconds
             - decrypted: Whether the file was decrypted
+            - is_directory: Whether the download was a directory
 
         Raises:
             requests.RequestException: If the download fails
```
```diff
@@ -461,6 +470,45 @@ class IPFSClient:
         """
         start_time = time.time()
 
+        # Use the improved ls function to properly detect directories
+        is_directory = False
+        try:
+            # The ls function now properly detects directories
+            ls_result = await self.client.ls(cid)
+            is_directory = ls_result.get("is_directory", False)
+        except Exception:
+            # If ls fails, we'll proceed as if it's a file
+            pass
+
+        # If it's a directory, handle it differently
+        if is_directory:
+            # For directories, we don't need to decrypt each file during the initial download
+            # We'll use the AsyncIPFSClient's download_directory method directly
+            try:
+                await self.client.download_directory(cid, output_path)
+
+                # Calculate the total size of the directory
+                total_size = 0
+                for root, _, files in os.walk(output_path):
+                    for file in files:
+                        file_path = os.path.join(root, file)
+                        total_size += os.path.getsize(file_path)
+
+                elapsed_time = time.time() - start_time
+
+                return {
+                    "success": True,
+                    "output_path": output_path,
+                    "size_bytes": total_size,
+                    "size_formatted": self.format_size(total_size),
+                    "elapsed_seconds": round(elapsed_time, 2),
+                    "decrypted": False,  # Directories aren't decrypted as a whole
+                    "is_directory": True,
+                }
+            except Exception as e:
+                raise RuntimeError(f"Failed to download directory: {str(e)}")
+
+        # For regular files, use the existing logic
         # Determine if we should decrypt
         should_decrypt = self.encrypt_by_default if decrypt is None else decrypt
 
```
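From the caller's side, the same download call now transparently handles directory CIDs and reports which path was taken. A usage sketch, assuming the enclosing method shown above is `IPFSClient.download_file` and that a default-configured client works in your environment (the CID and destination are placeholders):

```python
import asyncio

from hippius_sdk.ipfs import IPFSClient

async def fetch(cid: str, dest: str) -> None:
    client = IPFSClient()  # assumed default construction
    result = await client.download_file(cid, dest)
    if result["is_directory"]:
        # Directory CIDs are restored as a directory tree under dest
        print(f"Restored directory at {result['output_path']} ({result['size_formatted']})")
    else:
        print(f"Saved file ({result['size_formatted']}), decrypted={result['decrypted']}")

asyncio.run(fetch("QmPlaceholderCid", "./downloads/out"))
```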
```diff
@@ -482,38 +530,11 @@ class IPFSClient:
         else:
             download_path = output_path
 
-
-
-        while retries < max_retries:
-            try:
-                # Download the file
-                url = f"{self.gateway}/ipfs/{cid}"
-                response = requests.get(url, stream=True)
-                response.raise_for_status()
-
-                os.makedirs(
-                    os.path.dirname(os.path.abspath(download_path)), exist_ok=True
-                )
-
-                with open(download_path, "wb") as f:
-                    for chunk in response.iter_content(chunk_size=8192):
-                        f.write(chunk)
-
-                # If we reach here, download was successful
-                break
+        await self.client.download_file(cid, download_path)
+        download_success = True
 
-
-
-                retries += 1
-
-                if retries < max_retries:
-                    wait_time = 2**retries  # Exponential backoff: 2, 4, 8 seconds
-                    print(f"Download attempt {retries} failed: {str(e)}")
-                    print(f"Retrying in {wait_time} seconds...")
-                    time.sleep(wait_time)
-                else:
-                    # Raise the last error if we've exhausted all retries
-                    raise
+        if not download_success:
+            raise RuntimeError("Failed to download file after multiple attempts")
 
         # Decrypt if needed
         if should_decrypt:
```
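The old requests-based loop with exponential backoff (2, 4, 8 seconds) is gone; the method now delegates to `AsyncIPFSClient.download_file` in a single call. Callers who still want the old backoff can wrap the call themselves; a hypothetical helper, not part of the SDK:

```python
import asyncio

async def download_with_retries(client, cid: str, path: str, max_retries: int = 3):
    """Retry a download with the 2/4/8-second backoff the removed loop used."""
    for attempt in range(1, max_retries + 1):
        try:
            return await client.download_file(cid, path)
        except Exception as e:
            if attempt == max_retries:
                raise  # exhausted all retries, surface the last error
            wait = 2 ** attempt  # exponential backoff as in the old code
            print(f"Download attempt {attempt} failed: {e}; retrying in {wait}s")
            await asyncio.sleep(wait)
```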
```diff
@@ -548,6 +569,7 @@ class IPFSClient:
                 "size_formatted": self.format_size(file_size_bytes),
                 "elapsed_seconds": round(elapsed_time, 2),
                 "decrypted": should_decrypt,
+                "is_directory": False,
             }
 
         finally:
```
```diff
@@ -1031,11 +1053,20 @@ class IPFSClient:
 
         # Step 5: Create and upload the metadata file
         metadata_path = os.path.join(temp_dir, f"{file_id}_metadata.json")
-
-
+
+        # Use binary mode to avoid any platform-specific text encoding issues
+        with open(metadata_path, "wb") as f:
+            # Encode the JSON with UTF-8 encoding explicitly
+            metadata_json = json.dumps(metadata, indent=2, ensure_ascii=False)
+            f.write(metadata_json.encode("utf-8"))
+
+        # Verify file was written correctly
+        if os.path.getsize(metadata_path) == 0:
+            raise ValueError("Failed to write metadata file (file size is 0)")
 
         if verbose:
             print("Uploading metadata file...")
+            print(f"Metadata file size: {os.path.getsize(metadata_path)} bytes")
 
         # Upload the metadata file to IPFS
         metadata_cid_result = await self.upload_file(
```
```diff
@@ -1108,9 +1139,12 @@ class IPFSClient:
         if verbose:
             metadata_download_time = time.time() - start_time
             print(f"Metadata downloaded in {metadata_download_time:.2f} seconds")
+            print(f"Metadata file size: {os.path.getsize(metadata_path)} bytes")
 
-
-
+        # Read using binary mode to avoid any encoding issues
+        with open(metadata_path, "rb") as f:
+            metadata_content = f.read().decode("utf-8")
+            metadata = json.loads(metadata_content)
 
         # Step 2: Extract key information
         original_file = metadata["original_file"]
```
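Both metadata hunks switch to the same pattern: serialize once, write explicit UTF-8 bytes in binary mode, then read them back the same way, so the platform's default text encoding (e.g. cp1252 on Windows) never touches the JSON. A self-contained illustration of the round trip:

```python
import json

metadata = {"original_file": {"name": "café.bin", "size_bytes": 1024}}

# Write: encode explicitly rather than relying on the platform default
with open("metadata.json", "wb") as f:
    f.write(json.dumps(metadata, indent=2, ensure_ascii=False).encode("utf-8"))

# Read: decode the same way
with open("metadata.json", "rb") as f:
    restored = json.loads(f.read().decode("utf-8"))

assert restored == metadata
```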
```diff
@@ -1522,10 +1556,11 @@ class IPFSClient:
         self, cid: str, cancel_from_blockchain: bool = True
     ) -> Dict[str, Any]:
         """
-        Delete a file from IPFS and optionally cancel its storage on the blockchain.
+        Delete a file or directory from IPFS and optionally cancel its storage on the blockchain.
+        If deleting a directory, all files within the directory will be unpinned recursively.
 
         Args:
-            cid: Content Identifier (CID) of the file to delete
+            cid: Content Identifier (CID) of the file/directory to delete
             cancel_from_blockchain: Whether to also cancel the storage request from the blockchain
 
         Returns:
```
```diff
@@ -1540,77 +1575,117 @@ class IPFSClient:
                 "end_time": None,
                 "duration_seconds": None,
             },
+            "is_directory": False,
+            "child_files": [],
         }
 
-        # First
+        # First check if this is a directory
        try:
-
-
-
-
-
-
+            ls_result = await self.client.ls(cid)
+            is_directory = ls_result.get("is_directory", False)
+            result["is_directory"] = is_directory
+
+            # If it's a directory, recursively unpin all contained files first
+            if is_directory:
+                print(f"Detected directory: {cid}")
+                links = []
+
+                # Extract all links from the directory listing
+                if "Objects" in ls_result and len(ls_result["Objects"]) > 0:
+                    for obj in ls_result["Objects"]:
+                        if "Links" in obj:
+                            links.extend(obj["Links"])
+
+                child_files = []
+                # Unpin each item in the directory
+                for link in links:
+                    link_hash = link.get("Hash")
+                    link_name = link.get("Name", "unknown")
+                    if link_hash:
+                        child_files.append({"cid": link_hash, "name": link_name})
+                        try:
+                            # Recursively delete if it's a subdirectory
+                            link_type = link.get("Type")
+                            if (
+                                link_type == 1
+                                or str(link_type) == "1"
+                                or link_type == "dir"
+                            ):
+                                # Recursive delete, but don't cancel from blockchain (we'll do that for parent)
+                                await self.delete_file(
+                                    link_hash, cancel_from_blockchain=False
+                                )
+                            else:
+                                # Regular file unpin
+                                try:
+                                    await self.client.unpin(link_hash)
+                                    print(
+                                        f"Unpinned file: {link_name} (CID: {link_hash})"
+                                    )
+                                except Exception as unpin_error:
+                                    # Just note the error but don't let it stop the whole process
+                                    # This is common with IPFS servers that may return 500 errors for
+                                    # unpinning content that was never explicitly pinned
+                                    print(
+                                        f"Note: Could not unpin {link_name}: {str(unpin_error).split('For more information')[0]}"
+                                    )
+                        except Exception as e:
+                            print(
+                                f"Warning: Problem processing child item {link_name}: {str(e).split('For more information')[0]}"
+                            )
 
+                # Record the child files that were processed
+                result["child_files"] = child_files
+        except Exception as e:
+            print(f"Warning: Failed to check if CID is a directory: {e}")
+            # Continue with regular file unpin
+
+        # Now unpin the main file/directory
+        try:
+            print(f"Unpinning from IPFS: {cid}")
             unpin_result = await self.client.unpin(cid)
             result["unpin_result"] = unpin_result
+            result["success"] = True
             print("Successfully unpinned from IPFS")
         except Exception as e:
-
-
+            # Handle 500 errors from IPFS server gracefully - they often occur
+            # when the content wasn't explicitly pinned or was already unpinned
+            error_str = str(e)
+            if "500 Internal Server Error" in error_str:
+                print(
+                    f"Note: IPFS server reported content may already be unpinned: {cid}"
+                )
+                result["unpin_result"] = {"Pins": [cid]}  # Simulate successful unpin
+                result["success"] = True
+            else:
+                print(
+                    f"Warning: Failed to unpin from IPFS: {error_str.split('For more information')[0]}"
+                )
+                result["success"] = False
 
         # Then, if requested, cancel from blockchain
         if cancel_from_blockchain:
             try:
-                # Create a substrate client
-                print(f"DEBUG: Creating SubstrateClient for blockchain cancellation...")
                 substrate_client = SubstrateClient()
-
-
-
-
-
-
-
-
-                # Check the return value - special cases for when blockchain cancellation isn't available
-                if tx_hash == "no-blockchain-cancellation-available":
-                    print(
-                        "Blockchain cancellation not available, but IPFS unpinning was successful"
-                    )
-                    result["blockchain_result"] = {
-                        "status": "not_available",
-                        "message": "Blockchain cancellation not available, but IPFS unpinning was successful",
-                    }
-                elif tx_hash.startswith("ipfs-unpinned-only"):
-                    error_msg = tx_hash.replace("ipfs-unpinned-only-", "")
+                await substrate_client.cancel_storage_request(cid)
+                print("Successfully cancelled storage from blockchain")
+                result["blockchain_result"] = {"success": True}
+            except Exception as e:
+                # Handle the case where the CID is not in storage requests
+                error_str = str(e)
+                if "not found in storage requests" in error_str:
                     print(
-
+                        "Note: Content was not found in blockchain storage requests (may already be deleted)"
                     )
                     result["blockchain_result"] = {
-                        "
-                        "
-                        "message": "IPFS unpinning successful, but blockchain cancellation failed",
+                        "success": True,
+                        "already_deleted": True,
                     }
                 else:
-
-                    result["blockchain_result"] = {
-                        "transaction_hash": tx_hash,
-                        "status": "success",
-                    }
-                    print(f"Successfully canceled storage request from blockchain")
-                    print(
-                        f"DEBUG: Blockchain cancellation succeeded with transaction hash: {tx_hash}"
-                    )
-            except Exception as e:
-                print(f"Warning: Failed to cancel storage from blockchain: {e}")
-                print(
-                    f"DEBUG: Blockchain cancellation exception: {type(e).__name__}: {str(e)}"
-                )
-                if hasattr(e, "__dict__"):
-                    print(f"DEBUG: Exception attributes: {e.__dict__}")
-                result["blockchain_error"] = str(e)
+                    print(f"Warning: Error cancelling from blockchain: {error_str}")
+                    result["blockchain_result"] = {"success": False, "error": error_str}
 
-        #
+        # Update timing information
         result["timing"]["end_time"] = time.time()
         result["timing"]["duration_seconds"] = (
             result["timing"]["end_time"] - result["timing"]["start_time"]
```
|
|
1623
1698
|
metadata_cid: str,
|
1624
1699
|
cancel_from_blockchain: bool = True,
|
1625
1700
|
parallel_limit: int = 20,
|
1626
|
-
) ->
|
1701
|
+
) -> bool:
|
1627
1702
|
"""
|
1628
1703
|
Delete an erasure-coded file, including all its chunks in parallel.
|
1629
1704
|
|
@@ -1633,219 +1708,84 @@ class IPFSClient:
|
|
1633
1708
|
parallel_limit: Maximum number of concurrent deletion operations
|
1634
1709
|
|
1635
1710
|
Returns:
|
1636
|
-
|
1711
|
+
bool: True if the deletion was successful, False otherwise
|
1637
1712
|
"""
|
1638
|
-
result = {
|
1639
|
-
"metadata_cid": metadata_cid,
|
1640
|
-
"deleted_chunks": [],
|
1641
|
-
"failed_chunks": [],
|
1642
|
-
"blockchain_result": None,
|
1643
|
-
"timing": {
|
1644
|
-
"start_time": time.time(),
|
1645
|
-
"end_time": None,
|
1646
|
-
"duration_seconds": None,
|
1647
|
-
},
|
1648
|
-
}
|
1649
1713
|
|
1650
|
-
#
|
1651
|
-
|
1652
|
-
|
1714
|
+
# Try to download and process metadata file and chunks
|
1715
|
+
ipfs_failure = False
|
1716
|
+
metadata_error = False
|
1653
1717
|
|
1654
|
-
# First, get the metadata to find all chunks
|
1655
1718
|
try:
|
1656
|
-
|
1657
|
-
|
1658
|
-
|
1659
|
-
|
1660
|
-
|
1661
|
-
|
1662
|
-
|
1663
|
-
|
1664
|
-
|
1665
|
-
|
1666
|
-
|
1667
|
-
|
1668
|
-
|
1669
|
-
|
1670
|
-
|
1671
|
-
|
1672
|
-
|
1673
|
-
|
1674
|
-
|
1675
|
-
|
1719
|
+
# First download the metadata to get chunk CIDs
|
1720
|
+
try:
|
1721
|
+
metadata_result = await self.cat(metadata_cid)
|
1722
|
+
metadata_json = json.loads(metadata_result["content"].decode("utf-8"))
|
1723
|
+
chunks = metadata_json.get("chunks", [])
|
1724
|
+
except json.JSONDecodeError:
|
1725
|
+
# If we can't parse the metadata JSON, record the error but continue
|
1726
|
+
metadata_error = True
|
1727
|
+
# Continue with empty chunks so we can at least try to unpin the metadata file
|
1728
|
+
chunks = []
|
1729
|
+
except Exception:
|
1730
|
+
# Any other metadata error
|
1731
|
+
metadata_error = True
|
1732
|
+
chunks = []
|
1733
|
+
|
1734
|
+
# Extract all chunk CIDs
|
1735
|
+
chunk_cids = []
|
1736
|
+
for chunk in chunks:
|
1737
|
+
chunk_cid = chunk.get("cid", {})
|
1738
|
+
if isinstance(chunk_cid, dict) and "cid" in chunk_cid:
|
1739
|
+
chunk_cids.append(chunk_cid["cid"])
|
1740
|
+
elif isinstance(chunk_cid, str):
|
1741
|
+
chunk_cids.append(chunk_cid)
|
1676
1742
|
|
1677
1743
|
# Create a semaphore to limit concurrent operations
|
1678
|
-
|
1744
|
+
semaphore = asyncio.Semaphore(parallel_limit)
|
1679
1745
|
|
1680
|
-
# Define the chunk
|
1681
|
-
async def
|
1682
|
-
async with
|
1746
|
+
# Define the unpin task for each chunk with error handling
|
1747
|
+
async def unpin_chunk(cid):
|
1748
|
+
async with semaphore:
|
1683
1749
|
try:
|
1684
|
-
|
1685
|
-
|
1686
|
-
|
1687
|
-
# Record
|
1688
|
-
|
1689
|
-
|
1690
|
-
|
1691
|
-
|
1692
|
-
|
1693
|
-
|
1694
|
-
|
1695
|
-
|
1696
|
-
|
1697
|
-
|
1698
|
-
|
1699
|
-
|
1700
|
-
|
1701
|
-
|
1702
|
-
|
1703
|
-
|
1704
|
-
|
1705
|
-
|
1706
|
-
|
1707
|
-
|
1708
|
-
|
1709
|
-
"status": "not_available",
|
1710
|
-
"message": "Blockchain cancellation not available",
|
1711
|
-
}
|
1712
|
-
)
|
1713
|
-
elif tx_hash.startswith("ipfs-unpinned-only"):
|
1714
|
-
error_msg = tx_hash.replace(
|
1715
|
-
"ipfs-unpinned-only-", ""
|
1716
|
-
)
|
1717
|
-
result["blockchain_result"]["chunk_results"].append(
|
1718
|
-
{
|
1719
|
-
"cid": chunk_cid,
|
1720
|
-
"status": "failed",
|
1721
|
-
"error": error_msg,
|
1722
|
-
}
|
1723
|
-
)
|
1724
|
-
else:
|
1725
|
-
# Standard successful transaction
|
1726
|
-
result["blockchain_result"]["chunk_results"].append(
|
1727
|
-
{
|
1728
|
-
"cid": chunk_cid,
|
1729
|
-
"transaction_hash": tx_hash,
|
1730
|
-
"status": "success",
|
1731
|
-
}
|
1732
|
-
)
|
1733
|
-
except Exception as e:
|
1734
|
-
print(
|
1735
|
-
f"Warning: Failed to cancel blockchain storage for chunk {chunk_cid}: {e}"
|
1736
|
-
)
|
1737
|
-
|
1738
|
-
if "chunk_results" not in result["blockchain_result"]:
|
1739
|
-
result["blockchain_result"] = {}
|
1740
|
-
result["blockchain_result"]["chunk_results"] = []
|
1741
|
-
|
1742
|
-
result["blockchain_result"]["chunk_results"].append(
|
1743
|
-
{
|
1744
|
-
"cid": chunk_cid,
|
1745
|
-
"error": str(e),
|
1746
|
-
"status": "failed",
|
1747
|
-
}
|
1748
|
-
)
|
1749
|
-
|
1750
|
-
return True
|
1751
|
-
except Exception as e:
|
1752
|
-
error_msg = f"Failed to delete chunk {chunk_cid}: {e}"
|
1753
|
-
print(f"Warning: {error_msg}")
|
1754
|
-
|
1755
|
-
# Record failure
|
1756
|
-
async with failed_chunks_lock:
|
1757
|
-
result["failed_chunks"].append(
|
1758
|
-
{"cid": chunk_cid, "error": str(e)}
|
1759
|
-
)
|
1760
|
-
|
1761
|
-
return False
|
1762
|
-
|
1763
|
-
# Start deleting chunks in parallel
|
1764
|
-
print(
|
1765
|
-
f"Starting parallel deletion of {total_chunks} chunks with max {parallel_limit} concurrent operations"
|
1766
|
-
)
|
1767
|
-
delete_tasks = [delete_chunk(cid) for cid in chunks]
|
1768
|
-
await asyncio.gather(*delete_tasks)
|
1769
|
-
|
1770
|
-
# Delete the metadata file itself
|
1771
|
-
print(f"Unpinning metadata file: {metadata_cid}")
|
1772
|
-
response = await self.client.unpin(metadata_cid)
|
1773
|
-
|
1774
|
-
print(">>>", response)
|
1775
|
-
raise SystemExit
|
1776
|
-
|
1777
|
-
# Cancel metadata from blockchain if requested
|
1778
|
-
if cancel_from_blockchain:
|
1779
|
-
try:
|
1780
|
-
print(f"Canceling blockchain storage request for metadata file...")
|
1781
|
-
substrate_client = SubstrateClient()
|
1782
|
-
tx_hash = await substrate_client.cancel_storage_request(
|
1783
|
-
metadata_cid
|
1784
|
-
)
|
1785
|
-
|
1786
|
-
# Handle special return values from cancel_storage_request
|
1787
|
-
if tx_hash == "no-blockchain-cancellation-available":
|
1788
|
-
print(
|
1789
|
-
"Blockchain cancellation not available for metadata, but IPFS unpinning was successful"
|
1790
|
-
)
|
1791
|
-
result["blockchain_result"] = {
|
1792
|
-
"status": "not_available",
|
1793
|
-
"message": "Blockchain cancellation not available, but IPFS unpinning was successful",
|
1794
|
-
}
|
1795
|
-
elif tx_hash.startswith("ipfs-unpinned-only"):
|
1796
|
-
error_msg = tx_hash.replace("ipfs-unpinned-only-", "")
|
1797
|
-
print(
|
1798
|
-
f"IPFS unpinning successful, but blockchain cancellation failed for metadata: {error_msg}"
|
1799
|
-
)
|
1800
|
-
result["blockchain_result"] = {
|
1801
|
-
"status": "failed",
|
1802
|
-
"error": error_msg,
|
1803
|
-
"message": "IPFS unpinning successful, but blockchain cancellation failed",
|
1804
|
-
}
|
1805
|
-
else:
|
1806
|
-
# Standard successful transaction
|
1807
|
-
result["blockchain_result"] = {
|
1808
|
-
"metadata_transaction_hash": tx_hash,
|
1809
|
-
"status": "success",
|
1810
|
-
}
|
1811
|
-
print(
|
1812
|
-
f"Successfully canceled blockchain storage for metadata file"
|
1813
|
-
)
|
1814
|
-
except Exception as e:
|
1815
|
-
print(
|
1816
|
-
f"Warning: Failed to cancel blockchain storage for metadata file: {e}"
|
1817
|
-
)
|
1818
|
-
|
1819
|
-
if not result["blockchain_result"]:
|
1820
|
-
result["blockchain_result"] = {}
|
1821
|
-
|
1822
|
-
result["blockchain_result"]["metadata_error"] = str(e)
|
1823
|
-
result["blockchain_result"]["status"] = "failed"
|
1824
|
-
|
1825
|
-
# Calculate and record timing information
|
1826
|
-
end_time = time.time()
|
1827
|
-
duration = end_time - result["timing"]["start_time"]
|
1828
|
-
|
1829
|
-
result["timing"]["end_time"] = end_time
|
1830
|
-
result["timing"]["duration_seconds"] = duration
|
1831
|
-
|
1832
|
-
deleted_count = len(result["deleted_chunks"])
|
1833
|
-
failed_count = len(result["failed_chunks"])
|
1834
|
-
|
1835
|
-
print(f"Deletion complete in {duration:.2f} seconds!")
|
1836
|
-
print(f"Successfully deleted: {deleted_count}/{total_chunks} chunks")
|
1837
|
-
|
1838
|
-
if failed_count > 0:
|
1839
|
-
print(f"Failed to delete: {failed_count}/{total_chunks} chunks")
|
1750
|
+
await self.client.unpin(cid)
|
1751
|
+
return {"success": True, "cid": cid}
|
1752
|
+
except Exception:
|
1753
|
+
# Record failure but continue with other chunks
|
1754
|
+
return {"success": False, "cid": cid}
|
1755
|
+
|
1756
|
+
# Unpin all chunks in parallel
|
1757
|
+
if chunk_cids:
|
1758
|
+
unpin_tasks = [unpin_chunk(cid) for cid in chunk_cids]
|
1759
|
+
results = await asyncio.gather(*unpin_tasks)
|
1760
|
+
|
1761
|
+
# Count failures
|
1762
|
+
failures = [r for r in results if not r["success"]]
|
1763
|
+
if failures:
|
1764
|
+
ipfs_failure = True
|
1765
|
+
except Exception:
|
1766
|
+
# If we can't process chunks at all, record the failure
|
1767
|
+
ipfs_failure = True
|
1768
|
+
|
1769
|
+
# Unpin the metadata file itself, regardless of whether we could process chunks
|
1770
|
+
try:
|
1771
|
+
await self.client.unpin(metadata_cid)
|
1772
|
+
except Exception:
|
1773
|
+
# Record the failure but continue with blockchain cancellation
|
1774
|
+
ipfs_failure = True
|
1840
1775
|
|
1841
|
-
|
1842
|
-
|
1843
|
-
#
|
1844
|
-
|
1845
|
-
result["timing"]["duration_seconds"] = (
|
1846
|
-
result["timing"]["end_time"] - result["timing"]["start_time"]
|
1847
|
-
)
|
1776
|
+
# Handle blockchain cancellation if requested
|
1777
|
+
if cancel_from_blockchain:
|
1778
|
+
# Create a substrate client
|
1779
|
+
substrate_client = SubstrateClient()
|
1848
1780
|
|
1849
|
-
|
1850
|
-
|
1851
|
-
|
1781
|
+
# This will raise appropriate exceptions if it fails:
|
1782
|
+
# - HippiusAlreadyDeletedError if already deleted
|
1783
|
+
# - HippiusFailedSubstrateDelete if transaction fails
|
1784
|
+
# - Other exceptions for other failures
|
1785
|
+
await substrate_client.cancel_storage_request(metadata_cid)
|
1786
|
+
|
1787
|
+
# If we get here, either:
|
1788
|
+
# 1. Blockchain cancellation succeeded (if requested)
|
1789
|
+
# 2. We weren't doing blockchain cancellation
|
1790
|
+
# In either case, we report success
|
1791
|
+
return True
|
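The erasure-coded delete now returns a plain `bool` and signals failures through the typed exceptions imported at the top of this diff. A usage sketch; the method name `delete_ec_file` is an assumption, since the `def` line sits outside these hunks:

```python
import asyncio

from hippius_sdk.errors import HippiusAlreadyDeletedError, HippiusFailedSubstrateDelete
from hippius_sdk.ipfs import IPFSClient

async def remove_ec(metadata_cid: str) -> bool:
    client = IPFSClient()  # assumed default construction
    try:
        # delete_ec_file is an assumed name; the hunks above only show its body
        return await client.delete_ec_file(metadata_cid, cancel_from_blockchain=True)
    except HippiusAlreadyDeletedError:
        return True  # storage request already gone on-chain
    except HippiusFailedSubstrateDelete as exc:
        print(f"Chunks unpinned, but substrate cancellation failed: {exc}")
        return False

asyncio.run(remove_ec("QmPlaceholderMetadataCid"))
```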