hippius 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hippius_sdk/ipfs.py CHANGED
@@ -10,7 +10,7 @@ import shutil
 import tempfile
 import time
 import uuid
-from typing import Any, Dict, List, Optional
+from typing import Any, Callable, Dict, List, Optional
 
 import httpx
 import requests
@@ -66,7 +66,7 @@ class IPFSClient:
         """
         # Load configuration values if not explicitly provided
         if gateway is None:
-            gateway = get_config_value("ipfs", "gateway", "https://ipfs.io")
+            gateway = get_config_value("ipfs", "gateway", "https://get.hippius.network")
 
         if api_url is None:
             api_url = get_config_value(
@@ -84,11 +84,12 @@ class IPFSClient:
         self.base_url = api_url
 
         try:
-            self.client = AsyncIPFSClient(api_url)
+            self.client = AsyncIPFSClient(api_url=api_url, gateway=self.gateway)
         except httpx.ConnectError as e:
-            print(f"Warning: Could not connect to IPFS node at {api_url}: {e}")
-            # Try to connect to local IPFS daemon as fallback
-            self.client = AsyncIPFSClient()
+            print(
+                f"Warning: Falling back to local IPFS daemon, but still using gateway={self.gateway}"
+            )
+            self.client = AsyncIPFSClient(gateway=self.gateway)
 
         self._initialize_encryption(encrypt_by_default, encryption_key)
 
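The net effect of this pair of changes: 0.2.4 swaps the default public gateway from ipfs.io to the Hippius gateway, and a failed connection to the API node no longer discards the configured gateway when falling back to a local daemon. A minimal construction sketch (the parameter names come from the diff; both endpoint values are illustrative):

    from hippius_sdk.ipfs import IPFSClient

    client = IPFSClient(
        gateway="https://get.hippius.network",  # the new 0.2.4 default
        api_url="http://127.0.0.1:5001",  # illustrative local IPFS API endpoint
    )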
@@ -483,8 +484,6 @@ class IPFSClient:
 
         # Download the file with retry logic
         retries = 0
-        last_error = None
-
         while retries < max_retries:
             try:
                 # Download the file
@@ -505,7 +504,6 @@ class IPFSClient:
 
             except (requests.exceptions.RequestException, IOError) as e:
                 # Save the error and retry
-                last_error = e
                 retries += 1
 
                 if retries < max_retries:
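With the unused last_error bookkeeping removed, the download loop is a plain bounded-retry pattern. A self-contained sketch of that shape; the wait between attempts is an assumption, since the hunk ends at the retry check:

    import time

    def download_with_retries(fetch, max_retries: int = 3):
        retries = 0
        while retries < max_retries:
            try:
                return fetch()  # stand-in for the actual download call
            except (IOError, OSError):
                retries += 1
                if retries < max_retries:
                    time.sleep(2**retries)  # backoff assumed, not shown in the diff
        raise RuntimeError(f"download failed after {max_retries} attempts")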
@@ -742,6 +740,7 @@ class IPFSClient:
         encrypt: Optional[bool] = None,
         max_retries: int = 3,
         verbose: bool = True,
+        progress_callback: Optional[Callable[[str, int, int], None]] = None,
     ) -> Dict[str, Any]:
         """
         Split a file using erasure coding, then upload the chunks to IPFS.
@@ -759,6 +758,8 @@ class IPFSClient:
             encrypt: Whether to encrypt the file before encoding (defaults to self.encrypt_by_default)
             max_retries: Maximum number of retry attempts for IPFS uploads
             verbose: Whether to print progress information
+            progress_callback: Optional callback function for progress updates
+                Function receives (stage_name, current, total)
 
         Returns:
             dict: Metadata including the original file info and chunk information
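The docstring fixes the callback contract: three positional arguments, (stage_name, current, total). Any plain function with that shape works; in this sketch only the signature comes from the diff, while the file name and k/m values are illustrative:

    def print_progress(stage_name: str, current: int, total: int) -> None:
        pct = 100 * current / total if total else 0
        print(f"[{stage_name}] {current}/{total} ({pct:.0f}%)")

    # From an async caller:
    metadata = await client.erasure_code_file(
        file_path="video.bin", k=3, m=5, progress_callback=print_progress
    )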
@@ -815,6 +816,9 @@ class IPFSClient:
             file_data = self.encrypt_data(file_data)
 
         # Step 2: Split the file into chunks for erasure coding
+        chunk_size = int(chunk_size)
+        chunk_size = max(1, chunk_size)  # Ensure it's at least 1 byte
+
         chunks = []
         chunk_positions = []
         for i in range(0, len(file_data), chunk_size):
@@ -824,7 +828,7 @@ class IPFSClient:
 
         # Pad the last chunk if necessary
         if chunks and len(chunks[-1]) < chunk_size:
-            pad_size = chunk_size - len(chunks[-1])
+            pad_size = int(chunk_size - len(chunks[-1]))
             chunks[-1] = chunks[-1] + b"\0" * pad_size
 
         # If we don't have enough chunks for the requested parameters, adjust
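The two int() coercions above guard real failure modes: a fractional chunk_size (for example, one computed by division) makes range() raise TypeError as a step value, and multiplying b"\0" by a float fails the same way. A standalone sketch of the split-and-pad step with toy numbers:

    file_data = b"x" * 10
    chunk_size = max(1, int(10 / 3))  # 10 / 3 is a float; int() keeps range() happy
    chunks = [
        file_data[i : i + chunk_size] for i in range(0, len(file_data), chunk_size)
    ]
    if chunks and len(chunks[-1]) < chunk_size:
        chunks[-1] += b"\0" * (chunk_size - len(chunks[-1]))  # zero-pad the tail
    assert [len(c) for c in chunks] == [3, 3, 3, 3]  # last chunk padded from 1 byte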
@@ -977,6 +981,16 @@ class IPFSClient:
         # Create a semaphore to limit concurrent uploads
         semaphore = asyncio.Semaphore(batch_size)
 
+        # Track total uploads for progress reporting
+        total_chunks = len(all_chunk_info)
+
+        # Initialize progress tracking if callback provided
+        if progress_callback:
+            progress_callback("upload", 0, total_chunks)
+
+        if verbose:
+            print(f"Uploading {total_chunks} erasure-coded chunks to IPFS...")
+
         # Define upload task for a single chunk
         async def upload_chunk(chunk_info):
             nonlocal chunk_uploads
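The upload stage bounds concurrency with a shared asyncio.Semaphore and reports completions through the new callback. A generic, self-contained sketch of the same pattern; every name here is illustrative rather than the SDK's:

    import asyncio

    async def upload_all(items, worker, batch_size=8, progress_callback=None):
        semaphore = asyncio.Semaphore(batch_size)  # at most batch_size in flight
        done = 0

        async def run_one(item):
            nonlocal done
            async with semaphore:
                value = await worker(item)
            done += 1
            if progress_callback:
                progress_callback("upload", done, len(items))
            return value

        return await asyncio.gather(*(run_one(i) for i in items))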
@@ -988,13 +1002,19 @@ class IPFSClient:
                     )
                     chunk_info["cid"] = chunk_cid
                     chunk_uploads += 1
+
+                    # Update progress through callback
+                    if progress_callback:
+                        progress_callback("upload", chunk_uploads, total_chunks)
+
                     if verbose and chunk_uploads % 10 == 0:
-                        print(
-                            f"  Uploaded {chunk_uploads}/{len(chunks) * m} chunks"
-                        )
+                        print(f"  Uploaded {chunk_uploads}/{total_chunks} chunks")
                     return chunk_info
                 except Exception as e:
-                    print(f"Error uploading chunk {chunk_info['name']}: {str(e)}")
+                    if verbose:
+                        print(
+                            f"Error uploading chunk {chunk_info['name']}: {str(e)}"
+                        )
                     return None
 
         # Create tasks for all chunk uploads
@@ -1042,7 +1062,7 @@ class IPFSClient:
         temp_dir: str = None,
         max_retries: int = 3,
         verbose: bool = True,
-    ) -> str:
+    ) -> Dict:
         """
         Reconstruct a file from erasure-coded chunks using its metadata.
 
@@ -1054,7 +1074,7 @@ class IPFSClient:
             verbose: Whether to print progress information
 
         Returns:
-            str: Path to the reconstructed file
+            Dict: containing file reconstruction info.
 
         Raises:
             ValueError: If reconstruction fails
@@ -1347,7 +1367,10 @@ class IPFSClient:
                 print(f"Reconstruction complete in {total_time:.2f} seconds!")
                 print(f"File saved to: {output_file}")
 
-            return output_file
+            return {
+                "output_path": output_file,
+                "size_bytes": size_processed,
+            }
 
         finally:
             # Clean up temporary directory if we created it
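This is a breaking change for reconstruction callers: 0.2.2 returned the output path as a bare string, while 0.2.4 returns a dict. Existing code must unpack the result; the method name below is an assumption, only the returned keys come from the diff:

    result = await client.reconstruct_from_erasure_code(metadata_cid, "out.bin")
    print(result["output_path"])  # formerly the bare return value
    print(result["size_bytes"])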
@@ -1365,6 +1388,8 @@ class IPFSClient:
         substrate_client=None,
         max_retries: int = 3,
         verbose: bool = True,
+        progress_callback: Optional[Callable[[str, int, int], None]] = None,
+        publish: bool = True,
     ) -> Dict[str, Any]:
         """
         Erasure code a file, upload the chunks to IPFS, and store in the Hippius marketplace.
@@ -1381,15 +1406,23 @@ class IPFSClient:
             substrate_client: SubstrateClient to use (or None to create one)
             max_retries: Maximum number of retry attempts
             verbose: Whether to print progress information
+            progress_callback: Optional callback function for progress updates
+                Function receives (stage_name, current, total)
+            publish: Whether to publish to the blockchain (True) or just perform local
+                erasure coding without publishing (False). When False, no password
+                is needed for seed phrase access.
 
         Returns:
-            dict: Result including metadata CID and transaction hash
+            dict: Result including metadata CID and transaction hash (if published)
 
         Raises:
             ValueError: If parameters are invalid
             RuntimeError: If processing fails
         """
-        # Step 1: Erasure code the file and upload chunks
+        # Step 1: Create substrate client if we need it and are publishing
+        if substrate_client is None and publish:
+            substrate_client = SubstrateClient()
+        # Step 2: Erasure code the file and upload chunks
         metadata = await self.erasure_code_file(
             file_path=file_path,
             k=k,
@@ -1398,52 +1431,55 @@ class IPFSClient:
             encrypt=encrypt,
             max_retries=max_retries,
             verbose=verbose,
+            progress_callback=progress_callback,
         )
 
-        # Step 2: Create substrate client if we need it
-        if substrate_client is None:
-            substrate_client = SubstrateClient()
-
         original_file = metadata["original_file"]
         metadata_cid = metadata["metadata_cid"]
 
-        # Create a list to hold all the file inputs (metadata + all chunks)
-        all_file_inputs = []
+        # Initialize transaction hash variable
+        tx_hash = None
 
-        # Step 3: Prepare metadata file for storage
-        if verbose:
-            print(
-                f"Preparing to store metadata and {len(metadata['chunks'])} chunks in the Hippius marketplace..."
-            )
+        # Only proceed with blockchain storage if publish is True
+        if publish:
+            # Create a list to hold all the file inputs (metadata + all chunks)
+            all_file_inputs = []
 
-        # Create a file input for the metadata file
-        metadata_file_input = FileInput(
-            file_hash=metadata_cid, file_name=f"{original_file['name']}.ec_metadata"
-        )
-        all_file_inputs.append(metadata_file_input)
+            # Step 3: Prepare metadata file for storage
+            if verbose:
+                print(
+                    f"Preparing to store metadata and {len(metadata['chunks'])} chunks in the Hippius marketplace..."
+                )
 
-        # Step 4: Add all chunks to the storage request
-        if verbose:
-            print("Adding all chunks to storage request...")
-
-        for i, chunk in enumerate(metadata["chunks"]):
-            # Extract the CID string from the chunk's cid dictionary
-            chunk_cid = (
-                chunk["cid"]["cid"]
-                if isinstance(chunk["cid"], dict) and "cid" in chunk["cid"]
-                else chunk["cid"]
+            # Create a file input for the metadata file
+            metadata_file_input = FileInput(
+                file_hash=metadata_cid, file_name=f"{original_file['name']}.ec_metadata"
             )
-            chunk_file_input = FileInput(file_hash=chunk_cid, file_name=chunk["name"])
-            all_file_inputs.append(chunk_file_input)
+            all_file_inputs.append(metadata_file_input)
 
-            # Print progress for large numbers of chunks
-            if verbose and (i + 1) % 50 == 0:
-                print(
-                    f"  Prepared {i + 1}/{len(metadata['chunks'])} chunks for storage"
+            # Step 4: Add all chunks to the storage request
+            if verbose:
+                print("Adding all chunks to storage request...")
+
+            for i, chunk in enumerate(metadata["chunks"]):
+                # Extract the CID string from the chunk's cid dictionary
+                chunk_cid = (
+                    chunk["cid"]["cid"]
+                    if isinstance(chunk["cid"], dict) and "cid" in chunk["cid"]
+                    else chunk["cid"]
+                )
+                chunk_file_input = FileInput(
+                    file_hash=chunk_cid, file_name=chunk["name"]
                 )
+                all_file_inputs.append(chunk_file_input)
 
-        # Step 5: Submit the storage request for all files
-        try:
+                # Print progress for large numbers of chunks
+                if verbose and (i + 1) % 50 == 0:
+                    print(
+                        f"  Prepared {i + 1}/{len(metadata['chunks'])} chunks for storage"
+                    )
+
+            # Step 5: Submit the storage request for all files
             if verbose:
                 print(
                     f"Submitting storage request for 1 metadata file and {len(metadata['chunks'])} chunks..."
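Because the substrate client is now created up front only when publishing, passing publish=False erasure-codes and uploads without ever touching the chain (and, per the docstring, without prompting for a seed-phrase password). A usage sketch; the method and file names are assumptions, while the result keys are from the diff:

    result = await client.store_erasure_coded_file(
        file_path="backup.tar",
        publish=False,  # local erasure coding + IPFS upload only
    )
    assert "transaction_hash" not in result  # only present when published
    print(result["metadata_cid"], result["total_files_stored"])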
@@ -1452,7 +1488,6 @@ class IPFSClient:
             tx_hash = await substrate_client.storage_request(
                 files=all_file_inputs, miner_ids=miner_ids
             )
-
             if verbose:
                 print("Successfully stored all files in marketplace!")
                 print(f"Transaction hash: {tx_hash}")
@@ -1461,14 +1496,356 @@ class IPFSClient:
                     f"Total files stored: {len(all_file_inputs)} (1 metadata + {len(metadata['chunks'])} chunks)"
                 )
 
-            return {
+            result = {
                 "metadata": metadata,
                 "metadata_cid": metadata_cid,
                 "transaction_hash": tx_hash,
                 "total_files_stored": len(all_file_inputs),
             }
+        else:
+            # Not publishing to blockchain (--no-publish flag used)
+            if verbose:
+                print("Not publishing to blockchain (--no-publish flag used)")
+                print(f"Metadata CID: {metadata_cid}")
+                print(f"Total chunks: {len(metadata['chunks'])}")
+
+            result = {
+                "metadata": metadata,
+                "metadata_cid": metadata_cid,
+                "total_files_stored": len(metadata["chunks"])
+                + 1,  # +1 for metadata file
+            }
+
+        return result
+
+    async def delete_file(
+        self, cid: str, cancel_from_blockchain: bool = True
+    ) -> Dict[str, Any]:
+        """
+        Delete a file from IPFS and optionally cancel its storage on the blockchain.
+
+        Args:
+            cid: Content Identifier (CID) of the file to delete
+            cancel_from_blockchain: Whether to also cancel the storage request from the blockchain
+
+        Returns:
+            Dict containing the result of the operation
+        """
+        result = {
+            "cid": cid,
+            "unpin_result": None,
+            "blockchain_result": None,
+            "timing": {
+                "start_time": time.time(),
+                "end_time": None,
+                "duration_seconds": None,
+            },
+        }
+
+        # First, unpin from IPFS
+        try:
+            print(f"Unpinning file from IPFS: {cid}")
+            try:
+                # Try to check if file exists in IPFS before unpinning
+                await self.exists(cid)
+            except Exception as exists_e:
+                print(f"ERROR: Error checking file existence: {exists_e}")
+
+            unpin_result = await self.client.unpin(cid)
+            result["unpin_result"] = unpin_result
+            print("Successfully unpinned from IPFS")
+        except Exception as e:
+            print(f"Warning: Failed to unpin file from IPFS: {e}")
+            raise
+
+        # Then, if requested, cancel from blockchain
+        if cancel_from_blockchain:
+            try:
+                # Create a substrate client
+                print(f"DEBUG: Creating SubstrateClient for blockchain cancellation...")
+                substrate_client = SubstrateClient()
+                print(
+                    f"DEBUG: Substrate client created with URL: {substrate_client.url}"
+                )
+                print(f"DEBUG: Calling cancel_storage_request with CID: {cid}")
+
+                tx_hash = await substrate_client.cancel_storage_request(cid)
+                print(f"DEBUG: Received transaction hash: {tx_hash}")
+
+                # Check the return value - special cases for when blockchain cancellation isn't available
+                if tx_hash == "no-blockchain-cancellation-available":
+                    print(
+                        "Blockchain cancellation not available, but IPFS unpinning was successful"
+                    )
+                    result["blockchain_result"] = {
+                        "status": "not_available",
+                        "message": "Blockchain cancellation not available, but IPFS unpinning was successful",
+                    }
+                elif tx_hash.startswith("ipfs-unpinned-only"):
+                    error_msg = tx_hash.replace("ipfs-unpinned-only-", "")
+                    print(
+                        f"IPFS unpinning successful, but blockchain cancellation failed: {error_msg}"
+                    )
+                    result["blockchain_result"] = {
+                        "status": "failed",
+                        "error": error_msg,
+                        "message": "IPFS unpinning successful, but blockchain cancellation failed",
+                    }
+                else:
+                    # Standard successful transaction
+                    result["blockchain_result"] = {
+                        "transaction_hash": tx_hash,
+                        "status": "success",
+                    }
+                    print(f"Successfully canceled storage request from blockchain")
+                    print(
+                        f"DEBUG: Blockchain cancellation succeeded with transaction hash: {tx_hash}"
+                    )
+            except Exception as e:
+                print(f"Warning: Failed to cancel storage from blockchain: {e}")
+                print(
+                    f"DEBUG: Blockchain cancellation exception: {type(e).__name__}: {str(e)}"
+                )
+                if hasattr(e, "__dict__"):
+                    print(f"DEBUG: Exception attributes: {e.__dict__}")
+                result["blockchain_error"] = str(e)
+
+        # Calculate timing
+        result["timing"]["end_time"] = time.time()
+        result["timing"]["duration_seconds"] = (
+            result["timing"]["end_time"] - result["timing"]["start_time"]
+        )
+
+        return result
+
+    async def delete_ec_file(
+        self,
+        metadata_cid: str,
+        cancel_from_blockchain: bool = True,
+        parallel_limit: int = 20,
+    ) -> Dict[str, Any]:
+        """
+        Delete an erasure-coded file, including all its chunks in parallel.
+
+        Args:
+            metadata_cid: CID of the metadata file for the erasure-coded file
+            cancel_from_blockchain: Whether to cancel storage from blockchain
+            parallel_limit: Maximum number of concurrent deletion operations
+
+        Returns:
+            Dict containing the result of the operation
+        """
+        result = {
+            "metadata_cid": metadata_cid,
+            "deleted_chunks": [],
+            "failed_chunks": [],
+            "blockchain_result": None,
+            "timing": {
+                "start_time": time.time(),
+                "end_time": None,
+                "duration_seconds": None,
+            },
+        }
+
+        # Track deletions for reporting
+        deleted_chunks_lock = asyncio.Lock()
+        failed_chunks_lock = asyncio.Lock()
+
+        # First, get the metadata to find all chunks
+        try:
+            print(f"Downloading metadata file (CID: {metadata_cid})...")
+            start_time = time.time()
+            metadata_content = await self.client.cat(metadata_cid)
+            metadata = json.loads(metadata_content.decode("utf-8"))
+            metadata_download_time = time.time() - start_time
+
+            print(f"Metadata downloaded in {metadata_download_time:.2f} seconds")
+
+            # Extract chunk CIDs
+            chunks = []
+            total_chunks = 0
 
+            for chunk_data in metadata.get("chunks", []):
+                for ec_chunk in chunk_data.get("ec_chunks", []):
+                    chunk_cid = ec_chunk.get("cid")
+                    if chunk_cid:
+                        chunks.append(chunk_cid)
+                        total_chunks += 1
+
+            print(f"Found {total_chunks} chunks to delete")
+
+            # Create a semaphore to limit concurrent operations
+            sem = asyncio.Semaphore(parallel_limit)
+
+            # Define the chunk deletion function
+            async def delete_chunk(chunk_cid):
+                async with sem:
+                    try:
+                        print(f"Unpinning chunk: {chunk_cid}")
+                        await self.client.unpin(chunk_cid)
+
+                        # Record success
+                        async with deleted_chunks_lock:
+                            result["deleted_chunks"].append(chunk_cid)
+
+                        # Cancel from blockchain if requested
+                        if cancel_from_blockchain:
+                            try:
+                                substrate_client = SubstrateClient()
+                                tx_hash = await substrate_client.cancel_storage_request(
+                                    chunk_cid
+                                )
+
+                                # Add blockchain result
+                                if "chunk_results" not in result["blockchain_result"]:
+                                    result["blockchain_result"] = {}
+                                    result["blockchain_result"]["chunk_results"] = []
+
+                                # Handle special return values from cancel_storage_request
+                                if tx_hash == "no-blockchain-cancellation-available":
+                                    result["blockchain_result"]["chunk_results"].append(
+                                        {
+                                            "cid": chunk_cid,
+                                            "status": "not_available",
+                                            "message": "Blockchain cancellation not available",
+                                        }
+                                    )
+                                elif tx_hash.startswith("ipfs-unpinned-only"):
+                                    error_msg = tx_hash.replace(
+                                        "ipfs-unpinned-only-", ""
+                                    )
+                                    result["blockchain_result"]["chunk_results"].append(
+                                        {
+                                            "cid": chunk_cid,
+                                            "status": "failed",
+                                            "error": error_msg,
+                                        }
+                                    )
+                                else:
+                                    # Standard successful transaction
+                                    result["blockchain_result"]["chunk_results"].append(
+                                        {
+                                            "cid": chunk_cid,
+                                            "transaction_hash": tx_hash,
+                                            "status": "success",
+                                        }
+                                    )
+                            except Exception as e:
+                                print(
+                                    f"Warning: Failed to cancel blockchain storage for chunk {chunk_cid}: {e}"
+                                )
+
+                                if "chunk_results" not in result["blockchain_result"]:
+                                    result["blockchain_result"] = {}
+                                    result["blockchain_result"]["chunk_results"] = []
+
+                                result["blockchain_result"]["chunk_results"].append(
+                                    {
+                                        "cid": chunk_cid,
+                                        "error": str(e),
+                                        "status": "failed",
+                                    }
+                                )
+
+                        return True
+                    except Exception as e:
+                        error_msg = f"Failed to delete chunk {chunk_cid}: {e}"
+                        print(f"Warning: {error_msg}")
+
+                        # Record failure
+                        async with failed_chunks_lock:
+                            result["failed_chunks"].append(
+                                {"cid": chunk_cid, "error": str(e)}
+                            )
+
+                        return False
+
+            # Start deleting chunks in parallel
+            print(
+                f"Starting parallel deletion of {total_chunks} chunks with max {parallel_limit} concurrent operations"
+            )
+            delete_tasks = [delete_chunk(cid) for cid in chunks]
+            await asyncio.gather(*delete_tasks)
+
+            # Delete the metadata file itself
+            print(f"Unpinning metadata file: {metadata_cid}")
+            response = await self.client.unpin(metadata_cid)
+
+            print(">>>", response)
+            raise SystemExit
+
+            # Cancel metadata from blockchain if requested
+            if cancel_from_blockchain:
+                try:
+                    print(f"Canceling blockchain storage request for metadata file...")
+                    substrate_client = SubstrateClient()
+                    tx_hash = await substrate_client.cancel_storage_request(
+                        metadata_cid
+                    )
+
+                    # Handle special return values from cancel_storage_request
+                    if tx_hash == "no-blockchain-cancellation-available":
+                        print(
+                            "Blockchain cancellation not available for metadata, but IPFS unpinning was successful"
+                        )
+                        result["blockchain_result"] = {
+                            "status": "not_available",
+                            "message": "Blockchain cancellation not available, but IPFS unpinning was successful",
+                        }
+                    elif tx_hash.startswith("ipfs-unpinned-only"):
+                        error_msg = tx_hash.replace("ipfs-unpinned-only-", "")
+                        print(
+                            f"IPFS unpinning successful, but blockchain cancellation failed for metadata: {error_msg}"
+                        )
+                        result["blockchain_result"] = {
+                            "status": "failed",
+                            "error": error_msg,
+                            "message": "IPFS unpinning successful, but blockchain cancellation failed",
+                        }
+                    else:
+                        # Standard successful transaction
+                        result["blockchain_result"] = {
+                            "metadata_transaction_hash": tx_hash,
+                            "status": "success",
+                        }
+                        print(
+                            f"Successfully canceled blockchain storage for metadata file"
+                        )
+                except Exception as e:
+                    print(
+                        f"Warning: Failed to cancel blockchain storage for metadata file: {e}"
+                    )
+
+                    if not result["blockchain_result"]:
+                        result["blockchain_result"] = {}
+
+                    result["blockchain_result"]["metadata_error"] = str(e)
+                    result["blockchain_result"]["status"] = "failed"
+
+            # Calculate and record timing information
+            end_time = time.time()
+            duration = end_time - result["timing"]["start_time"]
+
+            result["timing"]["end_time"] = end_time
+            result["timing"]["duration_seconds"] = duration
+
+            deleted_count = len(result["deleted_chunks"])
+            failed_count = len(result["failed_chunks"])
+
+            print(f"Deletion complete in {duration:.2f} seconds!")
+            print(f"Successfully deleted: {deleted_count}/{total_chunks} chunks")
+
+            if failed_count > 0:
+                print(f"Failed to delete: {failed_count}/{total_chunks} chunks")
+
+            return result
         except Exception as e:
-            print(f"Error storing files in marketplace: {str(e)}")
-            # Return the metadata even if storage fails
-            return {"metadata": metadata, "metadata_cid": metadata_cid, "error": str(e)}
+            # Record end time even if there was an error
+            result["timing"]["end_time"] = time.time()
+            result["timing"]["duration_seconds"] = (
+                result["timing"]["end_time"] - result["timing"]["start_time"]
+            )
+
+            error_msg = f"Error deleting erasure-coded file: {e}"
+            print(f"Error: {error_msg}")
+            raise RuntimeError(error_msg)
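The headline additions in 0.2.4 are the delete_file and delete_ec_file coroutines shown above. A usage sketch with placeholder CIDs; note that delete_ec_file, as shipped, still contains debug leftovers (print(">>>", response) followed by raise SystemExit) after unpinning the metadata file, so the second call below raises SystemExit before the blockchain cleanup and timing code can run:

    info = await client.delete_file("QmExampleCid", cancel_from_blockchain=True)
    print(info["unpin_result"], info["timing"]["duration_seconds"])

    ec_info = await client.delete_ec_file("QmExampleMetadataCid", parallel_limit=20)
    print(len(ec_info["deleted_chunks"]), "chunks unpinned")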