synapse-filecoin-sdk 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. pynapse/__init__.py +6 -0
  2. pynapse/_version.py +1 -0
  3. pynapse/contracts/__init__.py +34 -0
  4. pynapse/contracts/abi_registry.py +11 -0
  5. pynapse/contracts/addresses.json +30 -0
  6. pynapse/contracts/erc20_abi.json +92 -0
  7. pynapse/contracts/errorsAbi.json +933 -0
  8. pynapse/contracts/filecoinPayV1Abi.json +2424 -0
  9. pynapse/contracts/filecoinWarmStorageServiceAbi.json +2363 -0
  10. pynapse/contracts/filecoinWarmStorageServiceStateViewAbi.json +651 -0
  11. pynapse/contracts/generated.py +35 -0
  12. pynapse/contracts/payments_abi.json +205 -0
  13. pynapse/contracts/pdpVerifierAbi.json +1266 -0
  14. pynapse/contracts/providerIdSetAbi.json +161 -0
  15. pynapse/contracts/serviceProviderRegistryAbi.json +1479 -0
  16. pynapse/contracts/sessionKeyRegistryAbi.json +147 -0
  17. pynapse/core/__init__.py +68 -0
  18. pynapse/core/abis.py +25 -0
  19. pynapse/core/chains.py +97 -0
  20. pynapse/core/constants.py +27 -0
  21. pynapse/core/errors.py +22 -0
  22. pynapse/core/piece.py +263 -0
  23. pynapse/core/rand.py +14 -0
  24. pynapse/core/typed_data.py +320 -0
  25. pynapse/core/utils.py +30 -0
  26. pynapse/evm/__init__.py +3 -0
  27. pynapse/evm/client.py +26 -0
  28. pynapse/filbeam/__init__.py +3 -0
  29. pynapse/filbeam/service.py +39 -0
  30. pynapse/payments/__init__.py +17 -0
  31. pynapse/payments/service.py +826 -0
  32. pynapse/pdp/__init__.py +21 -0
  33. pynapse/pdp/server.py +331 -0
  34. pynapse/pdp/types.py +38 -0
  35. pynapse/pdp/verifier.py +82 -0
  36. pynapse/retriever/__init__.py +12 -0
  37. pynapse/retriever/async_chain.py +227 -0
  38. pynapse/retriever/chain.py +209 -0
  39. pynapse/session/__init__.py +12 -0
  40. pynapse/session/key.py +30 -0
  41. pynapse/session/permissions.py +57 -0
  42. pynapse/session/registry.py +90 -0
  43. pynapse/sp_registry/__init__.py +11 -0
  44. pynapse/sp_registry/capabilities.py +25 -0
  45. pynapse/sp_registry/pdp_capabilities.py +102 -0
  46. pynapse/sp_registry/service.py +446 -0
  47. pynapse/sp_registry/types.py +52 -0
  48. pynapse/storage/__init__.py +57 -0
  49. pynapse/storage/async_context.py +682 -0
  50. pynapse/storage/async_manager.py +757 -0
  51. pynapse/storage/context.py +680 -0
  52. pynapse/storage/manager.py +758 -0
  53. pynapse/synapse.py +191 -0
  54. pynapse/utils/__init__.py +25 -0
  55. pynapse/utils/constants.py +25 -0
  56. pynapse/utils/errors.py +3 -0
  57. pynapse/utils/metadata.py +35 -0
  58. pynapse/utils/piece_url.py +16 -0
  59. pynapse/warm_storage/__init__.py +13 -0
  60. pynapse/warm_storage/service.py +513 -0
  61. synapse_filecoin_sdk-0.1.0.dist-info/METADATA +74 -0
  62. synapse_filecoin_sdk-0.1.0.dist-info/RECORD +64 -0
  63. synapse_filecoin_sdk-0.1.0.dist-info/WHEEL +4 -0
  64. synapse_filecoin_sdk-0.1.0.dist-info/licenses/LICENSE.md +228 -0
pynapse/storage/context.py
@@ -0,0 +1,680 @@
+"""
+StorageContext - Represents a specific Service Provider + DataSet pair
+
+This class provides a connection to a specific service provider and data set,
+handling uploads and downloads within that context. It manages:
+- Provider selection and data set creation/reuse
+- PieceCID calculation and validation
+- Payment rail setup through Warm Storage
+- Batched piece additions for efficiency
+"""
+from __future__ import annotations
+
+import random
+from dataclasses import dataclass, field
+from typing import Callable, Dict, List, Optional, TYPE_CHECKING
+
+from pynapse.core.piece import calculate_piece_cid
+from pynapse.core.typed_data import sign_add_pieces_extra_data, sign_create_dataset_extra_data
+from pynapse.pdp import PDPServer
+from pynapse.utils.metadata import combine_metadata, metadata_matches, metadata_object_to_entries
+
+if TYPE_CHECKING:
+    from pynapse.sp_registry import ProviderInfo
+    from pynapse.warm_storage import SyncWarmStorageService
+
+
+# Size constants
+MIN_UPLOAD_SIZE = 256  # bytes
+MAX_UPLOAD_SIZE = 254 * 1024 * 1024  # 254 MiB
+
+
+@dataclass
+class UploadResult:
+    """Result of an upload operation."""
+    piece_cid: str
+    size: int
+    tx_hash: Optional[str] = None
+    piece_id: Optional[int] = None
+
+
+@dataclass
+class ProviderSelectionResult:
+    """Result of provider and dataset selection."""
+    provider: "ProviderInfo"
+    pdp_endpoint: str
+    data_set_id: int  # -1 means needs to be created
+    client_data_set_id: int
+    is_existing: bool
+    metadata: Dict[str, str] = field(default_factory=dict)
+
+
+@dataclass
+class StorageContextOptions:
+    """Options for creating a storage context."""
+    provider_id: Optional[int] = None
+    provider_address: Optional[str] = None
+    data_set_id: Optional[int] = None
+    with_cdn: bool = False
+    force_create_data_set: bool = False
+    metadata: Optional[Dict[str, str]] = None
+    exclude_provider_ids: Optional[List[int]] = None
+    # Callbacks
+    on_provider_selected: Optional[Callable[["ProviderInfo"], None]] = None
+    on_data_set_resolved: Optional[Callable[[dict], None]] = None
+
+
+class StorageContext:
+    """
+    Storage context for a specific provider and dataset.
+
+    Use the factory methods `create()` or `create_contexts()` to construct
+    instances with proper provider selection and dataset resolution.
+    """
+
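+    # A minimal round-trip sketch (illustrative only; assumes a configured
+    # `chain` object plus warm-storage and SP-registry service instances):
+    #
+    #     ctx = StorageContext.create(chain, private_key, warm_storage, sp_registry)
+    #     result = ctx.upload(b"some payload" * 32)   # >= 256 bytes
+    #     data = ctx.download(result.piece_cid)
+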
+    def __init__(
+        self,
+        pdp_endpoint: str,
+        chain,
+        private_key: str,
+        data_set_id: int,
+        client_data_set_id: int,
+        provider: Optional["ProviderInfo"] = None,
+        with_cdn: bool = False,
+        metadata: Optional[Dict[str, str]] = None,
+    ) -> None:
+        self._pdp = PDPServer(pdp_endpoint)
+        self._pdp_endpoint = pdp_endpoint
+        self._chain = chain
+        self._private_key = private_key
+        self._data_set_id = data_set_id
+        self._client_data_set_id = client_data_set_id
+        self._provider = provider
+        self._with_cdn = with_cdn
+        self._metadata = metadata or {}
+
+    @property
+    def data_set_id(self) -> int:
+        return self._data_set_id
+
+    @property
+    def client_data_set_id(self) -> int:
+        return self._client_data_set_id
+
+    @property
+    def provider(self) -> Optional["ProviderInfo"]:
+        return self._provider
+
+    @property
+    def with_cdn(self) -> bool:
+        return self._with_cdn
+
+    @property
+    def data_set_metadata(self) -> Dict[str, str]:
+        return self._metadata
+
+    @staticmethod
+    def _validate_size(size_bytes: int, context: str = "upload") -> None:
+        """Validate data size against limits."""
+        if size_bytes < MIN_UPLOAD_SIZE:
+            raise ValueError(
+                f"Data size {size_bytes} bytes is below minimum allowed size of {MIN_UPLOAD_SIZE} bytes"
+            )
+        if size_bytes > MAX_UPLOAD_SIZE:
+            raise ValueError(
+                f"Data size {size_bytes} bytes exceeds maximum allowed size of {MAX_UPLOAD_SIZE} bytes "
+                f"({MAX_UPLOAD_SIZE // 1024 // 1024} MiB)"
+            )
+
+    @classmethod
+    def create(
+        cls,
+        chain,
+        private_key: str,
+        warm_storage: "SyncWarmStorageService",
+        sp_registry,
+        options: Optional[StorageContextOptions] = None,
+    ) -> "StorageContext":
+        """
+        Create a storage context with smart provider and dataset selection.
+
+        Args:
+            chain: The chain configuration
+            private_key: Private key for signing
+            warm_storage: WarmStorageService instance
+            sp_registry: SPRegistryService instance
+            options: Optional configuration for context creation
+
+        Returns:
+            A configured StorageContext instance
+        """
+        from eth_account import Account
+        acct = Account.from_key(private_key)
+        client_address = acct.address
+
+        options = options or StorageContextOptions()
+        requested_metadata = combine_metadata(options.metadata, options.with_cdn)
+
+        # Resolve provider and dataset
+        resolution = cls._resolve_provider_and_data_set(
+            client_address=client_address,
+            chain=chain,
+            private_key=private_key,
+            warm_storage=warm_storage,
+            sp_registry=sp_registry,
+            options=options,
+            requested_metadata=requested_metadata,
+        )
+
+        # Fire callbacks
+        if options.on_provider_selected and resolution.provider:
+            try:
+                options.on_provider_selected(resolution.provider)
+            except Exception:
+                pass
+
+        # Create dataset if needed
+        data_set_id = resolution.data_set_id
+        client_data_set_id = resolution.client_data_set_id
+
+        if data_set_id == -1:
+            # Need to create a new dataset
+            pdp = PDPServer(resolution.pdp_endpoint)
+
+            # Get next client_data_set_id by counting existing datasets
+            try:
+                existing = warm_storage.get_client_data_sets(acct.address)
+                next_client_id = len(existing) + 1
+            except Exception:
+                next_client_id = 1
+
+            # Convert metadata dict to list of {key, value} entries
+            metadata_entries = metadata_object_to_entries(requested_metadata)
+
+            extra_data = sign_create_dataset_extra_data(
+                private_key=private_key,
+                chain=chain,
+                client_data_set_id=next_client_id,
+                payee=resolution.provider.payee,
+                metadata=metadata_entries,
+            )
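+            # extra_data carries the client's signed typed-data authorization
+            # for the new data set; the PDP server forwards it to the record
+            # keeper (the Warm Storage contract) with the creation transaction.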
+            resp = pdp.create_data_set(
+                record_keeper=chain.contracts.warm_storage,
+                extra_data=extra_data,
+            )
+            # Wait for creation
+            status = pdp.wait_for_data_set_creation(resp.tx_hash)
+            data_set_id = status.data_set_id
+            # Get client_data_set_id from the new dataset
+            ds_info = warm_storage.get_data_set(data_set_id)
+            client_data_set_id = ds_info.client_data_set_id
+
+        # Fire dataset resolved callback
+        if options.on_data_set_resolved:
+            try:
+                options.on_data_set_resolved({
+                    "is_existing": resolution.is_existing,
+                    "data_set_id": data_set_id,
+                    "provider": resolution.provider,
+                })
+            except Exception:
+                pass
+
+        return cls(
+            pdp_endpoint=resolution.pdp_endpoint,
+            chain=chain,
+            private_key=private_key,
+            data_set_id=data_set_id,
+            client_data_set_id=client_data_set_id,
+            provider=resolution.provider,
+            with_cdn=options.with_cdn,
+            metadata=requested_metadata,
+        )
+
+    @classmethod
+    def create_contexts(
+        cls,
+        chain,
+        private_key: str,
+        warm_storage: "SyncWarmStorageService",
+        sp_registry,
+        count: int = 2,
+        options: Optional[StorageContextOptions] = None,
+    ) -> List["StorageContext"]:
+        """
+        Create multiple storage contexts for multi-provider redundancy.
+
+        Args:
+            chain: The chain configuration
+            private_key: Private key for signing
+            warm_storage: WarmStorageService instance
+            sp_registry: SPRegistryService instance
+            count: Number of contexts to create (default: 2)
+            options: Optional configuration for context creation
+
+        Returns:
+            List of configured StorageContext instances
+        """
+        contexts: List[StorageContext] = []
+        used_provider_ids: List[int] = []
+
+        options = options or StorageContextOptions()
+
+        for _ in range(count):
+            # Build options with exclusions
+            ctx_options = StorageContextOptions(
+                provider_id=options.provider_id if not contexts else None,
+                provider_address=options.provider_address if not contexts else None,
+                data_set_id=options.data_set_id if not contexts else None,
+                with_cdn=options.with_cdn,
+                force_create_data_set=options.force_create_data_set,
+                metadata=options.metadata,
+                exclude_provider_ids=(options.exclude_provider_ids or []) + used_provider_ids,
+                on_provider_selected=options.on_provider_selected,
+                on_data_set_resolved=options.on_data_set_resolved,
+            )
+
+            try:
+                ctx = cls.create(
+                    chain=chain,
+                    private_key=private_key,
+                    warm_storage=warm_storage,
+                    sp_registry=sp_registry,
+                    options=ctx_options,
+                )
+                contexts.append(ctx)
+                if ctx.provider:
+                    used_provider_ids.append(ctx.provider.provider_id)
+            except Exception:
+                # If we can't create more contexts, return what we have
+                if not contexts:
+                    raise
+                break
+
+        return contexts
+
+    @classmethod
+    def _resolve_provider_and_data_set(
+        cls,
+        client_address: str,
+        chain,
+        private_key: str,
+        warm_storage: "SyncWarmStorageService",
+        sp_registry,
+        options: StorageContextOptions,
+        requested_metadata: Dict[str, str],
+    ) -> ProviderSelectionResult:
+        """Resolve provider and dataset based on options."""
+
+        # 1. If explicit data_set_id provided
+        if options.data_set_id is not None and not options.force_create_data_set:
+            return cls._resolve_by_data_set_id(
+                data_set_id=options.data_set_id,
+                client_address=client_address,
+                warm_storage=warm_storage,
+                sp_registry=sp_registry,
+            )
+
+        # 2. If explicit provider_id provided
+        if options.provider_id is not None:
+            return cls._resolve_by_provider_id(
+                provider_id=options.provider_id,
+                client_address=client_address,
+                warm_storage=warm_storage,
+                sp_registry=sp_registry,
+                requested_metadata=requested_metadata,
+                force_create=options.force_create_data_set,
+            )
+
+        # 3. If explicit provider_address provided
+        if options.provider_address is not None:
+            provider = sp_registry.get_provider_by_address(options.provider_address)
+            if provider is None:
+                raise ValueError(f"Provider {options.provider_address} not found in registry")
+            return cls._resolve_by_provider_id(
+                provider_id=provider.provider_id,
+                client_address=client_address,
+                warm_storage=warm_storage,
+                sp_registry=sp_registry,
+                requested_metadata=requested_metadata,
+                force_create=options.force_create_data_set,
+            )
+
+        # 4. Smart selection
+        return cls._smart_select_provider(
+            client_address=client_address,
+            warm_storage=warm_storage,
+            sp_registry=sp_registry,
+            requested_metadata=requested_metadata,
+            exclude_provider_ids=options.exclude_provider_ids or [],
+            force_create=options.force_create_data_set,
+        )
+
+    @classmethod
+    def _resolve_by_data_set_id(
+        cls,
+        data_set_id: int,
+        client_address: str,
+        warm_storage: "SyncWarmStorageService",
+        sp_registry,
+    ) -> ProviderSelectionResult:
+        """Resolve using explicit dataset ID."""
+        warm_storage.validate_data_set(data_set_id)
+        ds_info = warm_storage.get_data_set(data_set_id)
+
+        if ds_info.payer.lower() != client_address.lower():
+            raise ValueError(
+                f"Data set {data_set_id} is not owned by {client_address} (owned by {ds_info.payer})"
+            )
+
+        provider = sp_registry.get_provider(ds_info.provider_id)
+        if provider is None:
+            raise ValueError(f"Provider ID {ds_info.provider_id} for data set {data_set_id} not found")
+
+        # Get PDP endpoint from provider product info
+        pdp_endpoint = cls._get_pdp_endpoint(sp_registry, provider.provider_id)
+        metadata = warm_storage.get_all_data_set_metadata(data_set_id)
+
+        return ProviderSelectionResult(
+            provider=provider,
+            pdp_endpoint=pdp_endpoint,
+            data_set_id=data_set_id,
+            client_data_set_id=ds_info.client_data_set_id,
+            is_existing=True,
+            metadata=metadata,
+        )
+
+    @classmethod
+    def _resolve_by_provider_id(
+        cls,
+        provider_id: int,
+        client_address: str,
+        warm_storage: "SyncWarmStorageService",
+        sp_registry,
+        requested_metadata: Dict[str, str],
+        force_create: bool = False,
+    ) -> ProviderSelectionResult:
+        """Resolve by provider ID, finding or creating dataset."""
+        provider = sp_registry.get_provider(provider_id)
+        if provider is None:
+            raise ValueError(f"Provider ID {provider_id} not found in registry")
+
+        pdp_endpoint = cls._get_pdp_endpoint(sp_registry, provider_id)
+
+        if force_create:
+            return ProviderSelectionResult(
+                provider=provider,
+                pdp_endpoint=pdp_endpoint,
+                data_set_id=-1,
+                client_data_set_id=0,
+                is_existing=False,
+                metadata=requested_metadata,
+            )
+
+        # Try to find existing dataset for this provider
+        try:
+            datasets = warm_storage.get_client_data_sets(client_address)
+            for ds in datasets:
+                if ds.provider_id == provider_id and ds.pdp_end_epoch == 0:
+                    # Check metadata match
+                    ds_metadata = warm_storage.get_all_data_set_metadata(ds.data_set_id)
+                    if metadata_matches(ds_metadata, requested_metadata):
+                        return ProviderSelectionResult(
+                            provider=provider,
+                            pdp_endpoint=pdp_endpoint,
+                            data_set_id=ds.data_set_id,
+                            client_data_set_id=ds.client_data_set_id,
+                            is_existing=True,
+                            metadata=ds_metadata,
+                        )
+        except Exception:
+            pass
+
+        # No matching dataset found, need to create
+        return ProviderSelectionResult(
+            provider=provider,
+            pdp_endpoint=pdp_endpoint,
+            data_set_id=-1,
+            client_data_set_id=0,
+            is_existing=False,
+            metadata=requested_metadata,
+        )
+
+    @classmethod
+    def _smart_select_provider(
+        cls,
+        client_address: str,
+        warm_storage: "SyncWarmStorageService",
+        sp_registry,
+        requested_metadata: Dict[str, str],
+        exclude_provider_ids: List[int],
+        force_create: bool = False,
+    ) -> ProviderSelectionResult:
+        """Smart provider selection with existing dataset reuse."""
+        exclude_set = set(exclude_provider_ids)
+
+        # First, try to find existing datasets with matching metadata
+        if not force_create:
+            try:
+                datasets = warm_storage.get_client_data_sets_with_details(client_address)
+                # Filter for live, managed datasets with matching metadata
+                matching = [
+                    ds for ds in datasets
+                    if ds.is_live
+                    and ds.is_managed
+                    and ds.pdp_end_epoch == 0
+                    and ds.provider_id not in exclude_set
+                    and metadata_matches(ds.metadata, requested_metadata)
+                ]
+
+                # Prefer datasets that already hold pieces; break ties by lowest (oldest) ID
+                matching.sort(key=lambda ds: (-ds.active_piece_count, ds.data_set_id))
+
+                for ds in matching:
+                    provider = sp_registry.get_provider(ds.provider_id)
+                    if provider and provider.is_active:
+                        # Health check: try to ping the PDP endpoint
+                        pdp_endpoint = cls._get_pdp_endpoint(sp_registry, ds.provider_id)
+                        if cls._ping_provider(pdp_endpoint):
+                            return ProviderSelectionResult(
+                                provider=provider,
+                                pdp_endpoint=pdp_endpoint,
+                                data_set_id=ds.data_set_id,
+                                client_data_set_id=ds.client_data_set_id,
+                                is_existing=True,
+                                metadata=ds.metadata,
+                            )
+            except Exception:
+                pass
+
+        # No existing dataset, select a new provider
+        try:
+            approved_ids = warm_storage.get_approved_provider_ids()
+        except Exception:
+            approved_ids = []
+
+        # Filter out excluded providers
+        candidate_ids = [pid for pid in approved_ids if pid not in exclude_set]
+
+        # Shuffle for random selection
+        random.shuffle(candidate_ids)
+
+        # Find a healthy provider
+        for pid in candidate_ids:
+            try:
+                provider = sp_registry.get_provider(pid)
+                if provider and provider.is_active:
+                    pdp_endpoint = cls._get_pdp_endpoint(sp_registry, pid)
+                    if cls._ping_provider(pdp_endpoint):
+                        return ProviderSelectionResult(
+                            provider=provider,
+                            pdp_endpoint=pdp_endpoint,
+                            data_set_id=-1,
+                            client_data_set_id=0,
+                            is_existing=False,
+                            metadata=requested_metadata,
+                        )
+            except Exception:
+                continue
+
+        raise ValueError("No approved service providers available")
+
+    @classmethod
+    def _get_pdp_endpoint(cls, sp_registry, provider_id: int) -> str:
+        """Get the PDP service URL for a provider."""
+        try:
+            product = sp_registry.get_provider_with_product(provider_id, 0)  # PDP product type
+            # Look for serviceURL in capability values
+            for i, key in enumerate(product.product.capability_keys):
+                if key == "serviceURL" and i < len(product.product_capability_values):
+                    val = product.product_capability_values[i]
+                    # Values are returned as bytes from the contract
+                    if isinstance(val, bytes):
+                        return val.decode('utf-8')
+                    return str(val)
+        except Exception:
+            pass
+
+        raise ValueError(f"Could not find PDP endpoint for provider {provider_id}")
+
+    @classmethod
+    def _ping_provider(cls, pdp_endpoint: str, timeout: float = 5.0) -> bool:
+        """Health check a provider's PDP endpoint."""
+        import httpx
+        try:
+            # Try a simple HEAD request to check if the server is responsive
+            with httpx.Client(timeout=timeout) as client:
+                resp = client.head(pdp_endpoint)
+                return resp.status_code < 500
+        except Exception:
+            return False
+
+    def upload(
+        self,
+        data: bytes,
+        metadata: Optional[Dict[str, str]] = None,
+        on_progress: Optional[Callable[[int], None]] = None,
+        on_upload_complete: Optional[Callable[[str], None]] = None,
+        on_pieces_added: Optional[Callable[[str], None]] = None,
+    ) -> UploadResult:
+        """
+        Upload data to this storage context.
+
+        Args:
+            data: Bytes to upload
+            metadata: Optional piece metadata
+            on_progress: Callback for upload progress
+            on_upload_complete: Callback when upload completes
+            on_pieces_added: Callback when pieces are added on-chain
+
+        Returns:
+            UploadResult with piece CID and transaction info
+        """
+        self._validate_size(len(data))
+
+        info = calculate_piece_cid(data)
+
+        # Upload to PDP server (include padded_piece_size for PieceCIDv1)
+        self._pdp.upload_piece(data, info.piece_cid, info.padded_piece_size)
+
+        # Wait for piece to be indexed before adding to dataset
+        # The PDP server needs time to process and index uploaded pieces
+        self._pdp.wait_for_piece(info.piece_cid, timeout_seconds=60, poll_interval=2)
+
+        if on_upload_complete:
+            try:
+                on_upload_complete(info.piece_cid)
+            except Exception:
+                pass
+
+        # Add piece to dataset
+        pieces = [(info.piece_cid, [{"key": k, "value": v} for k, v in (metadata or {}).items()])]
+        extra_data = sign_add_pieces_extra_data(
+            private_key=self._private_key,
+            chain=self._chain,
+            client_data_set_id=self._client_data_set_id,
+            pieces=pieces,
+        )
+
+        add_resp = self._pdp.add_pieces(self._data_set_id, [info.piece_cid], extra_data)
+
+        if on_pieces_added:
+            try:
+                on_pieces_added(add_resp.tx_hash)
+            except Exception:
+                pass
+
+        return UploadResult(
+            piece_cid=info.piece_cid,
+            size=info.payload_size,
+            tx_hash=add_resp.tx_hash,
+        )
+
+    def upload_multi(
+        self,
+        data_items: List[bytes],
+        metadata: Optional[Dict[str, str]] = None,
+    ) -> List[UploadResult]:
+        """
+        Upload multiple pieces in a batch.
+
+        Args:
+            data_items: List of byte arrays to upload
+            metadata: Optional metadata to apply to all pieces
+
+        Returns:
+            List of UploadResults
+        """
+        results = []
+        piece_infos = []
+
+        # Calculate CIDs and upload all pieces
+        for data in data_items:
+            self._validate_size(len(data))
+            info = calculate_piece_cid(data)
+            self._pdp.upload_piece(data, info.piece_cid, info.padded_piece_size)
+            piece_infos.append(info)
+
+        # Wait for all pieces to be indexed before adding to dataset
+        for info in piece_infos:
+            self._pdp.wait_for_piece(info.piece_cid, timeout_seconds=60, poll_interval=2)
+
+        # Batch add pieces
+        pieces = [
+            (info.piece_cid, [{"key": k, "value": v} for k, v in (metadata or {}).items()])
+            for info in piece_infos
+        ]
+        extra_data = sign_add_pieces_extra_data(
+            private_key=self._private_key,
+            chain=self._chain,
+            client_data_set_id=self._client_data_set_id,
+            pieces=pieces,
+        )
+
+        piece_cids = [info.piece_cid for info in piece_infos]
+        add_resp = self._pdp.add_pieces(self._data_set_id, piece_cids, extra_data)
+
+        for info in piece_infos:
+            results.append(UploadResult(
+                piece_cid=info.piece_cid,
+                size=info.payload_size,
+                tx_hash=add_resp.tx_hash,
+            ))
+
+        return results
+
+    def download(self, piece_cid: str) -> bytes:
+        """Download a piece by CID."""
+        return self._pdp.download_piece(piece_cid)
+
+    def has_piece(self, piece_cid: str) -> bool:
+        """Check if a piece exists on this provider."""
+        try:
+            self._pdp.find_piece(piece_cid)
+            return True
+        except Exception:
+            return False
+
+    def wait_for_piece(self, piece_cid: str, timeout_seconds: int = 300) -> None:
+        """Wait for a piece to be available on this provider."""
+        self._pdp.wait_for_piece(piece_cid, timeout_seconds)
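
For orientation, a minimal end-to-end sketch against the API this file adds, assuming an already-configured chain object, a funded signing key, and instantiated warm-storage and SP-registry services (variable names are illustrative, not part of the package):

    from pynapse.storage.context import StorageContext, StorageContextOptions

    # Select (or create) a data set on an approved provider, tagging it with
    # CDN and app metadata.
    opts = StorageContextOptions(with_cdn=True, metadata={"app": "demo"})
    ctx = StorageContext.create(chain, private_key, warm_storage, sp_registry, opts)

    # upload() computes the PieceCID, waits for server-side indexing, then
    # signs and submits the piece addition; download() round-trips the bytes.
    result = ctx.upload(b"hello pynapse" * 32)
    assert ctx.has_piece(result.piece_cid)
    data = ctx.download(result.piece_cid)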