synapse-filecoin-sdk 0.1.0__py3-none-any.whl
- pynapse/__init__.py +6 -0
- pynapse/_version.py +1 -0
- pynapse/contracts/__init__.py +34 -0
- pynapse/contracts/abi_registry.py +11 -0
- pynapse/contracts/addresses.json +30 -0
- pynapse/contracts/erc20_abi.json +92 -0
- pynapse/contracts/errorsAbi.json +933 -0
- pynapse/contracts/filecoinPayV1Abi.json +2424 -0
- pynapse/contracts/filecoinWarmStorageServiceAbi.json +2363 -0
- pynapse/contracts/filecoinWarmStorageServiceStateViewAbi.json +651 -0
- pynapse/contracts/generated.py +35 -0
- pynapse/contracts/payments_abi.json +205 -0
- pynapse/contracts/pdpVerifierAbi.json +1266 -0
- pynapse/contracts/providerIdSetAbi.json +161 -0
- pynapse/contracts/serviceProviderRegistryAbi.json +1479 -0
- pynapse/contracts/sessionKeyRegistryAbi.json +147 -0
- pynapse/core/__init__.py +68 -0
- pynapse/core/abis.py +25 -0
- pynapse/core/chains.py +97 -0
- pynapse/core/constants.py +27 -0
- pynapse/core/errors.py +22 -0
- pynapse/core/piece.py +263 -0
- pynapse/core/rand.py +14 -0
- pynapse/core/typed_data.py +320 -0
- pynapse/core/utils.py +30 -0
- pynapse/evm/__init__.py +3 -0
- pynapse/evm/client.py +26 -0
- pynapse/filbeam/__init__.py +3 -0
- pynapse/filbeam/service.py +39 -0
- pynapse/payments/__init__.py +17 -0
- pynapse/payments/service.py +826 -0
- pynapse/pdp/__init__.py +21 -0
- pynapse/pdp/server.py +331 -0
- pynapse/pdp/types.py +38 -0
- pynapse/pdp/verifier.py +82 -0
- pynapse/retriever/__init__.py +12 -0
- pynapse/retriever/async_chain.py +227 -0
- pynapse/retriever/chain.py +209 -0
- pynapse/session/__init__.py +12 -0
- pynapse/session/key.py +30 -0
- pynapse/session/permissions.py +57 -0
- pynapse/session/registry.py +90 -0
- pynapse/sp_registry/__init__.py +11 -0
- pynapse/sp_registry/capabilities.py +25 -0
- pynapse/sp_registry/pdp_capabilities.py +102 -0
- pynapse/sp_registry/service.py +446 -0
- pynapse/sp_registry/types.py +52 -0
- pynapse/storage/__init__.py +57 -0
- pynapse/storage/async_context.py +682 -0
- pynapse/storage/async_manager.py +757 -0
- pynapse/storage/context.py +680 -0
- pynapse/storage/manager.py +758 -0
- pynapse/synapse.py +191 -0
- pynapse/utils/__init__.py +25 -0
- pynapse/utils/constants.py +25 -0
- pynapse/utils/errors.py +3 -0
- pynapse/utils/metadata.py +35 -0
- pynapse/utils/piece_url.py +16 -0
- pynapse/warm_storage/__init__.py +13 -0
- pynapse/warm_storage/service.py +513 -0
- synapse_filecoin_sdk-0.1.0.dist-info/METADATA +74 -0
- synapse_filecoin_sdk-0.1.0.dist-info/RECORD +64 -0
- synapse_filecoin_sdk-0.1.0.dist-info/WHEEL +4 -0
- synapse_filecoin_sdk-0.1.0.dist-info/licenses/LICENSE.md +228 -0
@@ -0,0 +1,758 @@
"""
StorageManager - Central facade for storage operations

Manages storage contexts (SP + DataSet pairs) with intelligent provider selection
and dataset reuse. Supports both single and multi-provider uploads.
"""
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Sequence

from .context import StorageContext, StorageContextOptions, UploadResult


# Size and time constants matching the TypeScript SDK
TIB = 1024 ** 4
EPOCHS_PER_DAY = 2880
DAYS_PER_MONTH = 30
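# Note: at Filecoin's 30-second epochs, one month of epochs is
# EPOCHS_PER_DAY * DAYS_PER_MONTH = 2880 * 30 = 86_400.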


@dataclass
class ProviderFilter:
    """Filter criteria for provider selection."""
    provider_ids: Optional[List[int]] = None
    with_cdn: bool = False
    with_ipni: bool = False
    min_piece_size: Optional[int] = None
    max_piece_size: Optional[int] = None
    location: Optional[str] = None
    exclude_provider_ids: Optional[List[int]] = None


@dataclass
class PreflightInfo:
    """Preflight estimation for storage costs."""
    size_bytes: int
    estimated_cost_per_epoch: int
    estimated_total_cost: int
    duration_epochs: int
    provider_count: int
    providers: List[int] = field(default_factory=list)


@dataclass
class DataSetMatch:
    """A dataset that matches search criteria."""
    data_set_id: int
    client_data_set_id: int
    provider_id: int
    pdp_endpoint: str
    metadata: Dict[str, str]


@dataclass
class StoragePricing:
    """Pricing information per time unit."""
    per_tib_per_month: int
    per_tib_per_day: int
    per_tib_per_epoch: int


@dataclass
class ServiceParameters:
    """Service configuration parameters."""
    epochs_per_month: int
    epochs_per_day: int = EPOCHS_PER_DAY
    epoch_duration: int = 30  # seconds
    min_upload_size: int = 256  # bytes
    max_upload_size: int = 254 * 1024 * 1024  # 254 MiB


@dataclass
class StorageInfo:
    """Comprehensive storage service information."""
    pricing_no_cdn: StoragePricing
    pricing_with_cdn: StoragePricing
    token_address: str
    token_symbol: str
    providers: List[dict]  # list of provider info dicts
    service_parameters: ServiceParameters
    approved_provider_ids: List[int] = field(default_factory=list)


class StorageManager:
    """
    Central storage manager with provider selection and dataset reuse.

    Features:
    - Smart provider selection by capabilities (CDN, IPNI, location)
    - Dataset reuse based on metadata matching
    - Multi-provider uploads for redundancy
    - Preflight cost estimation

    Example:
        # Simple upload (auto-selects a provider)
        result = manager.upload(data)

        # Upload with a specific provider
        result = manager.upload(data, provider_id=1)

        # Multi-provider upload for redundancy
        results = manager.upload_multi(data, manager.get_contexts(count=3))

        # Preflight check
        info = manager.preflight(len(data), provider_count=2)
    """

    def __init__(
        self,
        chain,
        private_key: str,
        sp_registry=None,
        warm_storage=None,
        retriever=None,
    ) -> None:
        self._chain = chain
        self._private_key = private_key
        self._sp_registry = sp_registry
        self._warm_storage = warm_storage
        self._retriever = retriever
        self._default_context: Optional[StorageContext] = None
        self._context_cache: Dict[int, StorageContext] = {}  # provider_id -> context

    def create_context(
        self,
        pdp_endpoint: str,
        data_set_id: int,
        client_data_set_id: int,
        provider_id: Optional[int] = None,
    ) -> StorageContext:
        """Create a storage context for a specific provider/dataset (low-level)."""
        context = StorageContext(
            pdp_endpoint=pdp_endpoint,
            chain=self._chain,
            private_key=self._private_key,
            data_set_id=data_set_id,
            client_data_set_id=client_data_set_id,
        )
        if provider_id is not None:
            self._context_cache[provider_id] = context
        return context

    def get_context(
        self,
        provider_id: Optional[int] = None,
        provider_address: Optional[str] = None,
        data_set_id: Optional[int] = None,
        with_cdn: bool = False,
        force_create_data_set: bool = False,
        metadata: Optional[Dict[str, str]] = None,
        exclude_provider_ids: Optional[List[int]] = None,
        on_provider_selected: Optional[Callable] = None,
        on_data_set_resolved: Optional[Callable] = None,
    ) -> StorageContext:
        """
        Get or create a storage context with smart provider/dataset selection.

        This is the recommended way to get a context: it handles provider
        selection, dataset reuse, and dataset creation automatically.

        Args:
            provider_id: Optional specific provider ID to use
            provider_address: Optional specific provider address to use
            data_set_id: Optional specific dataset ID to use
            with_cdn: Whether to enable CDN services
            force_create_data_set: Force creation of a new dataset
            metadata: Custom metadata for the dataset
            exclude_provider_ids: Provider IDs to exclude from selection
            on_provider_selected: Callback invoked when a provider is selected
            on_data_set_resolved: Callback invoked when a dataset is resolved

        Returns:
            Configured StorageContext
        """
        if self._warm_storage is None:
            raise ValueError("warm_storage required for smart context creation")
        if self._sp_registry is None:
            raise ValueError("sp_registry required for smart context creation")

        # Check if we can reuse the default context
        can_use_default = (
            provider_id is None
            and provider_address is None
            and data_set_id is None
            and not force_create_data_set
            and self._default_context is not None
        )

        if can_use_default:
            # Reuse only if the requested metadata matches the cached dataset
            from pynapse.utils.metadata import combine_metadata, metadata_matches
            requested_metadata = combine_metadata(metadata, with_cdn)
            if metadata_matches(self._default_context.data_set_metadata, requested_metadata):
                return self._default_context

        # Create a new context using the factory method
        options = StorageContextOptions(
            provider_id=provider_id,
            provider_address=provider_address,
            data_set_id=data_set_id,
            with_cdn=with_cdn,
            force_create_data_set=force_create_data_set,
            metadata=metadata,
            exclude_provider_ids=exclude_provider_ids,
            on_provider_selected=on_provider_selected,
            on_data_set_resolved=on_data_set_resolved,
        )

        context = StorageContext.create(
            chain=self._chain,
            private_key=self._private_key,
            warm_storage=self._warm_storage,
            sp_registry=self._sp_registry,
            options=options,
        )

        # Cache as default if no specific options were provided
        if provider_id is None and provider_address is None and data_set_id is None:
            self._default_context = context

        return context
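
    # Illustrative sketch (not part of the package): typical get_context calls,
    # assuming a `manager` constructed with warm_storage and sp_registry.
    #
    #     ctx = manager.get_context(with_cdn=True, metadata={"app": "demo"})
    #     same = manager.get_context(with_cdn=True, metadata={"app": "demo"})  # default reused
    #     new = manager.get_context(force_create_data_set=True)  # always creates a dataset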

    def get_contexts(
        self,
        count: int = 2,
        with_cdn: bool = False,
        force_create_data_set: bool = False,
        metadata: Optional[Dict[str, str]] = None,
        exclude_provider_ids: Optional[List[int]] = None,
        on_provider_selected: Optional[Callable] = None,
        on_data_set_resolved: Optional[Callable] = None,
    ) -> List[StorageContext]:
        """
        Get or create multiple storage contexts for multi-provider redundancy.

        Args:
            count: Number of contexts to create (default: 2)
            with_cdn: Whether to enable CDN services
            force_create_data_set: Force creation of new datasets
            metadata: Custom metadata for datasets
            exclude_provider_ids: Provider IDs to exclude from selection
            on_provider_selected: Callback invoked when a provider is selected
            on_data_set_resolved: Callback invoked when a dataset is resolved

        Returns:
            List of configured StorageContext instances
        """
        if self._warm_storage is None:
            raise ValueError("warm_storage required for smart context creation")
        if self._sp_registry is None:
            raise ValueError("sp_registry required for smart context creation")

        options = StorageContextOptions(
            with_cdn=with_cdn,
            force_create_data_set=force_create_data_set,
            metadata=metadata,
            exclude_provider_ids=exclude_provider_ids,
            on_provider_selected=on_provider_selected,
            on_data_set_resolved=on_data_set_resolved,
        )

        return StorageContext.create_contexts(
            chain=self._chain,
            private_key=self._private_key,
            warm_storage=self._warm_storage,
            sp_registry=self._sp_registry,
            count=count,
            options=options,
        )

    def get_default_context(self) -> StorageContext:
        """
        Get the default storage context, creating one if needed.

        Returns:
            The default StorageContext
        """
        if self._default_context is None:
            self._default_context = self.get_context()
        return self._default_context

    def select_providers(
        self,
        count: int = 1,
        filter: Optional[ProviderFilter] = None,
    ) -> List[int]:
        """
        Select providers matching the given criteria.

        Args:
            count: Number of providers to select
            filter: Optional filter criteria

        Returns:
            List of provider IDs
        """
        if self._sp_registry is None:
            raise ValueError("sp_registry required for provider selection")

        filter = filter or ProviderFilter()

        # If specific provider IDs were requested, return them directly
        if filter.provider_ids:
            return filter.provider_ids[:count]

        # Get all active providers
        providers = self._sp_registry.get_all_active_providers()

        # Filter by exclusions
        if filter.exclude_provider_ids:
            providers = [p for p in providers if p.provider_id not in filter.exclude_provider_ids]

        # TODO: Filter by capabilities (CDN, IPNI, location, piece size).
        # This would require fetching product info for each provider;
        # for now, just return the first N providers.

        selected = [p.provider_id for p in providers[:count]]
        return selected

    def find_dataset(
        self,
        provider_id: int,
        metadata: Optional[Dict[str, str]] = None,
    ) -> Optional[DataSetMatch]:
        """
        Find an existing dataset for the given provider matching metadata.

        Args:
            provider_id: Provider to search datasets for
            metadata: Metadata criteria to match

        Returns:
            Matching dataset info, or None
        """
        if self._warm_storage is None:
            return None

        # TODO: Implement dataset search by provider and metadata.
        # This requires warm_storage.get_client_data_sets() and filtering.
        return None

    def preflight(
        self,
        size_bytes: int,
        provider_count: int = 1,
        duration_epochs: int = 2880,  # ~1 day default
        filter: Optional[ProviderFilter] = None,
        with_cdn: bool = False,
    ) -> PreflightInfo:
        """
        Estimate storage costs before upload.

        Args:
            size_bytes: Size of the data to upload
            provider_count: Number of providers for redundancy
            duration_epochs: Storage duration in epochs
            filter: Optional provider filter criteria
            with_cdn: Whether to include CDN in the cost estimation

        Returns:
            Preflight estimation including costs
        """
        # Select providers
        providers = self.select_providers(count=provider_count, filter=filter)

        # Try to get actual pricing from warm storage
        if self._warm_storage is not None:
            try:
                pricing_rates = self._warm_storage.get_current_pricing_rates()
                if isinstance(pricing_rates, (list, tuple)) and len(pricing_rates) >= 3:
                    price_per_tib_month = int(pricing_rates[1] if with_cdn else pricing_rates[0])
                    epochs_per_month = int(pricing_rates[2])

                    # Calculate the per-epoch rate for this size
                    price_per_tib_epoch = price_per_tib_month // epochs_per_month if epochs_per_month else 0
                    estimated_rate = (size_bytes * price_per_tib_epoch * provider_count) // TIB
                    estimated_rate = max(1, estimated_rate)  # minimum 1 unit
                    estimated_total = estimated_rate * duration_epochs

                    return PreflightInfo(
                        size_bytes=size_bytes,
                        estimated_cost_per_epoch=estimated_rate,
                        estimated_total_cost=estimated_total,
                        duration_epochs=duration_epochs,
                        provider_count=len(providers),
                        providers=providers,
                    )
            except Exception:
                pass

        # Fallback: simplified cost calculation
        estimated_rate = size_bytes * provider_count // TIB + 1  # minimum 1 unit
        estimated_total = estimated_rate * duration_epochs

        return PreflightInfo(
            size_bytes=size_bytes,
            estimated_cost_per_epoch=estimated_rate,
            estimated_total_cost=estimated_total,
            duration_epochs=duration_epochs,
            provider_count=len(providers),
            providers=providers,
        )
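
    # Worked example (illustrative pricing, not real rates): 1 GiB on 2 providers
    # at 86_400_000 units per TiB-month with 86_400 epochs per month gives
    # price_per_tib_epoch = 86_400_000 // 86_400 = 1_000, so
    #     estimated_rate  = (2**30 * 1_000 * 2) // 2**40 = 1 unit per epoch
    #     estimated_total = 1 * duration_epochs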

    def preflight_upload(
        self,
        size_bytes: int,
        with_cdn: bool = False,
        payments_service=None,
    ) -> dict:
        """
        Comprehensive preflight check including cost estimation and allowance validation.

        This method checks:
        1. Storage costs per epoch/day/month
        2. Current service allowances (if payments_service is provided)
        3. Whether those allowances are sufficient

        Args:
            size_bytes: Size of the data to upload, in bytes
            with_cdn: Whether CDN is enabled
            payments_service: Optional PaymentsService for allowance checking

        Returns:
            Dict with estimated_cost, allowance_check, and any required actions
        """
        result = {
            "estimated_cost": {
                "per_epoch": 0,
                "per_day": 0,
                "per_month": 0,
            },
            "allowance_check": {
                "sufficient": True,
                "message": None,
            },
            "size_bytes": size_bytes,
            "with_cdn": with_cdn,
        }

        # Get pricing
        if self._warm_storage is not None:
            try:
                pricing_rates = self._warm_storage.get_current_pricing_rates()
                if isinstance(pricing_rates, (list, tuple)) and len(pricing_rates) >= 3:
                    price_per_tib_month = int(pricing_rates[1] if with_cdn else pricing_rates[0])
                    epochs_per_month = int(pricing_rates[2])

                    # Calculate costs
                    size_ratio = size_bytes / TIB
                    cost_per_month = int(price_per_tib_month * size_ratio)
                    cost_per_day = cost_per_month // DAYS_PER_MONTH
                    cost_per_epoch = cost_per_month // epochs_per_month if epochs_per_month else 0

                    result["estimated_cost"] = {
                        "per_epoch": cost_per_epoch,
                        "per_day": cost_per_day,
                        "per_month": cost_per_month,
                    }
            except Exception:
                pass

        # Check allowances if a payments service was provided
        if payments_service is not None and self._chain is not None:
            try:
                approval = payments_service.service_approval(
                    self._chain.contracts.warm_storage
                )

                rate_needed = result["estimated_cost"]["per_epoch"]
                # Lockup = rate * lockup_period (typically 10 days)
                lockup_period = EPOCHS_PER_DAY * 10
                lockup_needed = rate_needed * lockup_period

                rate_sufficient = approval.rate_allowance >= rate_needed
                lockup_sufficient = approval.lockup_allowance >= lockup_needed

                result["allowance_check"] = {
                    "sufficient": rate_sufficient and lockup_sufficient,
                    "is_approved": approval.is_approved,
                    "rate_allowance": approval.rate_allowance,
                    "lockup_allowance": approval.lockup_allowance,
                    "rate_needed": rate_needed,
                    "lockup_needed": lockup_needed,
                    "message": None if (rate_sufficient and lockup_sufficient) else (
                        f"Insufficient allowances: need rate={rate_needed}, lockup={lockup_needed}"
                    ),
                }
            except Exception as e:
                result["allowance_check"]["message"] = f"Failed to check allowances: {e}"

        return result
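
    # Illustrative sketch, assuming a configured PaymentsService instance `payments`:
    #
    #     check = manager.preflight_upload(len(data), payments_service=payments)
    #     if not check["allowance_check"]["sufficient"]:
    #         raise RuntimeError(check["allowance_check"]["message"])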

    def upload(
        self,
        data: bytes,
        pdp_endpoint: Optional[str] = None,
        data_set_id: Optional[int] = None,
        client_data_set_id: Optional[int] = None,
        provider_id: Optional[int] = None,
        metadata: Optional[Dict[str, str]] = None,
        context: Optional[StorageContext] = None,
        with_cdn: bool = False,
        auto_create_context: bool = True,
    ) -> UploadResult:
        """
        Upload data to storage.

        If warm_storage and sp_registry are configured, this method can
        auto-create a context with smart provider selection. Otherwise,
        explicit context parameters are required.

        Args:
            data: Bytes to upload
            pdp_endpoint: PDP server endpoint (required if not auto-creating)
            data_set_id: Dataset ID (required if not auto-creating)
            client_data_set_id: Client dataset ID (required if not auto-creating)
            provider_id: Optional provider ID for selection/caching
            metadata: Optional piece metadata
            context: Explicit context to use (overrides the other params)
            with_cdn: Enable CDN services (for auto-create)
            auto_create_context: Auto-create a context if services are available (default: True)

        Returns:
            Upload result with piece CID and transaction hash
        """
        if context is not None:
            return context.upload(data, metadata=metadata)

        # Check for a cached context
        if provider_id is not None and provider_id in self._context_cache:
            return self._context_cache[provider_id].upload(data, metadata=metadata)

        # Try auto-creation if the required services are available
        if auto_create_context and self._warm_storage is not None and self._sp_registry is not None:
            ctx = self.get_context(
                provider_id=provider_id,
                with_cdn=with_cdn,
            )
            return ctx.upload(data, metadata=metadata)

        # Fall back to explicit context creation
        if pdp_endpoint is None or data_set_id is None or client_data_set_id is None:
            raise ValueError(
                "pdp_endpoint, data_set_id, and client_data_set_id required "
                "(or configure warm_storage and sp_registry for auto-creation)"
            )

        ctx = self.create_context(pdp_endpoint, data_set_id, client_data_set_id, provider_id)
        return ctx.upload(data, metadata=metadata)
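
    # Illustrative sketch: the order in which upload() resolves a context.
    #
    #     manager.upload(data, context=ctx)    # 1. explicit context wins
    #     manager.upload(data, provider_id=7)  # 2. cached context for provider 7, if any
    #     manager.upload(data)                 # 3. auto-created/cached default context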

    def upload_multi(
        self,
        data: bytes,
        contexts: Sequence[StorageContext],
        metadata: Optional[Dict[str, str]] = None,
    ) -> List[UploadResult]:
        """
        Upload data to multiple storage providers for redundancy.

        All contexts receive the same data with the same piece CID.

        Args:
            data: Bytes to upload
            contexts: Storage contexts, one per provider
            metadata: Optional piece metadata

        Returns:
            List of upload results (one per context)
        """
        results = []
        for ctx in contexts:
            result = ctx.upload(data, metadata=metadata)
            results.append(result)
        return results
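
    # Illustrative sketch: redundant storage across three providers. Assumes
    # UploadResult exposes a `piece_cid` attribute (hypothetical field name).
    #
    #     contexts = manager.get_contexts(count=3)
    #     results = manager.upload_multi(data, contexts)
    #     assert len({r.piece_cid for r in results}) == 1  # identical piece CID everywhere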

    def download(
        self,
        piece_cid: str,
        pdp_endpoint: Optional[str] = None,
        context: Optional[StorageContext] = None,
        provider_address: Optional[str] = None,
    ) -> bytes:
        """
        Download data by piece CID.

        If a retriever is configured, this method can perform SP-agnostic
        downloads by querying the client's datasets to find providers.

        Args:
            piece_cid: The piece CID to download
            pdp_endpoint: PDP endpoint to download from (optional if a retriever is configured)
            context: Explicit context to use
            provider_address: Optional specific provider address for the retriever

        Returns:
            Downloaded data bytes
        """
        if context is not None:
            return context.download(piece_cid)

        # Try an SP-agnostic download using the retriever
        if self._retriever is not None:
            from eth_account import Account
            acct = Account.from_key(self._private_key)
            return self._retriever.fetch_piece(
                piece_cid=piece_cid,
                client_address=acct.address,
                provider_address=provider_address,
            )

        # Fall back to an explicit endpoint
        if pdp_endpoint is None:
            raise ValueError(
                "pdp_endpoint required (or configure retriever for SP-agnostic downloads)"
            )

        ctx = StorageContext(
            pdp_endpoint=pdp_endpoint,
            chain=self._chain,
            private_key=self._private_key,
            data_set_id=0,
            client_data_set_id=0,
        )
        return ctx.download(piece_cid)
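
    # Illustrative sketch: with a retriever configured, no endpoint is needed;
    # otherwise pass the provider's PDP endpoint explicitly (URL is hypothetical).
    #
    #     blob = manager.download(piece_cid)  # SP-agnostic, via the retriever
    #     blob = manager.download(piece_cid, pdp_endpoint="https://pdp.example.com")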

    def find_datasets(self, client_address: Optional[str] = None) -> List[dict]:
        """
        Query datasets for a client with enhanced details.

        Args:
            client_address: Optional client address. If not provided,
                the address derived from the private key is used.

        Returns:
            List of enhanced dataset info dictionaries
        """
        if self._warm_storage is None:
            raise ValueError("warm_storage required for find_datasets")

        if client_address is None:
            from eth_account import Account
            acct = Account.from_key(self._private_key)
            client_address = acct.address

        datasets = self._warm_storage.get_client_data_sets_with_details(client_address)
        return [
            {
                "data_set_id": ds.data_set_id,
                "client_data_set_id": ds.client_data_set_id,
                "provider_id": ds.provider_id,
                "service_provider": ds.service_provider,
                "payer": ds.payer,
                "payee": ds.payee,
                "active_piece_count": ds.active_piece_count,
                "is_live": ds.is_live,
                "is_managed": ds.is_managed,
                "with_cdn": ds.with_cdn,
                "metadata": ds.metadata,
                "pdp_end_epoch": ds.pdp_end_epoch,
            }
            for ds in datasets
        ]

    def terminate_data_set(self, data_set_id: int) -> str:
        """
        Terminate a dataset. This also removes all pieces in the dataset.

        Args:
            data_set_id: The ID of the dataset to terminate

        Returns:
            Transaction hash
        """
        if self._warm_storage is None:
            raise ValueError("warm_storage required for terminate_data_set")

        from eth_account import Account
        acct = Account.from_key(self._private_key)
        return self._warm_storage.terminate_data_set(acct.address, data_set_id)

    def get_storage_info(self) -> StorageInfo:
        """
        Get comprehensive information about the storage service.

        Returns service pricing, approved providers, contract addresses,
        and configuration parameters.

        Returns:
            StorageInfo with pricing, providers, and service parameters
        """
        if self._warm_storage is None:
            raise ValueError("warm_storage required for get_storage_info")
        if self._sp_registry is None:
            raise ValueError("sp_registry required for get_storage_info")

        # Get pricing info
        pricing_rates = self._warm_storage.get_current_pricing_rates()

        # Parse pricing - the format may vary, so handle the common cases.
        # Typically returns (priceNoCDN, priceWithCDN, epochsPerMonth, tokenAddress).
        if isinstance(pricing_rates, (list, tuple)) and len(pricing_rates) >= 4:
            price_no_cdn = int(pricing_rates[0])
            price_with_cdn = int(pricing_rates[1])
            epochs_per_month = int(pricing_rates[2])
            token_address = pricing_rates[3]
        else:
            # Fallback to safe defaults
            price_no_cdn = 0
            price_with_cdn = 0
            epochs_per_month = EPOCHS_PER_DAY * DAYS_PER_MONTH
            token_address = ""

        # Calculate per-epoch and per-day pricing
        pricing_no_cdn = StoragePricing(
            per_tib_per_month=price_no_cdn,
            per_tib_per_day=price_no_cdn // DAYS_PER_MONTH if price_no_cdn else 0,
            per_tib_per_epoch=price_no_cdn // epochs_per_month if price_no_cdn and epochs_per_month else 0,
        )
        pricing_with_cdn = StoragePricing(
            per_tib_per_month=price_with_cdn,
            per_tib_per_day=price_with_cdn // DAYS_PER_MONTH if price_with_cdn else 0,
            per_tib_per_epoch=price_with_cdn // epochs_per_month if price_with_cdn and epochs_per_month else 0,
        )

        # Get approved provider IDs
        try:
            approved_ids = self._warm_storage.get_approved_provider_ids()
        except Exception:
            approved_ids = []

        # Get provider details
        providers = []
        for pid in approved_ids:
            try:
                provider = self._sp_registry.get_provider(pid)
                if provider and provider.is_active:
                    providers.append({
                        "provider_id": provider.provider_id,
                        "service_provider": provider.service_provider,
                        "payee": provider.payee,
                        "name": provider.name,
                        "description": provider.description,
                        "is_active": provider.is_active,
                    })
            except Exception:
                continue

        return StorageInfo(
            pricing_no_cdn=pricing_no_cdn,
            pricing_with_cdn=pricing_with_cdn,
            token_address=str(token_address),
            token_symbol="USDFC",  # Standard token for Filecoin storage
            providers=providers,
            service_parameters=ServiceParameters(
                epochs_per_month=epochs_per_month,
            ),
            approved_provider_ids=approved_ids,
        )
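
For orientation, here is a minimal end-to-end sketch of wiring this manager up. It is illustrative only: the construction of `chain`, `sp_registry`, and `warm_storage`, and the `piece_cid` attribute on the upload result, are assumptions about the surrounding pynapse modules rather than confirmed API.

    # Hypothetical wiring; dependency construction is assumed, not confirmed API.
    from pynapse.storage.manager import StorageManager

    manager = StorageManager(
        chain=chain,                # configured chain object (see pynapse/core/chains.py)
        private_key="0x...",        # client signing key
        sp_registry=sp_registry,    # see pynapse/sp_registry/service.py
        warm_storage=warm_storage,  # see pynapse/warm_storage/service.py
    )

    info = manager.preflight(size_bytes=1024, provider_count=2)
    print(info.estimated_cost_per_epoch, info.estimated_total_cost)

    result = manager.upload(b"hello filecoin")
    blob = manager.download(result.piece_cid)  # assumes a retriever is configured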