xache-5.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xache/__init__.py +142 -0
- xache/client.py +331 -0
- xache/crypto/__init__.py +17 -0
- xache/crypto/signing.py +244 -0
- xache/crypto/wallet.py +240 -0
- xache/errors.py +184 -0
- xache/payment/__init__.py +5 -0
- xache/payment/handler.py +244 -0
- xache/services/__init__.py +29 -0
- xache/services/budget.py +285 -0
- xache/services/collective.py +174 -0
- xache/services/extraction.py +173 -0
- xache/services/facilitator.py +296 -0
- xache/services/identity.py +415 -0
- xache/services/memory.py +401 -0
- xache/services/owner.py +293 -0
- xache/services/receipts.py +202 -0
- xache/services/reputation.py +274 -0
- xache/services/royalty.py +290 -0
- xache/services/sessions.py +268 -0
- xache/services/workspaces.py +447 -0
- xache/types.py +399 -0
- xache/utils/__init__.py +5 -0
- xache/utils/cache.py +214 -0
- xache/utils/http.py +209 -0
- xache/utils/retry.py +101 -0
- xache-5.0.0.dist-info/METADATA +337 -0
- xache-5.0.0.dist-info/RECORD +30 -0
- xache-5.0.0.dist-info/WHEEL +5 -0
- xache-5.0.0.dist-info/top_level.txt +1 -0
xache/services/collective.py
@@ -0,0 +1,174 @@
"""Collective Service - Contribute and query heuristics per LLD §2.5"""

from typing import List, Optional
from ..types import (
    ContributeHeuristicRequest,
    ContributeHeuristicResponse,
    QueryCollectiveResponse,
    HeuristicMatch,
    HeuristicMetrics,
)


class CollectiveService:
    """Collective service for heuristic marketplace"""

    def __init__(self, client):
        self.client = client

    async def contribute(
        self,
        pattern: str,
        pattern_hash: str,
        domain: str,
        tags: List[str],
        metrics: HeuristicMetrics,
        encrypted_content_ref: str,
        context_type: Optional[str] = None,
        metadata: Optional[dict] = None,
    ) -> ContributeHeuristicResponse:
        """
        Contribute a heuristic to the collective per LLD §2.5
        Cost: $0.001 (automatic 402 payment)

        Args:
            pattern: Pattern text (10-500 chars)
            pattern_hash: Hash of pattern for deduplication
            domain: Domain (e.g., 'javascript', 'python', 'devops')
            tags: Tags for categorization (1-10 tags)
            metrics: Heuristic metrics (success_rate, sample_size, confidence)
            encrypted_content_ref: Reference to encrypted content in R2
            context_type: Optional context type
            metadata: Optional metadata
        """
        self._validate_contribute_request(
            pattern, pattern_hash, domain, tags, metrics, encrypted_content_ref
        )

        response = await self.client.request_with_payment(
            "POST",
            "/v1/collective/contribute",
            {
                "pattern": pattern,
                "patternHash": pattern_hash,
                "domain": domain,
                "tags": tags,
                "metrics": {
                    "successRate": metrics.success_rate,
                    "sampleSize": metrics.sample_size,
                    "confidence": metrics.confidence,
                },
                "encryptedContentRef": encrypted_content_ref,
                "contextType": context_type,
                "metadata": metadata,
            },
        )

        if not response.success or not response.data:
            raise Exception("Heuristic contribution failed")

        data = response.data
        return ContributeHeuristicResponse(
            heuristic_id=data["heuristicId"],
            pattern=data["pattern"],
            domain=data["domain"],
            tags=data["tags"],
            receipt_id=data["receiptId"],
        )

    async def query(
        self,
        query_text: str,
        domain: Optional[str] = None,
        limit: int = 10,
    ) -> QueryCollectiveResponse:
        """
        Query the collective for relevant heuristics per LLD §2.5
        Cost: $0.01 + royalties (automatic 402 payment)
        """
        self._validate_query_request(query_text, limit)

        response = await self.client.request_with_payment(
            "POST",
            "/v1/collective/query",
            {
                "queryText": query_text,
                "domain": domain,
                "limit": limit,
            },
        )

        if not response.success or not response.data:
            raise Exception("Collective query failed")

        data = response.data
        matches = [
            HeuristicMatch(**match) for match in data["matches"]
        ]

        return QueryCollectiveResponse(
            matches=matches,
            total_cost=data["totalCost"],
            royalties_usd=data["royaltiesUSD"],
            receipt_id=data["receiptId"],
        )

    def _validate_contribute_request(
        self,
        pattern: str,
        pattern_hash: str,
        domain: str,
        tags: List[str],
        metrics: HeuristicMetrics,
        encrypted_content_ref: str,
    ):
        """Validate contribution request"""
        # Validate pattern
        if not pattern or len(pattern) < 10:
            raise ValueError("pattern must be at least 10 characters")
        if len(pattern) > 500:
            raise ValueError("pattern must be at most 500 characters")

        # Validate patternHash (required per LLD §2.4)
        if not pattern_hash or not isinstance(pattern_hash, str):
            raise ValueError("pattern_hash is required and must be a string")

        # Validate domain
        if not domain:
            raise ValueError("domain is required")

        # Validate tags
        if not tags or len(tags) == 0:
            raise ValueError("tags must be a non-empty list")
        if len(tags) > 10:
            raise ValueError("tags must have at most 10 items")

        # Validate metrics (required per LLD §2.4)
        if not metrics or not isinstance(metrics, HeuristicMetrics):
            raise ValueError("metrics is required and must be a HeuristicMetrics instance")

        if not isinstance(metrics.success_rate, (int, float)):
            raise ValueError("metrics.success_rate must be a number")
        if metrics.success_rate < 0 or metrics.success_rate > 1:
            raise ValueError("metrics.success_rate must be between 0 and 1")

        if not isinstance(metrics.sample_size, int) or metrics.sample_size < 1:
            raise ValueError("metrics.sample_size must be a positive integer")

        if not isinstance(metrics.confidence, (int, float)):
            raise ValueError("metrics.confidence must be a number")
        if metrics.confidence < 0 or metrics.confidence > 1:
            raise ValueError("metrics.confidence must be between 0 and 1")

        # Validate encryptedContentRef (required per LLD §2.4)
        if not encrypted_content_ref or not isinstance(encrypted_content_ref, str):
            raise ValueError("encrypted_content_ref is required and must be a string")

    def _validate_query_request(self, query_text: str, limit: int):
        """Validate query request"""
        if not query_text or len(query_text) < 5:
            raise ValueError("query_text must be at least 5 characters")
        if len(query_text) > 500:
            raise ValueError("query_text must be at most 500 characters")
        if limit < 1 or limit > 50:
            raise ValueError("limit must be between 1 and 50")
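Neither `contribute()` nor `query()` carries a docstring example, so here is a minimal end-to-end sketch. It assumes the top-level client exposes this service as `client.collective` and that `HeuristicMetrics` accepts the three fields read above as keyword arguments; the R2 object reference is a hypothetical placeholder.

```python
# Hedged usage sketch: `client.collective` and the HeuristicMetrics constructor
# signature are assumptions; they are not shown in this diff.
import hashlib

from xache.types import HeuristicMetrics


async def share_and_reuse(client):
    pattern = "Prefer asyncio.gather over sequential awaits for independent I/O"
    contribution = await client.collective.contribute(
        pattern=pattern,
        pattern_hash=hashlib.sha256(pattern.encode()).hexdigest(),
        domain="python",
        tags=["asyncio", "performance"],
        metrics=HeuristicMetrics(success_rate=0.92, sample_size=25, confidence=0.85),
        encrypted_content_ref="r2://heuristics/example-object-key",  # hypothetical reference
    )
    print(f"Stored heuristic {contribution.heuristic_id}")

    # Query the collective for heuristics relevant to a new task.
    results = await client.collective.query(
        "speed up independent awaits", domain="python", limit=5
    )
    for match in results.matches:
        print(match)
```

Both calls go through `request_with_payment`, so the $0.001 contribution fee and the $0.01-plus-royalties query fee documented in the docstrings are settled automatically via the 402 flow.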
xache/services/extraction.py
@@ -0,0 +1,173 @@
"""
Extraction Service - AI-powered memory extraction from conversations
"""

from typing import List, Optional, Dict, Any
from dataclasses import dataclass


@dataclass
class ExtractedMemory:
    """Extracted memory from conversation"""
    content: str
    type: str  # 'preference', 'fact', 'pattern', 'error_fix', 'insight'
    context: str
    confidence: float
    source_text: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None


@dataclass
class ExtractionResult:
    """Result from memory extraction"""
    memories: List[ExtractedMemory]
    total_extracted: int
    stored_count: int
    skipped_count: int
    processing_time_ms: int


@dataclass
class ExtractMemoriesRequest:
    """Request for memory extraction"""
    conversation: str
    context: Optional[str] = None
    types: Optional[List[str]] = None
    auto_store: bool = True
    min_confidence: float = 0.7


class ExtractionService:
    """
    Extraction service for AI-powered memory extraction

    Automatically extracts valuable memories from conversations,
    including user preferences, facts, patterns, error fixes, and insights.
    """

    def __init__(self, client):
        self.client = client

    async def extract(self, request: ExtractMemoriesRequest) -> ExtractionResult:
        """
        Extract memories from a conversation

        Args:
            request: Extraction request with conversation and options

        Returns:
            Extraction result with extracted memories

        Example:
            ```python
            from xache.services.extraction import ExtractMemoriesRequest

            result = await client.extraction.extract(ExtractMemoriesRequest(
                conversation="User: I prefer dark mode for all apps.\\nAssistant: ...",
                context="user-preferences",
                auto_store=True,
                min_confidence=0.8
            ))

            print(f"Extracted {result.total_extracted} memories")
            for m in result.memories:
                print(f"  [{m.type}] {m.content} (confidence: {m.confidence})")
            ```
        """
        body = {
            "conversation": request.conversation,
            "autoStore": request.auto_store,
            "minConfidence": request.min_confidence,
        }

        if request.context:
            body["context"] = request.context
        if request.types:
            body["types"] = request.types

        response = await self.client.request("POST", "/v1/extraction/extract", body)

        if not response.success or not response.data:
            raise Exception(
                response.error.get("message", "Failed to extract memories")
                if response.error
                else "Failed to extract memories"
            )

        data = response.data
        return ExtractionResult(
            memories=[
                ExtractedMemory(
                    content=m["content"],
                    type=m["type"],
                    context=m.get("context", ""),
                    confidence=m["confidence"],
                    source_text=m.get("sourceText"),
                    metadata=m.get("metadata"),
                )
                for m in data.get("memories", [])
            ],
            total_extracted=data.get("totalExtracted", 0),
            stored_count=data.get("storedCount", 0),
            skipped_count=data.get("skippedCount", 0),
            processing_time_ms=data.get("processingTimeMs", 0),
        )

    async def analyze(self, conversation: str) -> Dict[str, Any]:
        """
        Analyze a conversation without storing memories

        Args:
            conversation: Conversation text to analyze

        Returns:
            Analysis result with potential memories

        Example:
            ```python
            analysis = await client.extraction.analyze(
                "User: I always use vim keybindings..."
            )

            print(f"Found {len(analysis['potentialMemories'])} potential memories")
            ```
        """
        response = await self.client.request(
            "POST", "/v1/extraction/analyze", {"conversation": conversation}
        )

        if not response.success or not response.data:
            raise Exception(
                response.error.get("message", "Failed to analyze conversation")
                if response.error
                else "Failed to analyze conversation"
            )

        return response.data

    async def get_types(self) -> List[Dict[str, Any]]:
        """
        Get supported memory types for extraction

        Returns:
            List of supported memory types with descriptions

        Example:
            ```python
            types = await client.extraction.get_types()
            for t in types:
                print(f"{t['type']}: {t['description']}")
            ```
        """
        response = await self.client.request(
            "GET", "/v1/extraction/types", skip_auth=True
        )

        if not response.success or not response.data:
            raise Exception(
                response.error.get("message", "Failed to get extraction types")
                if response.error
                else "Failed to get extraction types"
            )

        return response.data.get("types", [])
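The docstrings above show `analyze()` and `extract()` in isolation. A short sketch of how they combine into a preview-then-store flow; the `client.extraction` attribute and the `potentialMemories` key are taken from those examples, everything else mirrors the dataclass defaults above.

```python
# Preview a conversation first, then persist only high-confidence memories.
from xache.services.extraction import ExtractMemoriesRequest


async def review_then_store(client, conversation: str):
    # Dry run: see what the service would extract without storing anything.
    analysis = await client.extraction.analyze(conversation)
    if not analysis.get("potentialMemories"):
        return None

    # Persist only preference/fact memories above a 0.8 confidence threshold.
    return await client.extraction.extract(ExtractMemoriesRequest(
        conversation=conversation,
        types=["preference", "fact"],
        auto_store=True,
        min_confidence=0.8,
    ))
```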
xache/services/facilitator.py
@@ -0,0 +1,296 @@
"""
Facilitator Service - x402 v2 facilitator selection and management
"""

from typing import List, Optional, Dict, Any, Literal
from dataclasses import dataclass, field
import time


# Type aliases
NetworkId = Literal['base', 'base-sepolia', 'solana', 'solana-devnet']
PaymentScheme = Literal['exact']
ChainType = Literal['evm', 'solana']


@dataclass
class FacilitatorConfig:
    """Facilitator configuration"""
    id: str
    name: str
    chains: List[ChainType]
    networks: List[NetworkId]
    schemes: List[PaymentScheme]
    priority: int
    healthy: bool = True
    avg_latency_ms: Optional[int] = None
    last_health_check: Optional[int] = None


@dataclass
class FacilitatorPreferences:
    """User preferences for facilitator selection"""
    preferred_facilitators: List[str] = field(default_factory=list)
    avoid_networks: List[NetworkId] = field(default_factory=list)
    max_latency_ms: Optional[int] = None
    preferred_chain: Optional[ChainType] = None


@dataclass
class FacilitatorSelection:
    """Selected facilitator with reasoning"""
    facilitator: FacilitatorConfig
    reason: Literal['preference', 'priority', 'latency', 'fallback']
    alternatives: List[FacilitatorConfig] = field(default_factory=list)


class FacilitatorService:
    """
    Facilitator service for x402 v2 payment facilitator selection

    Manages facilitator preferences and selection for payment processing.
    Uses a hardcoded registry of available facilitators.
    """

    def __init__(self, client):
        self.client = client
        self._preferences = FacilitatorPreferences()
        self._cached_facilitators: List[FacilitatorConfig] = []
        self._last_fetch_time: int = 0
        self._cache_duration_ms = 300000  # 5 minutes

    def set_preferences(self, preferences: Dict[str, Any]) -> None:
        """
        Set facilitator preferences for payment routing

        Args:
            preferences: Dict with optional keys:
                - preferred_facilitators: List of facilitator IDs
                - avoid_networks: List of networks to avoid
                - max_latency_ms: Maximum acceptable latency
                - preferred_chain: 'evm' or 'solana'

        Example:
            ```python
            client.facilitators.set_preferences({
                'preferred_facilitators': ['cdp'],
                'preferred_chain': 'solana',
                'max_latency_ms': 5000,
            })
            ```
        """
        if 'preferred_facilitators' in preferences:
            self._preferences.preferred_facilitators = preferences['preferred_facilitators']
        if 'avoid_networks' in preferences:
            self._preferences.avoid_networks = preferences['avoid_networks']
        if 'max_latency_ms' in preferences:
            self._preferences.max_latency_ms = preferences['max_latency_ms']
        if 'preferred_chain' in preferences:
            self._preferences.preferred_chain = preferences['preferred_chain']

    def get_preferences(self) -> FacilitatorPreferences:
        """Get current facilitator preferences"""
        return self._preferences

    def clear_preferences(self) -> None:
        """Clear facilitator preferences"""
        self._preferences = FacilitatorPreferences()

    def get_default_facilitator(self) -> FacilitatorConfig:
        """
        Get the default CDP facilitator configuration
        This is the built-in facilitator for Coinbase Developer Platform
        """
        return FacilitatorConfig(
            id='cdp',
            name='Coinbase Developer Platform',
            chains=['evm', 'solana'],
            networks=['base', 'base-sepolia', 'solana', 'solana-devnet'],
            schemes=['exact'],
            priority=100,
            healthy=True,
        )

    async def list(self, force_refresh: bool = False) -> List[FacilitatorConfig]:
        """
        List all available facilitators
        Returns the default CDP facilitator. Cached for performance.

        Args:
            force_refresh: Force refresh (ignored - using hardcoded registry)

        Returns:
            List of facilitator configurations

        Example:
            ```python
            facilitators = await client.facilitators.list()

            for f in facilitators:
                print(f"{f.name}: {f.chains}")
            ```
        """
        now = int(time.time() * 1000)

        # Check cache
        if (
            self._cached_facilitators
            and not force_refresh
            and (now - self._last_fetch_time) < self._cache_duration_ms
        ):
            return self._cached_facilitators

        # Return default facilitator (CDP)
        # In future, this could fetch from a /v1/facilitators endpoint
        self._cached_facilitators = [self.get_default_facilitator()]
        self._last_fetch_time = now

        return self._cached_facilitators

    async def get(self, facilitator_id: str) -> Optional[FacilitatorConfig]:
        """
        Get facilitator by ID

        Args:
            facilitator_id: Facilitator identifier

        Returns:
            Facilitator configuration or None if not found

        Example:
            ```python
            facilitator = await client.facilitators.get("cdp")
            if facilitator:
                print(f"Found: {facilitator.name}")
            ```
        """
        facilitators = await self.list()
        return next(
            (f for f in facilitators if f.id == facilitator_id), None
        )

    async def select(
        self,
        chain: ChainType,
        network: Optional[NetworkId] = None,
        scheme: PaymentScheme = 'exact',
    ) -> Optional[FacilitatorSelection]:
        """
        Select optimal facilitator based on criteria

        Args:
            chain: Blockchain chain (evm, solana)
            network: Network ID (base, base-sepolia, solana-devnet, etc.)
            scheme: Payment scheme (exact)

        Returns:
            Selected facilitator with reason, or None if no match

        Example:
            ```python
            selection = await client.facilitators.select(
                chain="evm",
                network="base-sepolia"
            )

            if selection:
                print(f"Selected: {selection.facilitator.name}")
                print(f"Reason: {selection.reason}")
            ```
        """
        facilitators = await self.list()

        # Filter by requirements
        candidates = [
            f for f in facilitators
            if chain in f.chains
            and (network is None or network in f.networks)
            and scheme in f.schemes
            and f.healthy is not False
        ]

        if not candidates:
            return None

        # Apply preferences
        if self._preferences.avoid_networks:
            candidates = [
                f for f in candidates
                if not any(n in f.networks for n in self._preferences.avoid_networks)
            ]

        if self._preferences.preferred_facilitators:
            preferred = [
                f for f in candidates
                if f.id in self._preferences.preferred_facilitators
            ]
            if preferred:
                candidates = preferred

        if self._preferences.max_latency_ms:
            within_latency = [
                f for f in candidates
                if f.avg_latency_ms is None or f.avg_latency_ms <= self._preferences.max_latency_ms
            ]
            if within_latency:
                candidates = within_latency

        # Sort by priority (descending) then latency (ascending)
        def sort_key(f: FacilitatorConfig):
            latency = f.avg_latency_ms if f.avg_latency_ms is not None else float('inf')
            return (-f.priority, latency)

        candidates.sort(key=sort_key)

        selected = candidates[0]
        alternatives = candidates[1:]

        # Determine selection reason
        reason: Literal['preference', 'priority', 'latency', 'fallback'] = 'priority'
        if selected.id in self._preferences.preferred_facilitators:
            reason = 'preference'
        elif len(candidates) > 1 and selected.avg_latency_ms is not None:
            reason = 'latency'
        elif len(candidates) == 1:
            reason = 'fallback'

        return FacilitatorSelection(
            facilitator=selected,
            reason=reason,
            alternatives=alternatives,
        )

    async def supports(
        self,
        facilitator_id: str,
        chain: ChainType,
        network: NetworkId,
        scheme: PaymentScheme = 'exact',
    ) -> bool:
        """
        Check if a specific facilitator supports the given requirements

        Args:
            facilitator_id: Facilitator ID
            chain: Chain type
            network: Network ID
            scheme: Payment scheme

        Returns:
            True if supported
        """
        facilitator = await self.get(facilitator_id)
        if not facilitator:
            return False

        return (
            chain in facilitator.chains
            and network in facilitator.networks
            and scheme in facilitator.schemes
            and facilitator.healthy is not False
        )

    def clear_cache(self):
        """Clear facilitator cache to force refresh on next list()"""
        self._cached_facilitators = []
        self._last_fetch_time = 0
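`supports()` is the only public method above without a docstring example. A brief sketch of how it pairs with `set_preferences()` and `select()`, reusing the `client.facilitators` attribute name from the existing examples; the routing values themselves are illustrative.

```python
# Preference-driven routing: constrain selection to CDP on Solana devnet,
# check capability with supports(), then let select() rank and explain.
async def pick_solana_route(client):
    client.facilitators.set_preferences({
        "preferred_facilitators": ["cdp"],
        "preferred_chain": "solana",
        "max_latency_ms": 5000,
    })

    # supports() answers a yes/no question for one named facilitator ...
    if await client.facilitators.supports("cdp", chain="solana", network="solana-devnet"):
        # ... while select() filters all healthy candidates and reports why one won.
        selection = await client.facilitators.select(chain="solana", network="solana-devnet")
        if selection:
            print(f"{selection.facilitator.name} chosen ({selection.reason})")
            return selection
    return None
```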