xache 5.0.0-py3-none-any.whl → 5.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
xache/__init__.py CHANGED
@@ -25,7 +25,7 @@ Example:
     ```
 """
 
-__version__ = "5.0.0"
+__version__ = "5.2.0"
 
 # Main client
 from .client import XacheClient
@@ -75,6 +75,21 @@ from .services import (
     CollectiveService,
     BudgetService,
     ReceiptsService,
+    ExtractionService,
+)
+
+# Extraction types
+from .services.extraction import (
+    LLMProvider,
+    LLMApiFormat,
+    LLMConfig,
+    LLMConfigApiKey,
+    LLMConfigEndpoint,
+    LLMConfigXacheManaged,
+    ExtractedMemory,
+    ExtractionMetadata,
+    ExtractionResult,
+    ExtractionOptions,
 )
 
 # Utilities
@@ -130,6 +145,18 @@ __all__ = [
     "CollectiveService",
     "BudgetService",
    "ReceiptsService",
+    "ExtractionService",
+    # Extraction types
+    "LLMProvider",
+    "LLMApiFormat",
+    "LLMConfig",
+    "LLMConfigApiKey",
+    "LLMConfigEndpoint",
+    "LLMConfigXacheManaged",
+    "ExtractedMemory",
+    "ExtractionMetadata",
+    "ExtractionResult",
+    "ExtractionOptions",
     # Utilities
     "RetryPolicy",
     "with_retry",
xache/services/extraction.py CHANGED
@@ -1,173 +1,473 @@
 """
 Extraction Service - AI-powered memory extraction from conversations
+Supports 10 major LLM providers plus custom endpoints
 """
 
-from typing import List, Optional, Dict, Any
-from dataclasses import dataclass
+from typing import List, Optional, Dict, Any, Union, Literal
+from dataclasses import dataclass, field
+
+
+# LLM Provider type - matches TypeScript SDK
+LLMProvider = Literal[
+    'anthropic', 'openai', 'google', 'mistral', 'groq',
+    'together', 'fireworks', 'cohere', 'xai', 'deepseek'
+]
+
+# LLM API format for custom endpoints
+LLMApiFormat = Literal['openai', 'anthropic', 'cohere']
+
+
+@dataclass
+class LLMConfigApiKey:
+    """LLM config for api-key mode - use your own API key with major providers"""
+    type: Literal['api-key'] = 'api-key'
+    provider: LLMProvider = 'anthropic'
+    api_key: str = ''
+    model: Optional[str] = None
+
+
+@dataclass
+class LLMConfigEndpoint:
+    """LLM config for endpoint mode - custom/self-hosted endpoints"""
+    type: Literal['endpoint'] = 'endpoint'
+    url: str = ''
+    auth_token: Optional[str] = None
+    model: Optional[str] = None
+    format: LLMApiFormat = 'openai'
+
+
+@dataclass
+class LLMConfigXacheManaged:
+    """LLM config for xache-managed mode - Xache provides the LLM"""
+    type: Literal['xache-managed'] = 'xache-managed'
+    provider: Literal['anthropic', 'openai'] = 'anthropic'
+    model: Optional[str] = None
+
+
+# Union type for LLM config
+LLMConfig = Union[LLMConfigApiKey, LLMConfigEndpoint, LLMConfigXacheManaged]
 
 
 @dataclass
 class ExtractedMemory:
     """Extracted memory from conversation"""
-    content: str
-    type: str  # 'preference', 'fact', 'pattern', 'error_fix', 'insight'
-    context: str
-    confidence: float
-    source_text: Optional[str] = None
-    metadata: Optional[Dict[str, Any]] = None
+    type: str  # 'preference', 'fact', 'pattern', 'relationship', etc.
+    data: Dict[str, Any] = field(default_factory=dict)
+    reasoning: Optional[str] = None
+    confidence: float = 1.0
+
+
+@dataclass
+class ExtractionMetadata:
+    """Metadata about the extraction operation"""
+    extraction_time: int = 0
+    llm_provider: str = ''
+    llm_model: str = ''
+    total_extractions: int = 0
+    stored_count: int = 0
+    payment_receipt_id: Optional[str] = None
 
 
 @dataclass
 class ExtractionResult:
     """Result from memory extraction"""
-    memories: List[ExtractedMemory]
-    total_extracted: int
-    stored_count: int
-    skipped_count: int
-    processing_time_ms: int
+    extractions: List[ExtractedMemory] = field(default_factory=list)
+    stored: Optional[List[str]] = None
+    metadata: ExtractionMetadata = field(default_factory=ExtractionMetadata)
 
 
 @dataclass
-class ExtractMemoriesRequest:
-    """Request for memory extraction"""
-    conversation: str
-    context: Optional[str] = None
-    types: Optional[List[str]] = None
-    auto_store: bool = True
-    min_confidence: float = 0.7
+class ExtractionOptions:
+    """Options for extraction"""
+    confidence_threshold: Optional[float] = None
+    context_hint: Optional[str] = None
+    auto_store: bool = False
+    subject: Optional[Dict[str, Any]] = None
 
 
 class ExtractionService:
     """
     Extraction service for AI-powered memory extraction
 
-    Automatically extracts valuable memories from conversations,
-    including user preferences, facts, patterns, error fixes, and insights.
+    Supports three LLM modes:
+    1. api-key: Use your own API key with major providers (10 supported)
+       - anthropic, openai, google, mistral, groq
+       - together, fireworks, cohere, xai, deepseek
+       - Cost: $0.002 per extraction
+
+    2. endpoint: Use custom/self-hosted endpoints
+       - Ollama, OpenRouter, vLLM, Modal, Replicate, etc.
+       - Supports openai, anthropic, cohere API formats
+       - Cost: $0.002 per extraction
+
+    3. xache-managed: Xache provides the LLM
+       - Requires PII-scrubbed traces
+       - Cost: $0.011 per extraction
+
+    Example:
+        ```python
+        from xache.services.extraction import LLMConfigApiKey
+
+        # Using your own Anthropic key
+        result = await client.extraction.extract(
+            trace="User: I prefer dark mode\\nAgent: I'll remember that",
+            llm_config=LLMConfigApiKey(
+                provider='anthropic',
+                api_key='sk-ant-...',
+            ),
+            options=ExtractionOptions(
+                confidence_threshold=0.8,
+                auto_store=True,
+            )
+        )
+
+        print(f"Extracted {len(result.extractions)} memories")
+        ```
     """
 
+    # Supported providers for api-key mode
+    SUPPORTED_PROVIDERS: List[str] = [
+        'anthropic', 'openai', 'google', 'mistral', 'groq',
+        'together', 'fireworks', 'cohere', 'xai', 'deepseek'
+    ]
+
+    # Supported API formats for endpoint mode
+    SUPPORTED_FORMATS: List[str] = ['openai', 'anthropic', 'cohere']
+
     def __init__(self, client):
         self.client = client
 
-    async def extract(self, request: ExtractMemoriesRequest) -> ExtractionResult:
+    def _build_llm_config_dict(self, llm_config: LLMConfig) -> Dict[str, Any]:
+        """Convert dataclass to API-compatible dict"""
+        if isinstance(llm_config, LLMConfigApiKey):
+            config = {
+                'type': 'api-key',
+                'provider': llm_config.provider,
+                'apiKey': llm_config.api_key,
+            }
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        elif isinstance(llm_config, LLMConfigEndpoint):
+            config = {
+                'type': 'endpoint',
+                'url': llm_config.url,
+                'format': llm_config.format,
+            }
+            if llm_config.auth_token:
+                config['authToken'] = llm_config.auth_token
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        elif isinstance(llm_config, LLMConfigXacheManaged):
+            config = {
+                'type': 'xache-managed',
+                'provider': llm_config.provider,
+            }
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        else:
+            # Assume it's already a dict
+            return llm_config
+
+    async def extract(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        llm_config: LLMConfig,
+        options: Optional[ExtractionOptions] = None,
+    ) -> ExtractionResult:
         """
-        Extract memories from a conversation
+        Extract memories from agent trace using specified LLM
 
         Args:
-            request: Extraction request with conversation and options
+            trace: The conversation trace (string or object)
+            llm_config: LLM configuration (api-key, endpoint, or xache-managed)
+            options: Extraction options
 
         Returns:
-            Extraction result with extracted memories
+            ExtractionResult with extracted memories
 
         Example:
            ```python
-            from xache.services.extraction import ExtractMemoriesRequest
+            # Using OpenAI
+            result = await client.extraction.extract(
+                trace="User: I always use vim keybindings...",
+                llm_config=LLMConfigApiKey(
+                    provider='openai',
+                    api_key='sk-...',
+                    model='gpt-4-turbo',
+                ),
+                options=ExtractionOptions(auto_store=True),
+            )
 
-            result = await client.extraction.extract(ExtractMemoriesRequest(
-                conversation="User: I prefer dark mode for all apps.\\nAssistant: ...",
-                context="user-preferences",
-                auto_store=True,
-                min_confidence=0.8
-            ))
+            # Using custom endpoint (Ollama)
+            result = await client.extraction.extract(
+                trace=conversation,
+                llm_config=LLMConfigEndpoint(
+                    url='http://localhost:11434/v1/chat/completions',
+                    model='llama2',
+                    format='openai',
+                ),
+            )
 
-            print(f"Extracted {result.total_extracted} memories")
-            for m in result.memories:
-                print(f"  [{m.type}] {m.content} (confidence: {m.confidence})")
+            # Using Xache-managed LLM
+            result = await client.extraction.extract(
+                trace=scrubbed_trace,  # Must be PII-scrubbed
+                llm_config=LLMConfigXacheManaged(
+                    provider='anthropic',
+                ),
+            )
             ```
         """
-        body = {
-            "conversation": request.conversation,
-            "autoStore": request.auto_store,
-            "minConfidence": request.min_confidence,
+        body: Dict[str, Any] = {
+            'trace': trace,
+            'llmConfig': self._build_llm_config_dict(llm_config),
         }
 
-        if request.context:
-            body["context"] = request.context
-        if request.types:
-            body["types"] = request.types
-
-        response = await self.client.request("POST", "/v1/extraction/extract", body)
+        if options:
+            opts: Dict[str, Any] = {}
+            if options.confidence_threshold is not None:
+                opts['confidenceThreshold'] = options.confidence_threshold
+            if options.context_hint:
+                opts['contextHint'] = options.context_hint
+            if options.auto_store:
+                opts['autoStore'] = options.auto_store
+            if options.subject:
+                opts['subject'] = options.subject
+            if opts:
+                body['options'] = opts
+
+        response = await self.client.request('POST', '/v1/extract', body)
 
         if not response.success or not response.data:
             raise Exception(
-                response.error.get("message", "Failed to extract memories")
+                response.error.get('message', 'Failed to extract memories')
                 if response.error
-                else "Failed to extract memories"
+                else 'Failed to extract memories'
             )
 
         data = response.data
+
+        # Parse extractions
+        extractions = []
+        for m in data.get('extractions', []):
+            extractions.append(ExtractedMemory(
+                type=m.get('type', 'unknown'),
+                data=m.get('data', {}),
+                reasoning=m.get('reasoning'),
+                confidence=m.get('confidence', 1.0),
+            ))
+
+        # Parse metadata
+        meta = data.get('metadata', {})
+        metadata = ExtractionMetadata(
+            extraction_time=meta.get('extractionTime', 0),
+            llm_provider=meta.get('llmProvider', ''),
+            llm_model=meta.get('llmModel', ''),
+            total_extractions=meta.get('totalExtractions', 0),
+            stored_count=meta.get('storedCount', 0),
+            payment_receipt_id=meta.get('paymentReceiptId'),
+        )
+
         return ExtractionResult(
-            memories=[
-                ExtractedMemory(
-                    content=m["content"],
-                    type=m["type"],
-                    context=m.get("context", ""),
-                    confidence=m["confidence"],
-                    source_text=m.get("sourceText"),
-                    metadata=m.get("metadata"),
-                )
-                for m in data.get("memories", [])
-            ],
-            total_extracted=data.get("totalExtracted", 0),
-            stored_count=data.get("storedCount", 0),
-            skipped_count=data.get("skippedCount", 0),
-            processing_time_ms=data.get("processingTimeMs", 0),
+            extractions=extractions,
+            stored=data.get('stored'),
+            metadata=metadata,
         )
 
-    async def analyze(self, conversation: str) -> Dict[str, Any]:
+    async def extract_with_anthropic(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        api_key: str,
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
         """
-        Analyze a conversation without storing memories
+        Convenience method: Extract memories using Anthropic
 
         Args:
-            conversation: Conversation text to analyze
-
-        Returns:
-            Analysis result with potential memories
+            trace: Conversation trace
+            api_key: Your Anthropic API key
+            model: Model name (default: claude-sonnet-4-20250514)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
         Example:
            ```python
-            analysis = await client.extraction.analyze(
-                "User: I always use vim keybindings..."
+            result = await client.extraction.extract_with_anthropic(
+                trace="User: I prefer dark mode...",
+                api_key="sk-ant-...",
+                auto_store=True,
             )
-
-            print(f"Found {len(analysis['potentialMemories'])} potential memories")
             ```
         """
-        response = await self.client.request(
-            "POST", "/v1/extraction/analyze", {"conversation": conversation}
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigApiKey(
+                provider='anthropic',
+                api_key=api_key,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
        )
 
-        if not response.success or not response.data:
-            raise Exception(
-                response.error.get("message", "Failed to analyze conversation")
-                if response.error
-                else "Failed to analyze conversation"
-            )
+    async def extract_with_openai(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        api_key: str,
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Convenience method: Extract memories using OpenAI
+
+        Args:
+            trace: Conversation trace
+            api_key: Your OpenAI API key
+            model: Model name (default: gpt-4-turbo)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
-        return response.data
+        Example:
+            ```python
+            result = await client.extraction.extract_with_openai(
+                trace="User: I prefer dark mode...",
+                api_key="sk-...",
+                model="gpt-4-turbo",
+                auto_store=True,
+            )
+            ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigApiKey(
+                provider='openai',
+                api_key=api_key,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
 
-    async def get_types(self) -> List[Dict[str, Any]]:
+    async def extract_with_endpoint(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        url: str,
+        model: str,
+        auth_token: Optional[str] = None,
+        format: LLMApiFormat = 'openai',
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
         """
-        Get supported memory types for extraction
+        Extract memories using custom endpoint (Ollama, OpenRouter, vLLM, etc.)
 
-        Returns:
-            List of supported memory types with descriptions
+        Args:
+            trace: Conversation trace
+            url: Endpoint URL
+            model: Model name
+            auth_token: Optional auth token
+            format: API format (openai, anthropic, cohere)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
         Example:
            ```python
-            types = await client.extraction.get_types()
-            for t in types:
-                print(f"{t['type']}: {t['description']}")
+            # Ollama
+            result = await client.extraction.extract_with_endpoint(
+                trace="User: I prefer dark mode...",
+                url="http://localhost:11434/v1/chat/completions",
+                model="llama2",
+                format="openai",
+            )
+
+            # OpenRouter
+            result = await client.extraction.extract_with_endpoint(
+                trace="User: ...",
+                url="https://openrouter.ai/api/v1/chat/completions",
+                model="anthropic/claude-3-sonnet",
+                auth_token="sk-or-...",
+                format="openai",
+            )
             ```
         """
-        response = await self.client.request(
-            "GET", "/v1/extraction/types", skip_auth=True
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigEndpoint(
+                url=url,
+                model=model,
+                auth_token=auth_token,
+                format=format,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
        )
 
-        if not response.success or not response.data:
-            raise Exception(
-                response.error.get("message", "Failed to get extraction types")
-                if response.error
-                else "Failed to get extraction types"
-            )
+    async def extract_with_xache_llm(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        provider: Literal['anthropic', 'openai'] = 'anthropic',
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Extract memories using Xache-managed LLM
+
+        IMPORTANT: Traces must be scrubbed of PII before calling this method.
+
+        Args:
+            trace: PII-scrubbed conversation trace
+            provider: Xache-managed provider (anthropic or openai)
+            model: Optional model override
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
-        return response.data.get("types", [])
+        Example:
+            ```python
+            # Scrub PII first
+            scrubbed = scrub_trace(raw_trace)
+
+            result = await client.extraction.extract_with_xache_llm(
+                trace=scrubbed,
+                provider="anthropic",
+                auto_store=True,
+            )
+            ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigXacheManaged(
+                provider=provider,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
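
For code written against 5.0.0 this is a breaking change: `ExtractMemoriesRequest`, `analyze()`, and `get_types()` are gone, the endpoint moved from `/v1/extraction/extract` to `/v1/extract`, and callers now supply the LLM explicitly. A migration sketch based only on the signatures above (`client` is assumed to be an initialized `XacheClient`, running inside an async context):

```python
from xache.services.extraction import LLMConfigApiKey, ExtractionOptions

# 5.0.0 (removed):
#   result = await client.extraction.extract(ExtractMemoriesRequest(
#       conversation="User: I prefer dark mode...",
#       auto_store=True, min_confidence=0.8))

# 5.2.0: conversation becomes trace, min_confidence maps onto
# ExtractionOptions.confidence_threshold, and an llm_config is required.
# Note that auto_store now defaults to False.
result = await client.extraction.extract(
    trace="User: I prefer dark mode...",
    llm_config=LLMConfigApiKey(provider="anthropic", api_key="sk-ant-..."),
    options=ExtractionOptions(confidence_threshold=0.8, auto_store=True),
)

# The result shape changed as well: result.extractions replaces
# result.memories, and the counts moved into result.metadata.
print(result.metadata.total_extractions, result.metadata.stored_count)
```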
xache/services/facilitator.py CHANGED
@@ -1,5 +1,7 @@
 """
 Facilitator Service - x402 v2 facilitator selection and management
+
+Fetches facilitator configuration from the API for network-agnostic operation.
 """
 
 from typing import List, Optional, Dict, Any, Literal
@@ -19,19 +21,20 @@ class FacilitatorConfig:
     id: str
     name: str
     chains: List[ChainType]
-    networks: List[NetworkId]
+    networks: List[str]
     schemes: List[PaymentScheme]
     priority: int
     healthy: bool = True
     avg_latency_ms: Optional[int] = None
     last_health_check: Optional[int] = None
+    pay_to: Optional[Dict[str, Dict[str, str]]] = None
 
 
 @dataclass
 class FacilitatorPreferences:
     """User preferences for facilitator selection"""
     preferred_facilitators: List[str] = field(default_factory=list)
-    avoid_networks: List[NetworkId] = field(default_factory=list)
+    avoid_networks: List[str] = field(default_factory=list)
     max_latency_ms: Optional[int] = None
     preferred_chain: Optional[ChainType] = None
 
@@ -49,13 +52,15 @@ class FacilitatorService:
     Facilitator service for x402 v2 payment facilitator selection
 
     Manages facilitator preferences and selection for payment processing.
-    Uses a hardcoded registry of available facilitators.
+    Fetches configuration from the API for network-agnostic operation.
     """
 
     def __init__(self, client):
         self.client = client
         self._preferences = FacilitatorPreferences()
         self._cached_facilitators: List[FacilitatorConfig] = []
+        self._cached_environment: str = 'testnet'
+        self._cached_default_network: str = 'base-sepolia'
         self._last_fetch_time: int = 0
         self._cache_duration_ms = 300000  # 5 minutes
 
@@ -96,10 +101,20 @@
         """Clear facilitator preferences"""
         self._preferences = FacilitatorPreferences()
 
-    def get_default_facilitator(self) -> FacilitatorConfig:
+    async def get_environment(self) -> str:
+        """Get the current environment (testnet or mainnet)"""
+        await self.list()  # Ensure cache is populated
+        return self._cached_environment
+
+    async def get_default_network(self) -> str:
+        """Get the default network for the current environment"""
+        await self.list()  # Ensure cache is populated
+        return self._cached_default_network
+
+    def _get_default_facilitator(self) -> FacilitatorConfig:
         """
-        Get the default CDP facilitator configuration
-        This is the built-in facilitator for Coinbase Developer Platform
+        Get the default CDP facilitator configuration (fallback)
+        This is used only if API is unavailable
         """
         return FacilitatorConfig(
             id='cdp',
@@ -114,10 +129,10 @@
     async def list(self, force_refresh: bool = False) -> List[FacilitatorConfig]:
         """
         List all available facilitators
-        Returns the default CDP facilitator. Cached for performance.
+        Fetches from API and caches for performance.
 
         Args:
-            force_refresh: Force refresh (ignored - using hardcoded registry)
+            force_refresh: Force refresh from API
 
         Returns:
             List of facilitator configurations
@@ -140,10 +155,39 @@
         ):
             return self._cached_facilitators
 
-        # Return default facilitator (CDP)
-        # In future, this could fetch from a /v1/facilitators endpoint
-        self._cached_facilitators = [self.get_default_facilitator()]
-        self._last_fetch_time = now
+        try:
+            # Fetch from API
+            response = await self.client._request('GET', '/v1/facilitators')
+            data = response.data
+
+            if data and 'facilitators' in data:
+                self._cached_facilitators = [
+                    FacilitatorConfig(
+                        id=f['id'],
+                        name=f['name'],
+                        chains=f['chains'],
+                        networks=f['networks'],
+                        schemes=f['schemes'],
+                        priority=f['priority'],
+                        healthy=f.get('healthy', True),
+                        avg_latency_ms=f.get('avgLatencyMs'),
+                        last_health_check=f.get('lastHealthCheck'),
+                        pay_to=f.get('payTo'),
+                    )
+                    for f in data['facilitators']
+                ]
+                self._cached_environment = data.get('environment', 'testnet')
+                self._cached_default_network = data.get('defaultNetwork', 'base-sepolia')
+                self._last_fetch_time = now
+                return self._cached_facilitators
+
+        except Exception as e:
+            print(f"[FacilitatorService] Failed to fetch facilitators from API, using fallback: {e}")
+
+        # Fallback to default if API fails
+        if not self._cached_facilitators:
+            self._cached_facilitators = [self._get_default_facilitator()]
+        self._last_fetch_time = now
 
         return self._cached_facilitators
 
@@ -172,7 +216,7 @@
     async def select(
         self,
         chain: ChainType,
-        network: Optional[NetworkId] = None,
+        network: Optional[str] = None,
         scheme: PaymentScheme = 'exact',
     ) -> Optional[FacilitatorSelection]:
         """
@@ -200,11 +244,18 @@
         """
         facilitators = await self.list()
 
+        # Default network based on environment
+        if network is None:
+            if chain == 'solana':
+                network = 'solana' if self._cached_environment == 'mainnet' else 'solana-devnet'
+            else:
+                network = self._cached_default_network
+
         # Filter by requirements
         candidates = [
             f for f in facilitators
             if chain in f.chains
-            and (network is None or network in f.networks)
+            and network in f.networks
            and scheme in f.schemes
            and f.healthy is not False
        ]
@@ -264,7 +315,7 @@
         self,
         facilitator_id: str,
         chain: ChainType,
-        network: NetworkId,
+        network: str,
         scheme: PaymentScheme = 'exact',
     ) -> bool:
         """
xache-5.0.0.dist-info/METADATA → xache-5.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xache
-Version: 5.0.0
+Version: 5.2.0
 Summary: Official Python SDK for Xache Protocol
 Home-page: https://github.com/xache-ai/xache-protocol
 Author: Xache Protocol
xache-5.0.0.dist-info/RECORD → xache-5.2.0.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
-xache/__init__.py,sha256=RFenUVyM4Mt5ycz_OvqYNg2oZVwrOF9EogK3jZ_yEpE,2989
+xache/__init__.py,sha256=K1QXmT5R7nbiTwDwQUz53J1vN6n73NNUA4yVdqCMfFs,3559
 xache/client.py,sha256=ABjFJFnrv4KmY5ihBXK5YC8sg50s48tGdDUSKq7Vd8g,10217
 xache/errors.py,sha256=sHLjfKGw1JxN3TdV_bswG9erH2vVAOFPzvOkWb1k0EI,5007
 xache/types.py,sha256=YpepxdWdWbkCRzRWQX9wckCL1R6zjHswrWTrI3CnrB4,9479
@@ -10,8 +10,8 @@ xache/payment/handler.py,sha256=X3RL1mvjHCn5FbloOjiNUv_CythRlJU0Vo524lx4DLg,8901
 xache/services/__init__.py,sha256=ERuZXa7R-Iv0oFBknxJiBO5nJC-oCtIut_4VITw_uOE,775
 xache/services/budget.py,sha256=6sZ3nojb888a3k774XPnJHfdkqIC2Y1BxJKt8zy76V0,10599
 xache/services/collective.py,sha256=utYLIHv3tuOYHbdYJYXqIy1hS-aYIRems744CsCUIUw,6229
-xache/services/extraction.py,sha256=5KElr4ldBR3kcOofvz4d3NwAuTvKVskZ-NTQ7BI2ChY,5317
-xache/services/facilitator.py,sha256=ikZNTqEkmcQixGSMJYGlspmbKfivMBFO3sg5Tzy7si8,9412
+xache/services/extraction.py,sha256=wtqsFMqCQUrY9idXYktkPdkdgKyZGuO8OdCuKBWjlUg,15412
+xache/services/facilitator.py,sha256=FlJh6YvPd1xiCdt6Y8Y9NpbmJI0o92Dcp3ZgzeR14bM,11512
 xache/services/identity.py,sha256=gOs5fN9juyoBfXQVm-G4whyUMJ6Oha2VmP_i3mQw0G0,13478
 xache/services/memory.py,sha256=ng9_cwL4jE4c3gdlwQDZyqaBdQgwtqApEwj0LkZYWRY,13290
 xache/services/owner.py,sha256=2ASJFjApS3AiQEpoS2oM9M3sisi0Y6xSjmU1fwUM0rA,8912
@@ -24,7 +24,7 @@ xache/utils/__init__.py,sha256=8VrQm0QnyqxdplpCG7BDRiAVdBGWrjUs9ipH2zsJOBM,106
 xache/utils/cache.py,sha256=9zhE9dIXFTofj7jz1TX-FkAqmclqoYXTe4FwwGLeKT4,5479
 xache/utils/http.py,sha256=rIQCYvYrziNrNfEbOnIKbCOGGf7bcdTvZrrU_W6CcZA,6547
 xache/utils/retry.py,sha256=OJYBGozKIoteCvKw50dqd4ThhOo-WisorcKa8Tr6mnE,2860
-xache-5.0.0.dist-info/METADATA,sha256=Xk9jdBWRacl2aVkn89dGbdLr9xHNmi8FWbK-B2BHyQg,8469
-xache-5.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-xache-5.0.0.dist-info/top_level.txt,sha256=FBWE4IVb7zoLS9arsdrl97QVETlwFvYGAx6xEJZOEUU,6
-xache-5.0.0.dist-info/RECORD,,
+xache-5.2.0.dist-info/METADATA,sha256=Er8B6d8cJ2_qkmkJTrlZFK8UvNlf6jYeyFwTTeRXJBg,8469
+xache-5.2.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+xache-5.2.0.dist-info/top_level.txt,sha256=FBWE4IVb7zoLS9arsdrl97QVETlwFvYGAx6xEJZOEUU,6
+xache-5.2.0.dist-info/RECORD,,