xache 5.1.0__tar.gz → 5.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {xache-5.1.0 → xache-5.2.1}/PKG-INFO +5 -6
  2. {xache-5.1.0 → xache-5.2.1}/README.md +4 -5
  3. {xache-5.1.0 → xache-5.2.1}/pyproject.toml +1 -1
  4. {xache-5.1.0 → xache-5.2.1}/xache/__init__.py +28 -1
  5. xache-5.2.1/xache/services/extraction.py +473 -0
  6. {xache-5.1.0 → xache-5.2.1}/xache.egg-info/PKG-INFO +5 -6
  7. xache-5.1.0/xache/services/extraction.py +0 -173
  8. {xache-5.1.0 → xache-5.2.1}/setup.cfg +0 -0
  9. {xache-5.1.0 → xache-5.2.1}/setup.py +0 -0
  10. {xache-5.1.0 → xache-5.2.1}/xache/client.py +0 -0
  11. {xache-5.1.0 → xache-5.2.1}/xache/crypto/__init__.py +0 -0
  12. {xache-5.1.0 → xache-5.2.1}/xache/crypto/signing.py +0 -0
  13. {xache-5.1.0 → xache-5.2.1}/xache/crypto/wallet.py +0 -0
  14. {xache-5.1.0 → xache-5.2.1}/xache/errors.py +0 -0
  15. {xache-5.1.0 → xache-5.2.1}/xache/payment/__init__.py +0 -0
  16. {xache-5.1.0 → xache-5.2.1}/xache/payment/handler.py +0 -0
  17. {xache-5.1.0 → xache-5.2.1}/xache/services/__init__.py +0 -0
  18. {xache-5.1.0 → xache-5.2.1}/xache/services/budget.py +0 -0
  19. {xache-5.1.0 → xache-5.2.1}/xache/services/collective.py +0 -0
  20. {xache-5.1.0 → xache-5.2.1}/xache/services/facilitator.py +0 -0
  21. {xache-5.1.0 → xache-5.2.1}/xache/services/identity.py +0 -0
  22. {xache-5.1.0 → xache-5.2.1}/xache/services/memory.py +0 -0
  23. {xache-5.1.0 → xache-5.2.1}/xache/services/owner.py +0 -0
  24. {xache-5.1.0 → xache-5.2.1}/xache/services/receipts.py +0 -0
  25. {xache-5.1.0 → xache-5.2.1}/xache/services/reputation.py +0 -0
  26. {xache-5.1.0 → xache-5.2.1}/xache/services/royalty.py +0 -0
  27. {xache-5.1.0 → xache-5.2.1}/xache/services/sessions.py +0 -0
  28. {xache-5.1.0 → xache-5.2.1}/xache/services/workspaces.py +0 -0
  29. {xache-5.1.0 → xache-5.2.1}/xache/types.py +0 -0
  30. {xache-5.1.0 → xache-5.2.1}/xache/utils/__init__.py +0 -0
  31. {xache-5.1.0 → xache-5.2.1}/xache/utils/cache.py +0 -0
  32. {xache-5.1.0 → xache-5.2.1}/xache/utils/http.py +0 -0
  33. {xache-5.1.0 → xache-5.2.1}/xache/utils/retry.py +0 -0
  34. {xache-5.1.0 → xache-5.2.1}/xache.egg-info/SOURCES.txt +0 -0
  35. {xache-5.1.0 → xache-5.2.1}/xache.egg-info/dependency_links.txt +0 -0
  36. {xache-5.1.0 → xache-5.2.1}/xache.egg-info/requires.txt +0 -0
  37. {xache-5.1.0 → xache-5.2.1}/xache.egg-info/top_level.txt +0 -0
--- xache-5.1.0/PKG-INFO
+++ xache-5.2.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xache
-Version: 5.1.0
+Version: 5.2.1
 Summary: Official Python SDK for Xache Protocol
 Home-page: https://github.com/xache-ai/xache-protocol
 Author: Xache Protocol
@@ -44,7 +44,7 @@ Dynamic: requires-python
 
 # Xache Protocol Python SDK
 
-Official Python SDK for [Xache Protocol](https://xache.ai) - decentralized agent memory and collective intelligence marketplace.
+Official Python SDK for [Xache Protocol](https://xache.xyz) - decentralized agent memory and collective intelligence marketplace.
 
 ## Features
 
@@ -331,7 +331,6 @@ MIT
 
 ## Links
 
-- [Documentation](https://docs.xache.ai)
-- [Protocol Specification](https://github.com/xache-ai/xache-protocol)
-- [API Reference](https://api.xache.xyz/docs)
-- [Discord](https://discord.gg/xache)
+- [Documentation](https://docs.xache.xyz)
+- [GitHub](https://github.com/xacheai/xache-protocol)
+- [Website](https://xache.xyz)
--- xache-5.1.0/README.md
+++ xache-5.2.1/README.md
@@ -1,6 +1,6 @@
 # Xache Protocol Python SDK
 
-Official Python SDK for [Xache Protocol](https://xache.ai) - decentralized agent memory and collective intelligence marketplace.
+Official Python SDK for [Xache Protocol](https://xache.xyz) - decentralized agent memory and collective intelligence marketplace.
 
 ## Features
 
@@ -287,7 +287,6 @@ MIT
 
 ## Links
 
-- [Documentation](https://docs.xache.ai)
-- [Protocol Specification](https://github.com/xache-ai/xache-protocol)
-- [API Reference](https://api.xache.xyz/docs)
-- [Discord](https://discord.gg/xache)
+- [Documentation](https://docs.xache.xyz)
+- [GitHub](https://github.com/xacheai/xache-protocol)
+- [Website](https://xache.xyz)
--- xache-5.1.0/pyproject.toml
+++ xache-5.2.1/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "xache"
-version = "5.1.0"
+version = "5.2.1"
 description = "Official Python SDK for Xache Protocol"
 readme = "README.md"
 requires-python = ">=3.8"
--- xache-5.1.0/xache/__init__.py
+++ xache-5.2.1/xache/__init__.py
@@ -25,7 +25,7 @@ Example:
     ```
 """
 
-__version__ = "5.0.0"
+__version__ = "5.2.0"
 
 # Main client
 from .client import XacheClient
@@ -75,6 +75,21 @@ from .services import (
     CollectiveService,
     BudgetService,
     ReceiptsService,
+    ExtractionService,
+)
+
+# Extraction types
+from .services.extraction import (
+    LLMProvider,
+    LLMApiFormat,
+    LLMConfig,
+    LLMConfigApiKey,
+    LLMConfigEndpoint,
+    LLMConfigXacheManaged,
+    ExtractedMemory,
+    ExtractionMetadata,
+    ExtractionResult,
+    ExtractionOptions,
 )
 
 # Utilities
@@ -130,6 +145,18 @@ __all__ = [
     "CollectiveService",
     "BudgetService",
     "ReceiptsService",
+    "ExtractionService",
+    # Extraction types
+    "LLMProvider",
+    "LLMApiFormat",
+    "LLMConfig",
+    "LLMConfigApiKey",
+    "LLMConfigEndpoint",
+    "LLMConfigXacheManaged",
+    "ExtractedMemory",
+    "ExtractionMetadata",
+    "ExtractionResult",
+    "ExtractionOptions",
     # Utilities
     "RetryPolicy",
     "with_retry",
--- /dev/null
+++ xache-5.2.1/xache/services/extraction.py
@@ -0,0 +1,473 @@
+"""
+Extraction Service - AI-powered memory extraction from conversations
+Supports 10 major LLM providers plus custom endpoints
+"""
+
+from typing import List, Optional, Dict, Any, Union, Literal
+from dataclasses import dataclass, field
+
+
+# LLM Provider type - matches TypeScript SDK
+LLMProvider = Literal[
+    'anthropic', 'openai', 'google', 'mistral', 'groq',
+    'together', 'fireworks', 'cohere', 'xai', 'deepseek'
+]
+
+# LLM API format for custom endpoints
+LLMApiFormat = Literal['openai', 'anthropic', 'cohere']
+
+
+@dataclass
+class LLMConfigApiKey:
+    """LLM config for api-key mode - use your own API key with major providers"""
+    type: Literal['api-key'] = 'api-key'
+    provider: LLMProvider = 'anthropic'
+    api_key: str = ''
+    model: Optional[str] = None
+
+
+@dataclass
+class LLMConfigEndpoint:
+    """LLM config for endpoint mode - custom/self-hosted endpoints"""
+    type: Literal['endpoint'] = 'endpoint'
+    url: str = ''
+    auth_token: Optional[str] = None
+    model: Optional[str] = None
+    format: LLMApiFormat = 'openai'
+
+
+@dataclass
+class LLMConfigXacheManaged:
+    """LLM config for xache-managed mode - Xache provides the LLM"""
+    type: Literal['xache-managed'] = 'xache-managed'
+    provider: Literal['anthropic', 'openai'] = 'anthropic'
+    model: Optional[str] = None
+
+
+# Union type for LLM config
+LLMConfig = Union[LLMConfigApiKey, LLMConfigEndpoint, LLMConfigXacheManaged]
+
+
+@dataclass
+class ExtractedMemory:
+    """Extracted memory from conversation"""
+    type: str  # 'preference', 'fact', 'pattern', 'relationship', etc.
+    data: Dict[str, Any] = field(default_factory=dict)
+    reasoning: Optional[str] = None
+    confidence: float = 1.0
+
+
+@dataclass
+class ExtractionMetadata:
+    """Metadata about the extraction operation"""
+    extraction_time: int = 0
+    llm_provider: str = ''
+    llm_model: str = ''
+    total_extractions: int = 0
+    stored_count: int = 0
+    payment_receipt_id: Optional[str] = None
+
+
+@dataclass
+class ExtractionResult:
+    """Result from memory extraction"""
+    extractions: List[ExtractedMemory] = field(default_factory=list)
+    stored: Optional[List[str]] = None
+    metadata: ExtractionMetadata = field(default_factory=ExtractionMetadata)
+
+
+@dataclass
+class ExtractionOptions:
+    """Options for extraction"""
+    confidence_threshold: Optional[float] = None
+    context_hint: Optional[str] = None
+    auto_store: bool = False
+    subject: Optional[Dict[str, Any]] = None
+
+
+class ExtractionService:
+    """
+    Extraction service for AI-powered memory extraction
+
+    Supports three LLM modes:
+    1. api-key: Use your own API key with major providers (10 supported)
+       - anthropic, openai, google, mistral, groq
+       - together, fireworks, cohere, xai, deepseek
+       - Cost: $0.002 per extraction
+
+    2. endpoint: Use custom/self-hosted endpoints
+       - Ollama, OpenRouter, vLLM, Modal, Replicate, etc.
+       - Supports openai, anthropic, cohere API formats
+       - Cost: $0.002 per extraction
+
+    3. xache-managed: Xache provides the LLM
+       - Requires PII-scrubbed traces
+       - Cost: $0.011 per extraction
+
+    Example:
+        ```python
+        from xache.services.extraction import LLMConfigApiKey
+
+        # Using your own Anthropic key
+        result = await client.extraction.extract(
+            trace="User: I prefer dark mode\\nAgent: I'll remember that",
+            llm_config=LLMConfigApiKey(
+                provider='anthropic',
+                api_key='sk-ant-...',
+            ),
+            options=ExtractionOptions(
+                confidence_threshold=0.8,
+                auto_store=True,
+            )
+        )
+
+        print(f"Extracted {len(result.extractions)} memories")
+        ```
+    """
+
+    # Supported providers for api-key mode
+    SUPPORTED_PROVIDERS: List[str] = [
+        'anthropic', 'openai', 'google', 'mistral', 'groq',
+        'together', 'fireworks', 'cohere', 'xai', 'deepseek'
+    ]
+
+    # Supported API formats for endpoint mode
+    SUPPORTED_FORMATS: List[str] = ['openai', 'anthropic', 'cohere']
+
+    def __init__(self, client):
+        self.client = client
+
+    def _build_llm_config_dict(self, llm_config: LLMConfig) -> Dict[str, Any]:
+        """Convert dataclass to API-compatible dict"""
+        if isinstance(llm_config, LLMConfigApiKey):
+            config = {
+                'type': 'api-key',
+                'provider': llm_config.provider,
+                'apiKey': llm_config.api_key,
+            }
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        elif isinstance(llm_config, LLMConfigEndpoint):
+            config = {
+                'type': 'endpoint',
+                'url': llm_config.url,
+                'format': llm_config.format,
+            }
+            if llm_config.auth_token:
+                config['authToken'] = llm_config.auth_token
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        elif isinstance(llm_config, LLMConfigXacheManaged):
+            config = {
+                'type': 'xache-managed',
+                'provider': llm_config.provider,
+            }
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        else:
+            # Assume it's already a dict
+            return llm_config
+
+    async def extract(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        llm_config: LLMConfig,
+        options: Optional[ExtractionOptions] = None,
+    ) -> ExtractionResult:
+        """
+        Extract memories from agent trace using specified LLM
+
+        Args:
+            trace: The conversation trace (string or object)
+            llm_config: LLM configuration (api-key, endpoint, or xache-managed)
+            options: Extraction options
+
+        Returns:
+            ExtractionResult with extracted memories
+
+        Example:
+            ```python
+            # Using OpenAI
+            result = await client.extraction.extract(
+                trace="User: I always use vim keybindings...",
+                llm_config=LLMConfigApiKey(
+                    provider='openai',
+                    api_key='sk-...',
+                    model='gpt-4-turbo',
+                ),
+                options=ExtractionOptions(auto_store=True),
+            )
+
+            # Using custom endpoint (Ollama)
+            result = await client.extraction.extract(
+                trace=conversation,
+                llm_config=LLMConfigEndpoint(
+                    url='http://localhost:11434/v1/chat/completions',
+                    model='llama2',
+                    format='openai',
+                ),
+            )
+
+            # Using Xache-managed LLM
+            result = await client.extraction.extract(
+                trace=scrubbed_trace,  # Must be PII-scrubbed
+                llm_config=LLMConfigXacheManaged(
+                    provider='anthropic',
+                ),
+            )
+            ```
+        """
+        body: Dict[str, Any] = {
+            'trace': trace,
+            'llmConfig': self._build_llm_config_dict(llm_config),
+        }
+
+        if options:
+            opts: Dict[str, Any] = {}
+            if options.confidence_threshold is not None:
+                opts['confidenceThreshold'] = options.confidence_threshold
+            if options.context_hint:
+                opts['contextHint'] = options.context_hint
+            if options.auto_store:
+                opts['autoStore'] = options.auto_store
+            if options.subject:
+                opts['subject'] = options.subject
+            if opts:
+                body['options'] = opts
+
+        response = await self.client.request('POST', '/v1/extract', body)
+
+        if not response.success or not response.data:
+            raise Exception(
+                response.error.get('message', 'Failed to extract memories')
+                if response.error
+                else 'Failed to extract memories'
+            )
+
+        data = response.data
+
+        # Parse extractions
+        extractions = []
+        for m in data.get('extractions', []):
+            extractions.append(ExtractedMemory(
+                type=m.get('type', 'unknown'),
+                data=m.get('data', {}),
+                reasoning=m.get('reasoning'),
+                confidence=m.get('confidence', 1.0),
+            ))
+
+        # Parse metadata
+        meta = data.get('metadata', {})
+        metadata = ExtractionMetadata(
+            extraction_time=meta.get('extractionTime', 0),
+            llm_provider=meta.get('llmProvider', ''),
+            llm_model=meta.get('llmModel', ''),
+            total_extractions=meta.get('totalExtractions', 0),
+            stored_count=meta.get('storedCount', 0),
+            payment_receipt_id=meta.get('paymentReceiptId'),
+        )
+
+        return ExtractionResult(
+            extractions=extractions,
+            stored=data.get('stored'),
+            metadata=metadata,
+        )
+
+    async def extract_with_anthropic(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        api_key: str,
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Convenience method: Extract memories using Anthropic
+
+        Args:
+            trace: Conversation trace
+            api_key: Your Anthropic API key
+            model: Model name (default: claude-sonnet-4-20250514)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
+
+        Example:
+            ```python
+            result = await client.extraction.extract_with_anthropic(
+                trace="User: I prefer dark mode...",
+                api_key="sk-ant-...",
+                auto_store=True,
+            )
+            ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigApiKey(
+                provider='anthropic',
+                api_key=api_key,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
+
+    async def extract_with_openai(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        api_key: str,
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Convenience method: Extract memories using OpenAI
+
+        Args:
+            trace: Conversation trace
+            api_key: Your OpenAI API key
+            model: Model name (default: gpt-4-turbo)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
+
+        Example:
+            ```python
+            result = await client.extraction.extract_with_openai(
+                trace="User: I prefer dark mode...",
+                api_key="sk-...",
+                model="gpt-4-turbo",
+                auto_store=True,
+            )
+            ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigApiKey(
+                provider='openai',
+                api_key=api_key,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
+
+    async def extract_with_endpoint(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        url: str,
+        model: str,
+        auth_token: Optional[str] = None,
+        format: LLMApiFormat = 'openai',
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Extract memories using custom endpoint (Ollama, OpenRouter, vLLM, etc.)
+
+        Args:
+            trace: Conversation trace
+            url: Endpoint URL
+            model: Model name
+            auth_token: Optional auth token
+            format: API format (openai, anthropic, cohere)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
+
+        Example:
+            ```python
+            # Ollama
+            result = await client.extraction.extract_with_endpoint(
+                trace="User: I prefer dark mode...",
+                url="http://localhost:11434/v1/chat/completions",
+                model="llama2",
+                format="openai",
+            )
+
+            # OpenRouter
+            result = await client.extraction.extract_with_endpoint(
+                trace="User: ...",
+                url="https://openrouter.ai/api/v1/chat/completions",
+                model="anthropic/claude-3-sonnet",
+                auth_token="sk-or-...",
+                format="openai",
+            )
+            ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigEndpoint(
+                url=url,
+                model=model,
+                auth_token=auth_token,
+                format=format,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
+
+    async def extract_with_xache_llm(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        provider: Literal['anthropic', 'openai'] = 'anthropic',
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Extract memories using Xache-managed LLM
+
+        IMPORTANT: Traces must be scrubbed of PII before calling this method.
+
+        Args:
+            trace: PII-scrubbed conversation trace
+            provider: Xache-managed provider (anthropic or openai)
+            model: Optional model override
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
+
+        Example:
+            ```python
+            # Scrub PII first
+            scrubbed = scrub_trace(raw_trace)
+
+            result = await client.extraction.extract_with_xache_llm(
+                trace=scrubbed,
+                provider="anthropic",
+                auto_store=True,
+            )
+            ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigXacheManaged(
+                provider=provider,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
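
For reference, this is the wire payload that `extract()` above assembles before calling `self.client.request('POST', '/v1/extract', body)`. The camelCase key names are read directly from `_build_llm_config_dict` and the options mapping; the values are illustrative only:

```python
# Illustrative request body for POST /v1/extract, per the code above.
body = {
    'trace': "User: I prefer dark mode\nAgent: Noted",
    'llmConfig': {
        'type': 'api-key',
        'provider': 'anthropic',
        'apiKey': 'sk-ant-...',  # api_key is serialized as 'apiKey'
        # 'model' is included only when set
    },
    'options': {
        'confidenceThreshold': 0.8,  # from confidence_threshold
        'autoStore': True,           # from auto_store (omitted when False)
    },
}
```
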
--- xache-5.1.0/xache.egg-info/PKG-INFO
+++ xache-5.2.1/xache.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xache
-Version: 5.1.0
+Version: 5.2.1
 Summary: Official Python SDK for Xache Protocol
 Home-page: https://github.com/xache-ai/xache-protocol
 Author: Xache Protocol
@@ -44,7 +44,7 @@ Dynamic: requires-python
 
 # Xache Protocol Python SDK
 
-Official Python SDK for [Xache Protocol](https://xache.ai) - decentralized agent memory and collective intelligence marketplace.
+Official Python SDK for [Xache Protocol](https://xache.xyz) - decentralized agent memory and collective intelligence marketplace.
 
 ## Features
 
@@ -331,7 +331,6 @@ MIT
 
 ## Links
 
-- [Documentation](https://docs.xache.ai)
-- [Protocol Specification](https://github.com/xache-ai/xache-protocol)
-- [API Reference](https://api.xache.xyz/docs)
-- [Discord](https://discord.gg/xache)
+- [Documentation](https://docs.xache.xyz)
+- [GitHub](https://github.com/xacheai/xache-protocol)
+- [Website](https://xache.xyz)
--- xache-5.1.0/xache/services/extraction.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""
-Extraction Service - AI-powered memory extraction from conversations
-"""
-
-from typing import List, Optional, Dict, Any
-from dataclasses import dataclass
-
-
-@dataclass
-class ExtractedMemory:
-    """Extracted memory from conversation"""
-    content: str
-    type: str  # 'preference', 'fact', 'pattern', 'error_fix', 'insight'
-    context: str
-    confidence: float
-    source_text: Optional[str] = None
-    metadata: Optional[Dict[str, Any]] = None
-
-
-@dataclass
-class ExtractionResult:
-    """Result from memory extraction"""
-    memories: List[ExtractedMemory]
-    total_extracted: int
-    stored_count: int
-    skipped_count: int
-    processing_time_ms: int
-
-
-@dataclass
-class ExtractMemoriesRequest:
-    """Request for memory extraction"""
-    conversation: str
-    context: Optional[str] = None
-    types: Optional[List[str]] = None
-    auto_store: bool = True
-    min_confidence: float = 0.7
-
-
-class ExtractionService:
-    """
-    Extraction service for AI-powered memory extraction
-
-    Automatically extracts valuable memories from conversations,
-    including user preferences, facts, patterns, error fixes, and insights.
-    """
-
-    def __init__(self, client):
-        self.client = client
-
-    async def extract(self, request: ExtractMemoriesRequest) -> ExtractionResult:
-        """
-        Extract memories from a conversation
-
-        Args:
-            request: Extraction request with conversation and options
-
-        Returns:
-            Extraction result with extracted memories
-
-        Example:
-            ```python
-            from xache.services.extraction import ExtractMemoriesRequest
-
-            result = await client.extraction.extract(ExtractMemoriesRequest(
-                conversation="User: I prefer dark mode for all apps.\\nAssistant: ...",
-                context="user-preferences",
-                auto_store=True,
-                min_confidence=0.8
-            ))
-
-            print(f"Extracted {result.total_extracted} memories")
-            for m in result.memories:
-                print(f"  [{m.type}] {m.content} (confidence: {m.confidence})")
-            ```
-        """
-        body = {
-            "conversation": request.conversation,
-            "autoStore": request.auto_store,
-            "minConfidence": request.min_confidence,
-        }
-
-        if request.context:
-            body["context"] = request.context
-        if request.types:
-            body["types"] = request.types
-
-        response = await self.client.request("POST", "/v1/extraction/extract", body)
-
-        if not response.success or not response.data:
-            raise Exception(
-                response.error.get("message", "Failed to extract memories")
-                if response.error
-                else "Failed to extract memories"
-            )
-
-        data = response.data
-        return ExtractionResult(
-            memories=[
-                ExtractedMemory(
-                    content=m["content"],
-                    type=m["type"],
-                    context=m.get("context", ""),
-                    confidence=m["confidence"],
-                    source_text=m.get("sourceText"),
-                    metadata=m.get("metadata"),
-                )
-                for m in data.get("memories", [])
-            ],
-            total_extracted=data.get("totalExtracted", 0),
-            stored_count=data.get("storedCount", 0),
-            skipped_count=data.get("skippedCount", 0),
-            processing_time_ms=data.get("processingTimeMs", 0),
-        )
-
-    async def analyze(self, conversation: str) -> Dict[str, Any]:
-        """
-        Analyze a conversation without storing memories
-
-        Args:
-            conversation: Conversation text to analyze
-
-        Returns:
-            Analysis result with potential memories
-
-        Example:
-            ```python
-            analysis = await client.extraction.analyze(
-                "User: I always use vim keybindings..."
-            )
-
-            print(f"Found {len(analysis['potentialMemories'])} potential memories")
-            ```
-        """
-        response = await self.client.request(
-            "POST", "/v1/extraction/analyze", {"conversation": conversation}
-        )
-
-        if not response.success or not response.data:
-            raise Exception(
-                response.error.get("message", "Failed to analyze conversation")
-                if response.error
-                else "Failed to analyze conversation"
-            )
-
-        return response.data
-
-    async def get_types(self) -> List[Dict[str, Any]]:
-        """
-        Get supported memory types for extraction
-
-        Returns:
-            List of supported memory types with descriptions
-
-        Example:
-            ```python
-            types = await client.extraction.get_types()
-            for t in types:
-                print(f"{t['type']}: {t['description']}")
-            ```
-        """
-        response = await self.client.request(
-            "GET", "/v1/extraction/types", skip_auth=True
-        )
-
-        if not response.success or not response.data:
-            raise Exception(
-                response.error.get("message", "Failed to get extraction types")
-                if response.error
-                else "Failed to get extraction types"
-            )
-
-        return response.data.get("types", [])
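
Read together, the two `extraction.py` diffs document a breaking rewrite: the 5.1.0 request object and its `/v1/extraction/*` routes are gone (including `analyze()` and `get_types()`), and callers now pass an explicit LLM config to `/v1/extract`. A hedged before/after sketch, with both call shapes copied from the files above and assumed to run inside an async function with a `client` already constructed (no compatibility shim is implied by the diff):

```python
# xache 5.1.0 (removed):
from xache.services.extraction import ExtractMemoriesRequest

result = await client.extraction.extract(ExtractMemoriesRequest(
    conversation="User: I prefer dark mode...",
    auto_store=True,
    min_confidence=0.8,
))
print(result.total_extracted, result.memories)

# xache 5.2.1 - the caller now supplies the LLM explicitly:
from xache import LLMConfigApiKey, ExtractionOptions

result = await client.extraction.extract(
    trace="User: I prefer dark mode...",
    llm_config=LLMConfigApiKey(provider='anthropic', api_key='sk-ant-...'),
    options=ExtractionOptions(auto_store=True, confidence_threshold=0.8),
)
print(result.metadata.total_extractions, result.extractions)
```
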