xache 5.1.0__py3-none-any.whl → 5.2.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- xache/__init__.py +28 -1
- xache/services/extraction.py +398 -98
- {xache-5.1.0.dist-info → xache-5.2.1.dist-info}/METADATA +5 -6
- {xache-5.1.0.dist-info → xache-5.2.1.dist-info}/RECORD +6 -6
- {xache-5.1.0.dist-info → xache-5.2.1.dist-info}/WHEEL +0 -0
- {xache-5.1.0.dist-info → xache-5.2.1.dist-info}/top_level.txt +0 -0
xache/__init__.py
CHANGED
@@ -25,7 +25,7 @@ Example:
     ```
     """
 
-__version__ = "5.1.0"
+__version__ = "5.2.0"
 
 # Main client
 from .client import XacheClient
@@ -75,6 +75,21 @@ from .services import (
     CollectiveService,
     BudgetService,
     ReceiptsService,
+    ExtractionService,
+)
+
+# Extraction types
+from .services.extraction import (
+    LLMProvider,
+    LLMApiFormat,
+    LLMConfig,
+    LLMConfigApiKey,
+    LLMConfigEndpoint,
+    LLMConfigXacheManaged,
+    ExtractedMemory,
+    ExtractionMetadata,
+    ExtractionResult,
+    ExtractionOptions,
 )
 
 # Utilities
@@ -130,6 +145,18 @@ __all__ = [
     "CollectiveService",
     "BudgetService",
     "ReceiptsService",
+    "ExtractionService",
+    # Extraction types
+    "LLMProvider",
+    "LLMApiFormat",
+    "LLMConfig",
+    "LLMConfigApiKey",
+    "LLMConfigEndpoint",
+    "LLMConfigXacheManaged",
+    "ExtractedMemory",
+    "ExtractionMetadata",
+    "ExtractionResult",
+    "ExtractionOptions",
     # Utilities
     "RetryPolicy",
     "with_retry",
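The practical effect of the `__init__.py` changes is that the extraction types become importable from the package root rather than only from `xache.services.extraction`. A minimal sketch of the new import surface under 5.2.x (names taken from the `__all__` additions above; not verified against the published wheel):

```python
# New in 5.2.x: extraction types re-exported at the package root,
# mirroring the __all__ entries added in the diff above.
from xache import (
    ExtractionService,
    LLMConfigApiKey,
    LLMConfigEndpoint,
    LLMConfigXacheManaged,
    ExtractionOptions,
    ExtractionResult,
)
```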
xache/services/extraction.py
CHANGED
@@ -1,173 +1,473 @@
 """
 Extraction Service - AI-powered memory extraction from conversations
+Supports 10 major LLM providers plus custom endpoints
 """
 
-from typing import List, Optional, Dict, Any
-from dataclasses import dataclass
+from typing import List, Optional, Dict, Any, Union, Literal
+from dataclasses import dataclass, field
+
+
+# LLM Provider type - matches TypeScript SDK
+LLMProvider = Literal[
+    'anthropic', 'openai', 'google', 'mistral', 'groq',
+    'together', 'fireworks', 'cohere', 'xai', 'deepseek'
+]
+
+# LLM API format for custom endpoints
+LLMApiFormat = Literal['openai', 'anthropic', 'cohere']
+
+
+@dataclass
+class LLMConfigApiKey:
+    """LLM config for api-key mode - use your own API key with major providers"""
+    type: Literal['api-key'] = 'api-key'
+    provider: LLMProvider = 'anthropic'
+    api_key: str = ''
+    model: Optional[str] = None
+
+
+@dataclass
+class LLMConfigEndpoint:
+    """LLM config for endpoint mode - custom/self-hosted endpoints"""
+    type: Literal['endpoint'] = 'endpoint'
+    url: str = ''
+    auth_token: Optional[str] = None
+    model: Optional[str] = None
+    format: LLMApiFormat = 'openai'
+
+
+@dataclass
+class LLMConfigXacheManaged:
+    """LLM config for xache-managed mode - Xache provides the LLM"""
+    type: Literal['xache-managed'] = 'xache-managed'
+    provider: Literal['anthropic', 'openai'] = 'anthropic'
+    model: Optional[str] = None
+
+
+# Union type for LLM config
+LLMConfig = Union[LLMConfigApiKey, LLMConfigEndpoint, LLMConfigXacheManaged]
 
 
 @dataclass
 class ExtractedMemory:
     """Extracted memory from conversation"""
-
-
-
-    confidence: float
-
-
+    type: str  # 'preference', 'fact', 'pattern', 'relationship', etc.
+    data: Dict[str, Any] = field(default_factory=dict)
+    reasoning: Optional[str] = None
+    confidence: float = 1.0
+
+
+@dataclass
+class ExtractionMetadata:
+    """Metadata about the extraction operation"""
+    extraction_time: int = 0
+    llm_provider: str = ''
+    llm_model: str = ''
+    total_extractions: int = 0
+    stored_count: int = 0
+    payment_receipt_id: Optional[str] = None
 
 
 @dataclass
 class ExtractionResult:
     """Result from memory extraction"""
-
-
-
-    skipped_count: int
-    processing_time_ms: int
+    extractions: List[ExtractedMemory] = field(default_factory=list)
+    stored: Optional[List[str]] = None
+    metadata: ExtractionMetadata = field(default_factory=ExtractionMetadata)
 
 
 @dataclass
-class 
-    """
-
-
-
-
-    min_confidence: float = 0.7
+class ExtractionOptions:
+    """Options for extraction"""
+    confidence_threshold: Optional[float] = None
+    context_hint: Optional[str] = None
+    auto_store: bool = False
+    subject: Optional[Dict[str, Any]] = None
 
 
 class ExtractionService:
     """
     Extraction service for AI-powered memory extraction
 
-
-
+    Supports three LLM modes:
+    1. api-key: Use your own API key with major providers (10 supported)
+       - anthropic, openai, google, mistral, groq
+       - together, fireworks, cohere, xai, deepseek
+       - Cost: $0.002 per extraction
+
+    2. endpoint: Use custom/self-hosted endpoints
+       - Ollama, OpenRouter, vLLM, Modal, Replicate, etc.
+       - Supports openai, anthropic, cohere API formats
+       - Cost: $0.002 per extraction
+
+    3. xache-managed: Xache provides the LLM
+       - Requires PII-scrubbed traces
+       - Cost: $0.011 per extraction
+
+    Example:
+        ```python
+        from xache.services.extraction import LLMConfigApiKey
+
+        # Using your own Anthropic key
+        result = await client.extraction.extract(
+            trace="User: I prefer dark mode\\nAgent: I'll remember that",
+            llm_config=LLMConfigApiKey(
+                provider='anthropic',
+                api_key='sk-ant-...',
+            ),
+            options=ExtractionOptions(
+                confidence_threshold=0.8,
+                auto_store=True,
+            )
+        )
+
+        print(f"Extracted {len(result.extractions)} memories")
+        ```
     """
 
+    # Supported providers for api-key mode
+    SUPPORTED_PROVIDERS: List[str] = [
+        'anthropic', 'openai', 'google', 'mistral', 'groq',
+        'together', 'fireworks', 'cohere', 'xai', 'deepseek'
+    ]
+
+    # Supported API formats for endpoint mode
+    SUPPORTED_FORMATS: List[str] = ['openai', 'anthropic', 'cohere']
+
     def __init__(self, client):
         self.client = client
 
-
+    def _build_llm_config_dict(self, llm_config: LLMConfig) -> Dict[str, Any]:
+        """Convert dataclass to API-compatible dict"""
+        if isinstance(llm_config, LLMConfigApiKey):
+            config = {
+                'type': 'api-key',
+                'provider': llm_config.provider,
+                'apiKey': llm_config.api_key,
+            }
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        elif isinstance(llm_config, LLMConfigEndpoint):
+            config = {
+                'type': 'endpoint',
+                'url': llm_config.url,
+                'format': llm_config.format,
+            }
+            if llm_config.auth_token:
+                config['authToken'] = llm_config.auth_token
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        elif isinstance(llm_config, LLMConfigXacheManaged):
+            config = {
+                'type': 'xache-managed',
+                'provider': llm_config.provider,
+            }
+            if llm_config.model:
+                config['model'] = llm_config.model
+            return config
+
+        else:
+            # Assume it's already a dict
+            return llm_config
+
+    async def extract(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        llm_config: LLMConfig,
+        options: Optional[ExtractionOptions] = None,
+    ) -> ExtractionResult:
         """
-        Extract memories from 
+        Extract memories from agent trace using specified LLM
 
         Args:
-
+            trace: The conversation trace (string or object)
+            llm_config: LLM configuration (api-key, endpoint, or xache-managed)
+            options: Extraction options
 
         Returns:
-
+            ExtractionResult with extracted memories
 
         Example:
            ```python
-
+            # Using OpenAI
+            result = await client.extraction.extract(
+                trace="User: I always use vim keybindings...",
+                llm_config=LLMConfigApiKey(
+                    provider='openai',
+                    api_key='sk-...',
+                    model='gpt-4-turbo',
+                ),
+                options=ExtractionOptions(auto_store=True),
+            )
 
-
-
-
-
-
-
+            # Using custom endpoint (Ollama)
+            result = await client.extraction.extract(
+                trace=conversation,
+                llm_config=LLMConfigEndpoint(
+                    url='http://localhost:11434/v1/chat/completions',
+                    model='llama2',
+                    format='openai',
+                ),
+            )
 
-
-
-
+            # Using Xache-managed LLM
+            result = await client.extraction.extract(
+                trace=scrubbed_trace,  # Must be PII-scrubbed
+                llm_config=LLMConfigXacheManaged(
+                    provider='anthropic',
+                ),
+            )
            ```
         """
-        body = {
-
-
-            "minConfidence": request.min_confidence,
+        body: Dict[str, Any] = {
+            'trace': trace,
+            'llmConfig': self._build_llm_config_dict(llm_config),
         }
 
-        if 
-
-
-
-
-
+        if options:
+            opts: Dict[str, Any] = {}
+            if options.confidence_threshold is not None:
+                opts['confidenceThreshold'] = options.confidence_threshold
+            if options.context_hint:
+                opts['contextHint'] = options.context_hint
+            if options.auto_store:
+                opts['autoStore'] = options.auto_store
+            if options.subject:
+                opts['subject'] = options.subject
+            if opts:
+                body['options'] = opts
+
+        response = await self.client.request('POST', '/v1/extract', body)
 
         if not response.success or not response.data:
             raise Exception(
-                response.error.get(
+                response.error.get('message', 'Failed to extract memories')
                 if response.error
-                else 
+                else 'Failed to extract memories'
             )
 
         data = response.data
+
+        # Parse extractions
+        extractions = []
+        for m in data.get('extractions', []):
+            extractions.append(ExtractedMemory(
+                type=m.get('type', 'unknown'),
+                data=m.get('data', {}),
+                reasoning=m.get('reasoning'),
+                confidence=m.get('confidence', 1.0),
+            ))
+
+        # Parse metadata
+        meta = data.get('metadata', {})
+        metadata = ExtractionMetadata(
+            extraction_time=meta.get('extractionTime', 0),
+            llm_provider=meta.get('llmProvider', ''),
+            llm_model=meta.get('llmModel', ''),
+            total_extractions=meta.get('totalExtractions', 0),
+            stored_count=meta.get('storedCount', 0),
+            payment_receipt_id=meta.get('paymentReceiptId'),
+        )
+
         return ExtractionResult(
-
-
-
-                    type=m["type"],
-                    context=m.get("context", ""),
-                    confidence=m["confidence"],
-                    source_text=m.get("sourceText"),
-                    metadata=m.get("metadata"),
-                )
-                for m in data.get("memories", [])
-            ],
-            total_extracted=data.get("totalExtracted", 0),
-            stored_count=data.get("storedCount", 0),
-            skipped_count=data.get("skippedCount", 0),
-            processing_time_ms=data.get("processingTimeMs", 0),
+            extractions=extractions,
+            stored=data.get('stored'),
+            metadata=metadata,
         )
 
-    async def 
+    async def extract_with_anthropic(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        api_key: str,
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
         """
-
+        Convenience method: Extract memories using Anthropic
 
         Args:
-
-
-
-
+            trace: Conversation trace
+            api_key: Your Anthropic API key
+            model: Model name (default: claude-sonnet-4-20250514)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
         Example:
            ```python
-
-            "User: I
+            result = await client.extraction.extract_with_anthropic(
+                trace="User: I prefer dark mode...",
+                api_key="sk-ant-...",
+                auto_store=True,
             )
-
-            print(f"Found {len(analysis['potentialMemories'])} potential memories")
            ```
         """
-
-
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigApiKey(
+                provider='anthropic',
+                api_key=api_key,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
         )
 
-
-
-
-
-
-
+    async def extract_with_openai(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        api_key: str,
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Convenience method: Extract memories using OpenAI
+
+        Args:
+            trace: Conversation trace
+            api_key: Your OpenAI API key
+            model: Model name (default: gpt-4-turbo)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
-
+        Example:
+           ```python
+            result = await client.extraction.extract_with_openai(
+                trace="User: I prefer dark mode...",
+                api_key="sk-...",
+                model="gpt-4-turbo",
+                auto_store=True,
+            )
+           ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigApiKey(
+                provider='openai',
+                api_key=api_key,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
 
-    async def 
+    async def extract_with_endpoint(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        url: str,
+        model: str,
+        auth_token: Optional[str] = None,
+        format: LLMApiFormat = 'openai',
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
         """
-
+        Extract memories using custom endpoint (Ollama, OpenRouter, vLLM, etc.)
 
-
-
+        Args:
+            trace: Conversation trace
+            url: Endpoint URL
+            model: Model name
+            auth_token: Optional auth token
+            format: API format (openai, anthropic, cohere)
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
         Example:
           ```python
-
-
-
+            # Ollama
+            result = await client.extraction.extract_with_endpoint(
+                trace="User: I prefer dark mode...",
+                url="http://localhost:11434/v1/chat/completions",
+                model="llama2",
+                format="openai",
+            )
+
+            # OpenRouter
+            result = await client.extraction.extract_with_endpoint(
+                trace="User: ...",
+                url="https://openrouter.ai/api/v1/chat/completions",
+                model="anthropic/claude-3-sonnet",
+                auth_token="sk-or-...",
+                format="openai",
+            )
          ```
         """
-
-
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigEndpoint(
+                url=url,
+                model=model,
+                auth_token=auth_token,
+                format=format,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
         )
 
-
-
-
-
-
-
+    async def extract_with_xache_llm(
+        self,
+        trace: Union[str, Dict[str, Any]],
+        provider: Literal['anthropic', 'openai'] = 'anthropic',
+        model: Optional[str] = None,
+        auto_store: bool = False,
+        confidence_threshold: Optional[float] = None,
+        context_hint: Optional[str] = None,
+    ) -> ExtractionResult:
+        """
+        Extract memories using Xache-managed LLM
+
+        IMPORTANT: Traces must be scrubbed of PII before calling this method.
+
+        Args:
+            trace: PII-scrubbed conversation trace
+            provider: Xache-managed provider (anthropic or openai)
+            model: Optional model override
+            auto_store: Whether to auto-store extracted memories
+            confidence_threshold: Minimum confidence threshold
+            context_hint: Context hint for extraction
 
-
+        Example:
+           ```python
+            # Scrub PII first
+            scrubbed = scrub_trace(raw_trace)
+
+            result = await client.extraction.extract_with_xache_llm(
+                trace=scrubbed,
+                provider="anthropic",
+                auto_store=True,
+            )
+           ```
+        """
+        return await self.extract(
+            trace=trace,
+            llm_config=LLMConfigXacheManaged(
+                provider=provider,
+                model=model,
+            ),
+            options=ExtractionOptions(
+                auto_store=auto_store,
+                confidence_threshold=confidence_threshold,
+                context_hint=context_hint,
+            ),
+        )
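The heart of the rewrite is the tagged-union config: which dataclass you pass determines the JSON `llmConfig` sent to `/v1/extract`, with snake_case fields renamed to camelCase keys and optional fields omitted when unset. A self-contained sketch of that mapping, reproducing the behavior of `_build_llm_config_dict` outside the SDK (trimmed copies of the dataclasses from the diff above; not the shipped module):

```python
from dataclasses import dataclass
from typing import Any, Dict, Optional, Union

# Trimmed copies of the 5.2.x config dataclasses shown in the diff above.
@dataclass
class LLMConfigApiKey:
    provider: str = 'anthropic'
    api_key: str = ''
    model: Optional[str] = None

@dataclass
class LLMConfigEndpoint:
    url: str = ''
    auth_token: Optional[str] = None
    model: Optional[str] = None
    format: str = 'openai'

LLMConfig = Union[LLMConfigApiKey, LLMConfigEndpoint]

def build_llm_config_dict(cfg: LLMConfig) -> Dict[str, Any]:
    """Mirror of _build_llm_config_dict: dataclass -> wire dict with camelCase keys."""
    if isinstance(cfg, LLMConfigApiKey):
        out: Dict[str, Any] = {'type': 'api-key', 'provider': cfg.provider, 'apiKey': cfg.api_key}
    else:
        out = {'type': 'endpoint', 'url': cfg.url, 'format': cfg.format}
        if cfg.auth_token:
            out['authToken'] = cfg.auth_token
    if cfg.model:
        out['model'] = cfg.model  # only sent when explicitly set
    return out

print(build_llm_config_dict(LLMConfigApiKey(provider='openai', api_key='sk-...')))
# -> {'type': 'api-key', 'provider': 'openai', 'apiKey': 'sk-...'}
```

The `options` serialization in `extract` follows the same pattern: only fields the caller actually set reach the request body, so server-side defaults apply otherwise.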
{xache-5.1.0.dist-info → xache-5.2.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xache
-Version: 5.1.0
+Version: 5.2.1
 Summary: Official Python SDK for Xache Protocol
 Home-page: https://github.com/xache-ai/xache-protocol
 Author: Xache Protocol
@@ -44,7 +44,7 @@ Dynamic: requires-python
 
 # Xache Protocol Python SDK
 
-Official Python SDK for [Xache Protocol](https://xache.
+Official Python SDK for [Xache Protocol](https://xache.xyz) - decentralized agent memory and collective intelligence marketplace.
 
 ## Features
 
@@ -331,7 +331,6 @@ MIT
 
 ## Links
 
-- [Documentation](https://docs.xache.
-- [
-- [
-- [Discord](https://discord.gg/xache)
+- [Documentation](https://docs.xache.xyz)
+- [GitHub](https://github.com/xacheai/xache-protocol)
+- [Website](https://xache.xyz)
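Only the version, description line, and links changed in METADATA. To confirm which release is actually installed, the standard-library metadata API reads these same fields; a quick sketch (assuming the 5.2.1 wheel is installed):

```python
from importlib.metadata import metadata, version

# Reads the installed wheel's METADATA, i.e. the file diffed above.
print(version("xache"))   # expected: 5.2.1
meta = metadata("xache")
print(meta["Summary"])    # Official Python SDK for Xache Protocol
print(meta["Home-page"])  # https://github.com/xache-ai/xache-protocol
```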
{xache-5.1.0.dist-info → xache-5.2.1.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-xache/__init__.py,sha256=
+xache/__init__.py,sha256=K1QXmT5R7nbiTwDwQUz53J1vN6n73NNUA4yVdqCMfFs,3559
 xache/client.py,sha256=ABjFJFnrv4KmY5ihBXK5YC8sg50s48tGdDUSKq7Vd8g,10217
 xache/errors.py,sha256=sHLjfKGw1JxN3TdV_bswG9erH2vVAOFPzvOkWb1k0EI,5007
 xache/types.py,sha256=YpepxdWdWbkCRzRWQX9wckCL1R6zjHswrWTrI3CnrB4,9479
@@ -10,7 +10,7 @@ xache/payment/handler.py,sha256=X3RL1mvjHCn5FbloOjiNUv_CythRlJU0Vo524lx4DLg,8901
 xache/services/__init__.py,sha256=ERuZXa7R-Iv0oFBknxJiBO5nJC-oCtIut_4VITw_uOE,775
 xache/services/budget.py,sha256=6sZ3nojb888a3k774XPnJHfdkqIC2Y1BxJKt8zy76V0,10599
 xache/services/collective.py,sha256=utYLIHv3tuOYHbdYJYXqIy1hS-aYIRems744CsCUIUw,6229
-xache/services/extraction.py,sha256=
+xache/services/extraction.py,sha256=wtqsFMqCQUrY9idXYktkPdkdgKyZGuO8OdCuKBWjlUg,15412
 xache/services/facilitator.py,sha256=FlJh6YvPd1xiCdt6Y8Y9NpbmJI0o92Dcp3ZgzeR14bM,11512
 xache/services/identity.py,sha256=gOs5fN9juyoBfXQVm-G4whyUMJ6Oha2VmP_i3mQw0G0,13478
 xache/services/memory.py,sha256=ng9_cwL4jE4c3gdlwQDZyqaBdQgwtqApEwj0LkZYWRY,13290
@@ -24,7 +24,7 @@ xache/utils/__init__.py,sha256=8VrQm0QnyqxdplpCG7BDRiAVdBGWrjUs9ipH2zsJOBM,106
 xache/utils/cache.py,sha256=9zhE9dIXFTofj7jz1TX-FkAqmclqoYXTe4FwwGLeKT4,5479
 xache/utils/http.py,sha256=rIQCYvYrziNrNfEbOnIKbCOGGf7bcdTvZrrU_W6CcZA,6547
 xache/utils/retry.py,sha256=OJYBGozKIoteCvKw50dqd4ThhOo-WisorcKa8Tr6mnE,2860
-xache-5.1.
-xache-5.1.
-xache-5.1.
-xache-5.1.
+xache-5.2.1.dist-info/METADATA,sha256=3kALV9tITIf2A3R2sZeLUyYzg4UJKyc9bWm06DbAYZ4,8400
+xache-5.2.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+xache-5.2.1.dist-info/top_level.txt,sha256=FBWE4IVb7zoLS9arsdrl97QVETlwFvYGAx6xEJZOEUU,6
+xache-5.2.1.dist-info/RECORD,,
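Each RECORD row is `path,sha256=<digest>,<size>`, where the digest is an unpadded urlsafe-base64 SHA-256 of the file. A standard-library sketch for recomputing a row, e.g. to check the `xache/services/extraction.py` entry above against an unpacked wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_row(path: str) -> str:
    # Recompute a wheel RECORD row: urlsafe base64 sha256 (padding stripped) plus byte size.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Run from inside an unpacked xache-5.2.1 wheel; should match the RECORD line above.
print(record_row("xache/services/extraction.py"))
```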
{xache-5.1.0.dist-info → xache-5.2.1.dist-info}/WHEEL
File without changes

{xache-5.1.0.dist-info → xache-5.2.1.dist-info}/top_level.txt
File without changes