langchain-xache 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/METADATA +3 -4
- {langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/RECORD +5 -5
- xache_langchain/extraction.py +56 -12
- {langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/WHEEL +0 -0
- {langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/top_level.txt +0 -0
{langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-xache
-Version: 0.1.1
+Version: 0.2.1
 Summary: LangChain integration for Xache Protocol - verifiable AI agent memory
 Author-email: Xache Protocol <dev@xache.xyz>
 License: MIT
@@ -190,9 +190,8 @@ Xache supports [ERC-8004](https://eips.ethereum.org/EIPS/eip-8004) for portable,
 ## Resources
 
 - [Documentation](https://docs.xache.xyz)
-- [
-- [
-- [Discord](https://discord.gg/xache)
+- [GitHub](https://github.com/xacheai/xache-protocol)
+- [Website](https://xache.xyz)
 
 ## License
 
{langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/RECORD
CHANGED
@@ -2,11 +2,11 @@ xache_langchain/__init__.py,sha256=OYzpiCnA75ToCTVmQcqut7kGygD9lQhjrUTiFZU5S88,1
 xache_langchain/_async_utils.py,sha256=Srhe4rJ1S4W6E0sDfgsTqMdfMFr0Je9scJ3cFNa8fWA,1699
 xache_langchain/chat_history.py,sha256=MS6w8fyowWlLvvtZ0RBQWNygBytM3cnasZ6C0KwL4sE,6730
 xache_langchain/collective.py,sha256=OyZKvfOx20ETK236P36fqAZ6ahLyos6FYs81IOKjti0,8092
-xache_langchain/extraction.py,sha256=
+xache_langchain/extraction.py,sha256=4mrugy9zAsoO1SV_5gOBq2FF7A9D8U67x1Rek8drmWg,9734
 xache_langchain/memory.py,sha256=71hxXcUtsyaAo67_xBmjel43EvhgDiCENcD6AvJxMBo,6196
 xache_langchain/reputation.py,sha256=b1GVceH1QPqB7rjvTj295QllxiJ5W0rQ5rUoKJ5Np7A,7389
 xache_langchain/retriever.py,sha256=qw7CSP7zR6g7mn9DUYHbId9S9K564kKXqKR1WnnHkP0,7607
-langchain_xache-0.
-langchain_xache-0.
-langchain_xache-0.
-langchain_xache-0.
+langchain_xache-0.2.1.dist-info/METADATA,sha256=dW4FUcf5_-BIRSBxt7dJ1HFgSf0e2kqaNNfJ71oJDqY,5261
+langchain_xache-0.2.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+langchain_xache-0.2.1.dist-info/top_level.txt,sha256=in69PSq9agqGIAyShkm5ZVg9n0ks76QlD1tGaws9efA,16
+langchain_xache-0.2.1.dist-info/RECORD,,
xache_langchain/extraction.py
CHANGED
@@ -58,6 +58,15 @@ class XacheExtractor:
     ```
     """
 
+    # Supported LLM providers for api-key mode
+    SUPPORTED_PROVIDERS = [
+        "anthropic", "openai", "google", "mistral", "groq",
+        "together", "fireworks", "cohere", "xai", "deepseek"
+    ]
+
+    # Supported API formats for endpoint mode
+    SUPPORTED_FORMATS = ["openai", "anthropic", "cohere"]
+
     def __init__(
         self,
         wallet_address: str,
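The two new class attributes make the allow-lists introspectable, so support can be checked before an extractor is ever built. A minimal sketch using only what the hunk above defines (the import path follows the package layout shown in RECORD):

```python
from xache_langchain.extraction import XacheExtractor

# Class-level attributes: no instance (and no credentials) needed.
for provider in ("deepseek", "ollama"):
    ok = provider in XacheExtractor.SUPPORTED_PROVIDERS
    print(f"{provider}: {'supported' if ok else 'not supported'} in api-key mode")

# Endpoint mode accepts only the three wire formats listed above.
assert "openai" in XacheExtractor.SUPPORTED_FORMATS
```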
@@ -66,8 +75,11 @@ class XacheExtractor:
         chain: str = "base",
         mode: str = "xache-managed",
         llm_api_key: Optional[str] = None,
+        llm_provider: str = "anthropic",
         llm_endpoint: Optional[str] = None,
-
+        llm_endpoint_format: str = "openai",
+        llm_auth_token: Optional[str] = None,
+        llm_model: Optional[str] = None,
     ):
         """
         Initialize Xache extractor.
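Taken together, the expanded signature supports three construction patterns. A sketch of each; the wallet, key, endpoint, token, and model values are placeholders, and "gpt-4o-mini" is just an example model name, not a package default:

```python
from xache_langchain.extraction import XacheExtractor

# Default: Xache-managed LLM, no provider credentials required.
managed = XacheExtractor(
    wallet_address="0xYourWallet",           # placeholder
    private_key="0xYourPrivateKey",          # placeholder
)

# api-key mode: bring your own key for one of the supported providers.
byok = XacheExtractor(
    wallet_address="0xYourWallet",
    private_key="0xYourPrivateKey",
    mode="api-key",
    llm_provider="openai",
    llm_api_key="sk-...",                    # placeholder
    llm_model="gpt-4o-mini",                 # optional; provider default otherwise
)

# endpoint mode: any self-hosted server that speaks a supported format.
custom = XacheExtractor(
    wallet_address="0xYourWallet",
    private_key="0xYourPrivateKey",
    mode="endpoint",
    llm_endpoint="https://llm.internal.example/v1",  # placeholder
    llm_endpoint_format="openai",
    llm_auth_token="token-...",              # placeholder
)
```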
@@ -77,16 +89,28 @@ class XacheExtractor:
             private_key: Private key for signing
             api_url: Xache API URL (defaults to XACHE_API_URL env var or https://api.xache.xyz)
             chain: Blockchain (base, solana)
-            mode: Extraction mode
+            mode: Extraction mode:
+                - 'xache-managed': Xache provides LLM ($0.011)
+                - 'api-key': Use major provider with your API key ($0.002)
+                - 'endpoint': Use custom endpoint ($0.002)
             llm_api_key: Your LLM API key (required for api-key mode)
-
-
+            llm_provider: Provider for api-key mode. Supported:
+                anthropic, openai, google, mistral, groq, together,
+                fireworks, cohere, xai, deepseek
+            llm_endpoint: Custom endpoint URL (required for endpoint mode)
+            llm_endpoint_format: API format for endpoint mode (openai, anthropic, cohere)
+            llm_auth_token: Auth token for endpoint mode
+            llm_model: Model to use (optional, uses provider default)
         """
         # Validate mode-specific requirements
         if mode == "api-key" and not llm_api_key:
             raise ValueError("llm_api_key is required when mode is 'api-key'")
+        if mode == "api-key" and llm_provider not in self.SUPPORTED_PROVIDERS:
+            raise ValueError(f"llm_provider must be one of: {', '.join(self.SUPPORTED_PROVIDERS)}")
         if mode == "endpoint" and not llm_endpoint:
             raise ValueError("llm_endpoint is required when mode is 'endpoint'")
+        if mode == "endpoint" and llm_endpoint_format not in self.SUPPORTED_FORMATS:
+            raise ValueError(f"llm_endpoint_format must be one of: {', '.join(self.SUPPORTED_FORMATS)}")
 
         self.wallet_address = wallet_address
         self.private_key = private_key
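The new guards fail fast at construction time instead of at the first extraction request. A sketch of what they reject, again with placeholder credentials:

```python
from xache_langchain.extraction import XacheExtractor

# Unsupported provider in api-key mode raises immediately:
try:
    XacheExtractor(
        wallet_address="0xYourWallet",   # placeholder
        private_key="0xYourPrivateKey",  # placeholder
        mode="api-key",
        llm_api_key="sk-...",
        llm_provider="ollama",           # not in SUPPORTED_PROVIDERS
    )
except ValueError as e:
    print(e)  # llm_provider must be one of: anthropic, openai, ...

# Same for an unknown wire format in endpoint mode:
try:
    XacheExtractor(
        wallet_address="0xYourWallet",
        private_key="0xYourPrivateKey",
        mode="endpoint",
        llm_endpoint="https://llm.internal.example/v1",  # placeholder
        llm_endpoint_format="grpc",      # not in SUPPORTED_FORMATS
    )
except ValueError as e:
    print(e)
```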
@@ -94,7 +118,10 @@ class XacheExtractor:
         self.chain = chain
         self.mode = mode
         self.llm_api_key = llm_api_key
+        self.llm_provider = llm_provider
         self.llm_endpoint = llm_endpoint
+        self.llm_endpoint_format = llm_endpoint_format
+        self.llm_auth_token = llm_auth_token
         self.llm_model = llm_model
 
         # Build DID
@@ -131,20 +158,28 @@ class XacheExtractor:
             if self.mode == "api-key":
                 llm_config = {
                     "type": "api-key",
+                    "provider": self.llm_provider,
                     "apiKey": self.llm_api_key,
-                    "model": self.llm_model,
                 }
+                if self.llm_model:
+                    llm_config["model"] = self.llm_model
             elif self.mode == "endpoint":
                 llm_config = {
                     "type": "endpoint",
-                    "
-                    "
+                    "url": self.llm_endpoint,
+                    "format": self.llm_endpoint_format,
                 }
+                if self.llm_auth_token:
+                    llm_config["authToken"] = self.llm_auth_token
+                if self.llm_model:
+                    llm_config["model"] = self.llm_model
             else:
                 llm_config = {
                     "type": "xache-managed",
-                    "
+                    "provider": "anthropic",
                 }
+                if self.llm_model:
+                    llm_config["model"] = self.llm_model
 
             result = await client.extraction.extract(
                 trace=trace,
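A behavioral fix rides along with the new fields: 0.1.1 always sent `"model": self.llm_model`, which serialized `None` when no model was chosen, while 0.2.1 attaches `model` (and `authToken`) only when actually set. For reference, the three `llm_config` payload shapes this branch now produces, with placeholder values:

```python
# Resulting llm_config payloads, one per mode (all values are placeholders).

api_key_config = {
    "type": "api-key",
    "provider": "openai",        # always present in 0.2.1
    "apiKey": "sk-...",          # placeholder
    "model": "gpt-4o-mini",      # only included when llm_model is set
}

endpoint_config = {
    "type": "endpoint",
    "url": "https://llm.internal.example/v1",  # placeholder
    "format": "openai",
    "authToken": "token-...",    # only included when llm_auth_token is set
}

managed_config = {
    "type": "xache-managed",
    "provider": "anthropic",     # pinned default in 0.2.1
}
```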
@@ -171,23 +206,32 @@ class XacheExtractor:
     ) -> ExtractionResult:
         """Async extract memories from text"""
         async with self._client as client:
+            # Build LLM config based on mode
             if self.mode == "api-key":
                 llm_config = {
                     "type": "api-key",
+                    "provider": self.llm_provider,
                     "apiKey": self.llm_api_key,
-                    "model": self.llm_model,
                 }
+                if self.llm_model:
+                    llm_config["model"] = self.llm_model
             elif self.mode == "endpoint":
                 llm_config = {
                     "type": "endpoint",
-                    "
-                    "
+                    "url": self.llm_endpoint,
+                    "format": self.llm_endpoint_format,
                 }
+                if self.llm_auth_token:
+                    llm_config["authToken"] = self.llm_auth_token
+                if self.llm_model:
+                    llm_config["model"] = self.llm_model
             else:
                 llm_config = {
                     "type": "xache-managed",
-                    "
+                    "provider": "anthropic",
                 }
+                if self.llm_model:
+                    llm_config["model"] = self.llm_model
 
             result = await client.extraction.extract(
                 trace=trace,
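End to end, the async path then reads as below from the caller's side. This is a loose sketch only: the diff shows just the method body, so the public method name `aextract`, its argument, and the printable result are assumptions for illustration, not confirmed API:

```python
import asyncio

from xache_langchain.extraction import XacheExtractor

async def main() -> None:
    extractor = XacheExtractor(
        wallet_address="0xYourWallet",    # placeholder
        private_key="0xYourPrivateKey",   # placeholder
        mode="endpoint",
        llm_endpoint="https://llm.internal.example/v1",  # placeholder
        llm_endpoint_format="openai",
    )
    # Hypothetical call: method name and signature are assumed for illustration.
    result = await extractor.aextract("User prefers dark mode and metric units.")
    print(result)

asyncio.run(main())
```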
{langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/WHEEL
File without changes
{langchain_xache-0.1.1.dist-info → langchain_xache-0.2.1.dist-info}/top_level.txt
File without changes