@totalreclaw/totalreclaw 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +27 -0
- package/.github/workflows/publish.yml +39 -0
- package/README.md +104 -0
- package/SKILL.md +687 -0
- package/api-client.ts +300 -0
- package/crypto.ts +351 -0
- package/embedding.ts +84 -0
- package/extractor.ts +210 -0
- package/generate-mnemonic.ts +14 -0
- package/hot-cache-wrapper.ts +126 -0
- package/index.ts +1885 -0
- package/llm-client.ts +418 -0
- package/lsh.test.ts +463 -0
- package/lsh.ts +257 -0
- package/package.json +40 -0
- package/porter-stemmer.d.ts +4 -0
- package/reranker.test.ts +594 -0
- package/reranker.ts +537 -0
- package/semantic-dedup.test.ts +392 -0
- package/semantic-dedup.ts +100 -0
- package/subgraph-search.ts +278 -0
- package/subgraph-store.ts +342 -0
|
@@ -0,0 +1,278 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Subgraph search path — queries facts via GraphQL hash_in.
|
|
3
|
+
*
|
|
4
|
+
* Used when TOTALRECLAW_SUBGRAPH_MODE=true. Replaces the HTTP POST
|
|
5
|
+
* to /v1/search with a GraphQL query to the subgraph via the relay server.
|
|
6
|
+
*
|
|
7
|
+
* The relay server proxies GraphQL queries to Graph Studio with its own
|
|
8
|
+
* API key at `${relayUrl}/v1/subgraph`. Clients never need a subgraph endpoint.
|
|
9
|
+
*
|
|
10
|
+
* Query cost optimization:
|
|
11
|
+
* Phase 1: Single query with ALL trapdoors (1 query).
|
|
12
|
+
* Phase 2: If saturated (1000 results), split into small parallel batches
|
|
13
|
+
* so rare trapdoor matches aren't drowned by common ones.
|
|
14
|
+
* Phase 3: Cursor-based pagination for any saturated batch.
|
|
15
|
+
*
|
|
16
|
+
* This minimizes Graph Network query costs (pay-per-query via GRT):
|
|
17
|
+
* - Small datasets (<1000 matches): 1 query total
|
|
18
|
+
* - Medium datasets: 1 + N batch queries
|
|
19
|
+
* - Large datasets: 1 + N batches + pagination queries
|
|
20
|
+
*/
|
|
21
|
+
|
|
22
|
+
import { getSubgraphConfig } from './subgraph-store.js';
|
|
23
|
+
|
|
24
|
+
/**
 * A fact row as returned by the subgraph's `fact` entity.
 *
 * NOTE(review): SEARCH_QUERY / PAGINATE_QUERY also request `contentFp`,
 * `sequenceId`, and `version`, which are not declared here — those extra
 * fields arrive untyped on the object. Consider declaring them.
 */
export interface SubgraphSearchFact {
  // Fact entity id (used as the dedup key in collectFacts).
  id: string;
  // Hex-encoded ciphertext of the fact body (see encryptedBlob in FactPayload).
  encryptedBlob: string;
  // Optional encrypted embedding; null when the fact has none.
  encryptedEmbedding: string | null;
  // Decay score as a string — presumably a GraphQL BigDecimal; verify against schema.
  decayScore: string;
  // Timestamp as a string, as delivered by GraphQL.
  timestamp: string;
  // Soft-delete flag; collectFacts drops entries where this is exactly false.
  isActive: boolean;
}
|
|
32
|
+
|
|
33
|
+
/** Batch size for Phase 2 split queries (override via TOTALRECLAW_TRAPDOOR_BATCH_SIZE). */
const TRAPDOOR_BATCH_SIZE = parseInt(process.env.TOTALRECLAW_TRAPDOOR_BATCH_SIZE ?? '5', 10);
/** Graph Studio / Graph Network hard limit on `first` argument (override via TOTALRECLAW_SUBGRAPH_PAGE_SIZE). */
const PAGE_SIZE = parseInt(process.env.TOTALRECLAW_SUBGRAPH_PAGE_SIZE ?? '1000', 10);
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* Execute a single GraphQL query against the subgraph endpoint.
|
|
40
|
+
* Returns null on any network or HTTP error (never throws).
|
|
41
|
+
*/
|
|
42
|
+
async function gqlQuery<T>(
|
|
43
|
+
endpoint: string,
|
|
44
|
+
query: string,
|
|
45
|
+
variables: Record<string, unknown>,
|
|
46
|
+
authKeyHex?: string,
|
|
47
|
+
): Promise<T | null> {
|
|
48
|
+
try {
|
|
49
|
+
const headers: Record<string, string> = { 'Content-Type': 'application/json' };
|
|
50
|
+
if (authKeyHex) {
|
|
51
|
+
headers['Authorization'] = `Bearer ${authKeyHex}`;
|
|
52
|
+
}
|
|
53
|
+
const response = await fetch(endpoint, {
|
|
54
|
+
method: 'POST',
|
|
55
|
+
headers,
|
|
56
|
+
body: JSON.stringify({ query, variables }),
|
|
57
|
+
});
|
|
58
|
+
if (!response.ok) return null;
|
|
59
|
+
const json = await response.json() as { data?: T };
|
|
60
|
+
return json.data ?? null;
|
|
61
|
+
} catch {
|
|
62
|
+
return null;
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
/**
 * GraphQL query for blind index lookup.
 *
 * Filters to the owner's active facts whose index hash is in $trapdoors,
 * ordered by id descending, capped at $first (subgraph hard limit: see
 * PAGE_SIZE).
 */
const SEARCH_QUERY = `
  query SearchByBlindIndex($trapdoors: [String!]!, $owner: Bytes!, $first: Int!) {
    blindIndexes(
      where: { hash_in: $trapdoors, owner: $owner, fact_: { isActive: true } }
      first: $first
      orderBy: id
      orderDirection: desc
    ) {
      id
      fact {
        id
        encryptedBlob
        encryptedEmbedding
        decayScore
        timestamp
        isActive
        contentFp
        sequenceId
        version
      }
    }
  }
`;
|
|
90
|
+
|
|
91
|
+
/**
 * Pagination query — cursor-based using id_gt, ascending for a deterministic
 * walk. Unlike SEARCH_QUERY it takes $lastId, the cursor of the last entry
 * seen; pass '' to start from the beginning (every id compares greater).
 */
const PAGINATE_QUERY = `
  query PaginateBlindIndex($trapdoors: [String!]!, $owner: Bytes!, $first: Int!, $lastId: String!) {
    blindIndexes(
      where: { hash_in: $trapdoors, owner: $owner, id_gt: $lastId, fact_: { isActive: true } }
      first: $first
      orderBy: id
      orderDirection: asc
    ) {
      id
      fact {
        id
        encryptedBlob
        encryptedEmbedding
        timestamp
        decayScore
        isActive
        contentFp
        sequenceId
        version
      }
    }
  }
`;
|
|
115
|
+
|
|
116
|
+
/** One row of the subgraph's blindIndexes entity: the index id plus its linked fact. */
interface BlindIndexEntry {
  // Blind index entity id — also the pagination cursor (id_gt in PAGINATE_QUERY).
  id: string;
  fact: SubgraphSearchFact;
}

/** Shape of the GraphQL `data` object for SEARCH_QUERY / PAGINATE_QUERY. */
interface SearchResponse {
  blindIndexes?: BlindIndexEntry[];
}
|
|
124
|
+
|
|
125
|
+
/** Collect facts from blind index entries, deduplicating by fact id. */
|
|
126
|
+
function collectFacts(
|
|
127
|
+
entries: BlindIndexEntry[],
|
|
128
|
+
allResults: Map<string, SubgraphSearchFact>,
|
|
129
|
+
): void {
|
|
130
|
+
for (const entry of entries) {
|
|
131
|
+
if (entry.fact && entry.fact.isActive !== false && !allResults.has(entry.fact.id)) {
|
|
132
|
+
allResults.set(entry.fact.id, entry.fact);
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
/**
|
|
138
|
+
* Paginate a single trapdoor chunk until exhausted or maxCandidates reached.
|
|
139
|
+
*/
|
|
140
|
+
async function paginateChunk(
|
|
141
|
+
subgraphUrl: string,
|
|
142
|
+
chunk: string[],
|
|
143
|
+
owner: string,
|
|
144
|
+
allResults: Map<string, SubgraphSearchFact>,
|
|
145
|
+
maxCandidates: number,
|
|
146
|
+
authKeyHex?: string,
|
|
147
|
+
): Promise<void> {
|
|
148
|
+
let lastId = '';
|
|
149
|
+
while (allResults.size < maxCandidates) {
|
|
150
|
+
const data = await gqlQuery<SearchResponse>(
|
|
151
|
+
subgraphUrl,
|
|
152
|
+
PAGINATE_QUERY,
|
|
153
|
+
{ trapdoors: chunk, owner, first: PAGE_SIZE, lastId },
|
|
154
|
+
authKeyHex,
|
|
155
|
+
);
|
|
156
|
+
const entries = data?.blindIndexes ?? [];
|
|
157
|
+
if (entries.length === 0) break;
|
|
158
|
+
collectFacts(entries, allResults);
|
|
159
|
+
if (entries.length < PAGE_SIZE) break;
|
|
160
|
+
lastId = entries[entries.length - 1].id;
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
/**
 * Search the subgraph for facts matching the given trapdoors.
 *
 * Adaptive strategy to minimize query costs:
 *
 * Phase 1: Single query with ALL trapdoors.
 *   - If not saturated (< PAGE_SIZE results): done. 1 query total.
 *   - If saturated: common trapdoors may be drowning rare ones. Go to Phase 2.
 *
 * Phase 2: Split trapdoors into small parallel batches (TRAPDOOR_BATCH_SIZE=5).
 *   - Each batch independently gets up to PAGE_SIZE results.
 *   - Rare trapdoor matches get their own budget.
 *
 * Phase 3: Cursor-based pagination for any saturated batch.
 *   - Only for power users with very large datasets.
 *
 * @param owner         Smart Account address used as the subgraph `owner` filter.
 * @param trapdoors     Blind-index hashes to match (hash_in).
 * @param maxCandidates Stop threshold for Phase 3 pagination.
 * @param authKeyHex    Optional bearer token for the relay server.
 * @returns Deduplicated facts in insertion order.
 *
 * NOTE(review): `maxCandidates` only bounds Phase 3 — Phases 1 and 2 are
 * uncapped, so the result may exceed maxCandidates; presumably callers
 * truncate/rerank downstream — confirm.
 */
export async function searchSubgraph(
  owner: string,
  trapdoors: string[],
  maxCandidates: number,
  authKeyHex?: string,
): Promise<SubgraphSearchFact[]> {
  const config = getSubgraphConfig();
  const subgraphUrl = `${config.relayUrl}/v1/subgraph`;
  // Keyed by fact id — dedups facts matched by multiple trapdoors/phases.
  const allResults = new Map<string, SubgraphSearchFact>();

  // -----------------------------------------------------------------------
  // Phase 1: Single query with all trapdoors (1 query)
  // -----------------------------------------------------------------------
  const phase1 = await gqlQuery<SearchResponse>(
    subgraphUrl,
    SEARCH_QUERY,
    { trapdoors, owner, first: PAGE_SIZE },
    authKeyHex,
  );

  const phase1Entries = phase1?.blindIndexes ?? [];
  collectFacts(phase1Entries, allResults);

  // Not saturated — we got everything in 1 query. Done.
  // (Also the exit path when the query failed: gqlQuery returned null.)
  if (phase1Entries.length < PAGE_SIZE) {
    return Array.from(allResults.values());
  }

  // -----------------------------------------------------------------------
  // Phase 2: Saturated — split into small batches for better rare-word recall.
  // Common trapdoors were drowning rare ones in the single-query result.
  // -----------------------------------------------------------------------
  const chunks: string[][] = [];
  for (let i = 0; i < trapdoors.length; i += TRAPDOOR_BATCH_SIZE) {
    chunks.push(trapdoors.slice(i, i + TRAPDOOR_BATCH_SIZE));
  }

  // Batches are independent — run them in parallel.
  const batchResults = await Promise.all(
    chunks.map(async (chunk) => {
      const data = await gqlQuery<SearchResponse>(
        subgraphUrl,
        SEARCH_QUERY,
        { trapdoors: chunk, owner, first: PAGE_SIZE },
        authKeyHex,
      );
      return { chunk, entries: data?.blindIndexes ?? [] };
    }),
  );

  // Collect everything; remember which chunks hit the PAGE_SIZE cap.
  const saturatedChunks: string[][] = [];
  for (const { chunk, entries } of batchResults) {
    collectFacts(entries, allResults);
    if (entries.length >= PAGE_SIZE) {
      saturatedChunks.push(chunk);
    }
  }

  // -----------------------------------------------------------------------
  // Phase 3: Cursor-based pagination for saturated batches (power users).
  // Sequential on purpose — each chunk can observe the maxCandidates cap.
  // -----------------------------------------------------------------------
  for (const chunk of saturatedChunks) {
    if (allResults.size >= maxCandidates) break;
    await paginateChunk(subgraphUrl, chunk, owner, allResults, maxCandidates, authKeyHex);
  }

  return Array.from(allResults.values());
}
|
|
247
|
+
|
|
248
|
+
/**
|
|
249
|
+
* Get fact count from the subgraph for dynamic pool sizing.
|
|
250
|
+
* Uses the globalStates entity for a lightweight single-row lookup
|
|
251
|
+
* instead of fetching and counting individual fact IDs.
|
|
252
|
+
*/
|
|
253
|
+
export async function getSubgraphFactCount(owner: string, authKeyHex?: string): Promise<number> {
|
|
254
|
+
const config = getSubgraphConfig();
|
|
255
|
+
const subgraphUrl = `${config.relayUrl}/v1/subgraph`;
|
|
256
|
+
|
|
257
|
+
const query = `
|
|
258
|
+
query FactCount {
|
|
259
|
+
globalStates(first: 1) {
|
|
260
|
+
totalFacts
|
|
261
|
+
}
|
|
262
|
+
}
|
|
263
|
+
`;
|
|
264
|
+
|
|
265
|
+
const data = await gqlQuery<{ globalStates?: Array<{ totalFacts: string }> }>(
|
|
266
|
+
subgraphUrl,
|
|
267
|
+
query,
|
|
268
|
+
{},
|
|
269
|
+
authKeyHex,
|
|
270
|
+
);
|
|
271
|
+
|
|
272
|
+
if (data?.globalStates && data.globalStates.length > 0) {
|
|
273
|
+
const count = parseInt(data.globalStates[0].totalFacts, 10);
|
|
274
|
+
return isNaN(count) ? 0 : count;
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
return 0;
|
|
278
|
+
}
|
|
@@ -0,0 +1,342 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Subgraph store path — writes facts on-chain via ERC-4337 UserOps.
|
|
3
|
+
*
|
|
4
|
+
* Used when TOTALRECLAW_SUBGRAPH_MODE=true. Replaces the HTTP POST
|
|
5
|
+
* to /v1/store with an on-chain transaction flow.
|
|
6
|
+
*
|
|
7
|
+
* Builds UserOps client-side using `permissionless` + `viem` and submits
|
|
8
|
+
* them through the TotalReclaw relay server, which proxies bundler/paymaster
|
|
9
|
+
* JSON-RPC to Pimlico with its own API key. Clients never need a Pimlico key.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { createPublicClient, http, type Hex, type Address, type Chain } from 'viem';
|
|
13
|
+
import { entryPoint07Address } from 'viem/account-abstraction';
|
|
14
|
+
import { mnemonicToAccount } from 'viem/accounts';
|
|
15
|
+
import { gnosis, gnosisChiado } from 'viem/chains';
|
|
16
|
+
import { createSmartAccountClient } from 'permissionless';
|
|
17
|
+
import { toSimpleSmartAccount } from 'permissionless/accounts';
|
|
18
|
+
import { createPimlicoClient } from 'permissionless/clients/pimlico';
|
|
19
|
+
|
|
20
|
+
// ---------------------------------------------------------------------------
|
|
21
|
+
// Types
|
|
22
|
+
// ---------------------------------------------------------------------------
|
|
23
|
+
|
|
24
|
+
/** Default EventfulDataEdge contract address on Chiado testnet (override via TOTALRECLAW_DATA_EDGE_ADDRESS). */
const DEFAULT_DATA_EDGE_ADDRESS = '0xA84c5433110Ccc93e57ec387e630E86Bad86c36f';

/** Well-known ERC-4337 EntryPoint v0.7 address (same on all chains; override via TOTALRECLAW_ENTRYPOINT_ADDRESS). */
const DEFAULT_ENTRYPOINT_ADDRESS = '0x0000000071727De22E5E9d8BAf0edAc6f37da032';
|
|
29
|
+
|
|
30
|
+
/**
 * Client-side configuration for the on-chain store path.
 * Populated from environment variables by getSubgraphConfig().
 */
export interface SubgraphStoreConfig {
  relayUrl: string; // TotalReclaw relay server URL (proxies bundler + subgraph)
  mnemonic: string; // BIP-39 mnemonic for key derivation
  cachePath: string; // Hot cache file path
  chainId: number; // 10200 for Chiado, 100 for Gnosis
  dataEdgeAddress: string; // EventfulDataEdge contract address
  entryPointAddress: string; // ERC-4337 EntryPoint v0.7
  authKeyHex?: string; // HKDF auth key for relay server Authorization header
  rpcUrl?: string; // Override chain RPC URL for public client reads
  walletAddress?: string; // Smart Account address for billing (X-Wallet-Address header)
}
|
|
41
|
+
|
|
42
|
+
/**
 * A fact to be stored on-chain. Serialized by encodeFactProtobuf() and
 * submitted as DataEdge calldata by submitFactOnChain().
 */
export interface FactPayload {
  id: string;
  timestamp: string;
  owner: string; // Smart Account address (hex)
  encryptedBlob: string; // Hex-encoded AES-256-GCM ciphertext
  blindIndices: string[]; // SHA-256 hashes (word + LSH)
  decayScore: number;
  source: string;
  contentFp: string;
  agentId: string;
  encryptedEmbedding?: string;
}
|
|
54
|
+
|
|
55
|
+
// ---------------------------------------------------------------------------
|
|
56
|
+
// Protobuf encoding (unchanged)
|
|
57
|
+
// ---------------------------------------------------------------------------
|
|
58
|
+
|
|
59
|
+
/**
 * Encode a fact payload as a minimal Protobuf wire format.
 *
 * Field numbers match server/proto/totalreclaw.proto:
 *  1: id (string), 2: timestamp (string), 3: owner (string),
 *  4: encrypted_blob (bytes), 5: blind_indices (repeated string),
 *  6: decay_score (double), 7: is_active (bool), 8: version (int32),
 *  9: source (string), 10: content_fp (string), 11: agent_id (string),
 *  12: sequence_id (int64), 13: encrypted_embedding (string)
 *
 * Note: empty strings are omitted entirely (writeString early-returns),
 * matching proto3 default-value omission.
 */
export function encodeFactProtobuf(fact: FactPayload): Buffer {
  const parts: Buffer[] = [];

  // Helper: encode a string field (skipped entirely when value is falsy,
  // i.e. proto3-style omission of default values)
  const writeString = (fieldNumber: number, value: string) => {
    if (!value) return;
    const data = Buffer.from(value, 'utf-8');
    const key = (fieldNumber << 3) | 2; // wire type 2 = length-delimited
    parts.push(encodeVarint(key));
    parts.push(encodeVarint(data.length));
    parts.push(data);
  };

  // Helper: encode a bytes field (wire type 2, always written)
  const writeBytes = (fieldNumber: number, value: Buffer) => {
    const key = (fieldNumber << 3) | 2;
    parts.push(encodeVarint(key));
    parts.push(encodeVarint(value.length));
    parts.push(value);
  };

  // Helper: encode a double field (wire type 1 = 64-bit little-endian)
  const writeDouble = (fieldNumber: number, value: number) => {
    const key = (fieldNumber << 3) | 1;
    parts.push(encodeVarint(key));
    const buf = Buffer.alloc(8);
    buf.writeDoubleLE(value);
    parts.push(buf);
  };

  // Helper: encode a varint field (wire type 0)
  const writeVarintField = (fieldNumber: number, value: number) => {
    const key = (fieldNumber << 3) | 0;
    parts.push(encodeVarint(key));
    parts.push(encodeVarint(value));
  };

  // Encode fields in ascending field-number order
  writeString(1, fact.id);
  writeString(2, fact.timestamp);
  writeString(3, fact.owner);
  // encryptedBlob is hex on the wire-facing interface; decode to raw bytes here
  writeBytes(4, Buffer.from(fact.encryptedBlob, 'hex'));

  // Repeated field: one length-delimited record per blind index
  for (const index of fact.blindIndices) {
    writeString(5, index);
  }

  writeDouble(6, fact.decayScore);
  writeVarintField(7, 1); // is_active = true
  writeVarintField(8, 2); // version = 2
  writeString(9, fact.source);
  writeString(10, fact.contentFp);
  writeString(11, fact.agentId);
  // Field 12 (sequence_id) is assigned by the subgraph mapping, not the client
  if (fact.encryptedEmbedding) {
    writeString(13, fact.encryptedEmbedding);
  }

  return Buffer.concat(parts);
}
|
|
129
|
+
|
|
130
|
+
/** Encode an integer as a Protobuf varint */
|
|
131
|
+
export function encodeVarint(value: number): Buffer {
|
|
132
|
+
const bytes: number[] = [];
|
|
133
|
+
let v = value >>> 0; // unsigned
|
|
134
|
+
while (v > 0x7f) {
|
|
135
|
+
bytes.push((v & 0x7f) | 0x80);
|
|
136
|
+
v >>>= 7;
|
|
137
|
+
}
|
|
138
|
+
bytes.push(v & 0x7f);
|
|
139
|
+
return Buffer.from(bytes);
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
// ---------------------------------------------------------------------------
|
|
143
|
+
// Chain helpers
|
|
144
|
+
// ---------------------------------------------------------------------------
|
|
145
|
+
|
|
146
|
+
/** Resolve a viem Chain object from chain ID */
|
|
147
|
+
function getChainFromId(chainId: number): Chain {
|
|
148
|
+
switch (chainId) {
|
|
149
|
+
case 100:
|
|
150
|
+
return gnosis;
|
|
151
|
+
case 10200:
|
|
152
|
+
return gnosisChiado;
|
|
153
|
+
default:
|
|
154
|
+
return gnosisChiado;
|
|
155
|
+
}
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
/** Build the relay bundler RPC URL from the relay server URL */
|
|
159
|
+
function getRelayBundlerUrl(relayUrl: string): string {
|
|
160
|
+
return `${relayUrl}/v1/bundler`;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
// ---------------------------------------------------------------------------
|
|
164
|
+
// On-chain submission (Pimlico UserOps)
|
|
165
|
+
// ---------------------------------------------------------------------------
|
|
166
|
+
|
|
167
|
+
/**
 * Submit a fact on-chain via ERC-4337 UserOp through the relay server.
 *
 * Builds a UserOp client-side using `permissionless` + `viem`:
 * 1. Derives private key from mnemonic (BIP-39 + BIP-44 m/44'/60'/0'/0/0)
 * 2. Creates a SimpleSmartAccount
 * 3. Gets paymaster sponsorship (via relay proxy to Pimlico)
 * 4. Signs and submits the UserOp to relay bundler endpoint
 * 5. Waits for the transaction receipt
 *
 * The relay server proxies all bundler/paymaster JSON-RPC to Pimlico
 * with its own API key. Clients never need a Pimlico API key.
 *
 * @param protobufPayload encoded fact (see encodeFactProtobuf) — used directly
 *        as DataEdge calldata
 * @param config relay / chain / account configuration
 * @returns the including transaction hash, the UserOp hash, and the
 *          receipt's success flag
 * @throws Error when config.relayUrl or config.mnemonic is missing
 */
export async function submitFactOnChain(
  protobufPayload: Buffer,
  config: SubgraphStoreConfig,
): Promise<{ txHash: string; userOpHash: string; success: boolean }> {
  if (!config.relayUrl) {
    throw new Error('Relay URL (TOTALRECLAW_SERVER_URL) is required for on-chain submission');
  }

  if (!config.mnemonic) {
    throw new Error('Mnemonic (TOTALRECLAW_MASTER_PASSWORD) is required for on-chain submission');
  }

  const chain = getChainFromId(config.chainId);
  const bundlerRpcUrl = getRelayBundlerUrl(config.relayUrl);
  const dataEdgeAddress = config.dataEdgeAddress as Address;
  const entryPointAddr = (config.entryPointAddress || entryPoint07Address) as Address;

  // Build authenticated transport for relay server proxy
  const headers: Record<string, string> = {};
  if (config.authKeyHex) headers['Authorization'] = `Bearer ${config.authKeyHex}`;
  if (config.walletAddress) headers['X-Wallet-Address'] = config.walletAddress;

  const authTransport = Object.keys(headers).length > 0
    ? http(bundlerRpcUrl, { fetchOptions: { headers } })
    : http(bundlerRpcUrl);

  // 1. Derive EOA signer from mnemonic (BIP-44 m/44'/60'/0'/0/0)
  const ownerAccount = mnemonicToAccount(config.mnemonic);

  // 2. Create a public client for chain reads (using explicit RPC if configured,
  //    NOT the bundler proxy which only supports ERC-4337 JSON-RPC methods)
  const publicClient = createPublicClient({
    chain,
    transport: config.rpcUrl ? http(config.rpcUrl) : http(),
  });

  // 3. Create Pimlico client for bundler + paymaster operations (via relay)
  const pimlicoClient = createPimlicoClient({
    chain,
    transport: authTransport,
    entryPoint: {
      address: entryPointAddr,
      version: '0.7',
    },
  });

  // 4. Create a SimpleSmartAccount (auto-generates initCode if undeployed)
  const smartAccount = await toSimpleSmartAccount({
    client: publicClient,
    owner: ownerAccount,
    entryPoint: {
      address: entryPointAddr,
      version: '0.7',
    },
  });

  // 5. Create smart account client wired to relay bundler + paymaster
  const smartAccountClient = createSmartAccountClient({
    account: smartAccount,
    chain,
    bundlerTransport: authTransport,
    // Paymaster sponsorship proxied through relay to Pimlico
    paymaster: pimlicoClient,
    userOperation: {
      // Use Pimlico's "fast" gas-price tier for the UserOp fee estimate
      estimateFeesPerGas: async () => {
        return (await pimlicoClient.getUserOperationGasPrice()).fast;
      },
    },
  });

  // 6. Send the transaction: Smart Account execute(dataEdgeAddress, 0, protobufPayload)
  // The DataEdge contract has a fallback() that emits Log(bytes), so the calldata
  // IS the protobuf payload directly (no function selector needed).
  // permissionless encodes the execute() call internally from to/value/data.
  const calldata = `0x${protobufPayload.toString('hex')}` as Hex;

  // Use sendUserOperation to get the userOpHash, then wait for receipt
  const userOpHash = await smartAccountClient.sendUserOperation({
    calls: [
      {
        to: dataEdgeAddress,
        value: 0n,
        data: calldata,
      },
    ],
  });

  // 7. Wait for the UserOp to be included in a transaction
  const receipt = await pimlicoClient.waitForUserOperationReceipt({
    hash: userOpHash,
  });

  return {
    txHash: receipt.receipt.transactionHash,
    userOpHash,
    success: receipt.success,
  };
}
|
|
278
|
+
|
|
279
|
+
// ---------------------------------------------------------------------------
|
|
280
|
+
// Configuration
|
|
281
|
+
// ---------------------------------------------------------------------------
|
|
282
|
+
|
|
283
|
+
/**
|
|
284
|
+
* Check if subgraph mode is enabled.
|
|
285
|
+
*
|
|
286
|
+
* Returns true unless TOTALRECLAW_SUBGRAPH_MODE is explicitly set to "false".
|
|
287
|
+
*/
|
|
288
|
+
export function isSubgraphMode(): boolean {
|
|
289
|
+
return process.env.TOTALRECLAW_SUBGRAPH_MODE !== 'false';
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
/*
 * NOTE(review): the doc comment below describes getSubgraphConfig(), which is
 * defined further down in this file — not the deriveSmartAccountAddress()
 * function that immediately follows it. Consider moving it next to
 * getSubgraphConfig().
 *
 * Get subgraph configuration from environment variables.
 *
 * After the relay refactor, clients only need:
 * - TOTALRECLAW_MASTER_PASSWORD -- BIP-39 mnemonic
 * - TOTALRECLAW_SERVER_URL -- relay server URL (default: http://localhost:8000)
 * - TOTALRECLAW_SUBGRAPH_MODE -- set "false" to disable (default: enabled with valid mnemonic)
 * - TOTALRECLAW_CHAIN_ID -- optional, defaults to 10200 (Chiado)
 *
 * Removed from client-side config (now server-side only):
 * - PIMLICO_API_KEY
 * - TOTALRECLAW_SUBGRAPH_ENDPOINT
 */
|
|
305
|
+
/**
 * Derive the Smart Account address from a BIP-39 mnemonic.
 * This is the on-chain owner identity used in the subgraph.
 *
 * Derivation: mnemonic → EOA signer (mnemonicToAccount) → SimpleSmartAccount
 * for EntryPoint v0.7, lowercased for consistent subgraph `owner` comparisons.
 * NOTE(review): presumably toSimpleSmartAccount yields the counterfactual
 * address without requiring deployment — confirm with permissionless docs.
 *
 * @param mnemonic BIP-39 mnemonic phrase
 * @param chainId  100 selects Gnosis; anything else (or undefined) uses Chiado
 * @returns lowercase hex Smart Account address
 */
export async function deriveSmartAccountAddress(mnemonic: string, chainId?: number): Promise<string> {
  const chain: Chain = (chainId ?? 10200) === 100 ? gnosis : gnosisChiado;
  const ownerAccount = mnemonicToAccount(mnemonic);
  const entryPointAddr = (process.env.TOTALRECLAW_ENTRYPOINT_ADDRESS || DEFAULT_ENTRYPOINT_ADDRESS) as Address;
  const rpcUrl = process.env.TOTALRECLAW_RPC_URL;

  // Public client for any chain reads toSimpleSmartAccount performs
  const publicClient = createPublicClient({
    chain,
    transport: rpcUrl ? http(rpcUrl) : http(),
  });

  const smartAccount = await toSimpleSmartAccount({
    client: publicClient,
    owner: ownerAccount,
    entryPoint: {
      address: entryPointAddr,
      version: '0.7',
    },
  });

  return smartAccount.address.toLowerCase();
}
|
|
331
|
+
|
|
332
|
+
export function getSubgraphConfig(): SubgraphStoreConfig {
|
|
333
|
+
return {
|
|
334
|
+
relayUrl: process.env.TOTALRECLAW_SERVER_URL || 'http://localhost:8000',
|
|
335
|
+
mnemonic: process.env.TOTALRECLAW_MASTER_PASSWORD || '',
|
|
336
|
+
cachePath: process.env.TOTALRECLAW_CACHE_PATH || `${process.env.HOME}/.totalreclaw/cache.enc`,
|
|
337
|
+
chainId: parseInt(process.env.TOTALRECLAW_CHAIN_ID || '10200'),
|
|
338
|
+
dataEdgeAddress: process.env.TOTALRECLAW_DATA_EDGE_ADDRESS || DEFAULT_DATA_EDGE_ADDRESS,
|
|
339
|
+
entryPointAddress: process.env.TOTALRECLAW_ENTRYPOINT_ADDRESS || DEFAULT_ENTRYPOINT_ADDRESS,
|
|
340
|
+
rpcUrl: process.env.TOTALRECLAW_RPC_URL || undefined,
|
|
341
|
+
};
|
|
342
|
+
}
|