@x1scroll/agent-sdk 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,272 @@
1
+ 'use strict';
2
+
3
+ /**
4
+ * examples/context-saver.js — THE MONEY EXAMPLE
5
+ *
6
+ * Full pattern for on-chain memory as context cost reduction:
7
+ * END of session → compress conversation → push to IPFS → stamp CID on-chain
8
+ * START of next session → fetch CIDs → pull only relevant → inject into context
9
+ *
10
+ * Result: 90% reduction in context tokens = 90% reduction in API costs.
11
+ *
12
+ * Cost comparison:
13
+ * Full history in context: ~100k tokens/session → ~$4.50/day (Claude Sonnet)
14
+ * This pattern: ~10k tokens/session → ~$0.45/day + 0.1 XNT (~$0.03)
15
+ *
16
+ * Prerequisites:
17
+ * npm install @x1scroll/agent-sdk
18
+ * npm install @helia/unixfs helia (or use your preferred IPFS client)
19
+ */
20
+
21
+ const { AgentClient } = require('@x1scroll/agent-sdk');
22
+ const { Keypair } = require('@solana/web3.js');
23
+
24
// ── Config ────────────────────────────────────────────────────────────────────
const RPC_URL = 'https://rpc.x1scroll.io'; // x1scroll dedicated node

// Load from env / secure keystore in production.
// Decodes a JSON-array secret key (as stored by solana-keygen) into a Keypair;
// an unset variable falls back to '[]', which Keypair.fromSecretKey will reject.
const keypairFromEnv = (rawKey) =>
  Keypair.fromSecretKey(Buffer.from(JSON.parse(rawKey || '[]')));

const humanKeypair = keypairFromEnv(process.env.HUMAN_WALLET_KEY);
const agentKeypair = keypairFromEnv(process.env.AGENT_WALLET_KEY);
34
+
35
// ── Stubbed IPFS client — replace with your actual implementation ─────────────
// Options: Helia (browser/node), web3.storage, Pinata, Filebase, nft.storage
const ipfs = {
  /**
   * Push data to IPFS, return CID string.
   * @param {string} payload JSON string to store
   * @returns {Promise<string>} CID
   */
  async add(payload) {
    // Example using web3.storage:
    //   const client = new Web3Storage({ token: process.env.W3S_TOKEN });
    //   const blob = new Blob([payload], { type: 'application/json' });
    //   const cid = await client.put([new File([blob], 'memory.json')]);
    //   return cid;

    // Stub for demo — logs a preview and returns a hard-coded CID.
    console.log(' [ipfs.add] would push:', `${payload.slice(0, 80)}...`);
    return 'bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi'; // fake CID
  },

  /**
   * Fetch content from IPFS by CID.
   * @param {string} cid
   * @returns {Promise<object>}
   */
  async get(cid) {
    // Example: return await fetch(`https://w3s.link/ipfs/${cid}`).then(r => r.json());

    // Stub for demo — returns a canned memory summary.
    console.log(' [ipfs.get] would fetch CID:', cid);
    return {
      summary: 'User discussed trading strategy on X1. Decided to focus on XNT/USDC pair. Threshold: 5% swing.',
      keyFacts: ['user trades XNT', 'risk tolerance: medium', 'preferred DEX: xDEX'],
      sessionDate: '2026-04-06',
    };
  },
};
72
+
73
// ── Stubbed LLM compressor — replace with your AI model ───────────────────────
const llm = {
  /**
   * Compress a conversation into a JSON-serializable summary.
   * In production: call Claude, GPT, Llama, etc. with a compression prompt.
   * @param {Array} turns
   * @returns {Promise<object>}
   */
  async compress(turns) {
    // Example prompt for your LLM:
    //   "Compress this conversation into a structured JSON summary.
    //    Include: key facts, decisions, topics discussed, important context.
    //    Be concise — this will be loaded as context in future sessions.
    //    Target: <500 tokens."
    console.log(` [llm.compress] compressing ${turns.length} turns...`);

    // Stub: a canned summary; only compressedAt varies between calls.
    return {
      summary: 'Discussed memory architecture and context cost reduction strategies.',
      decisions: ['use on-chain CIDs for memory indexing', 'store compressed JSON to IPFS'],
      keyFacts: ['user is building an AI agent', 'budget: $50/day API'],
      topics: ['memory', 'cost', 'X1', 'IPFS'],
      compressedAt: new Date().toISOString(),
    };
  },

  /**
   * Perform semantic matching — pick the memories most relevant to the current query.
   * In production: use embeddings (OpenAI, nomic-embed-text, etc.) + cosine similarity.
   * @param {Array} candidates
   * @param {string} currentQuery
   * @returns {Promise<Array>}
   */
  async selectRelevant(candidates, currentQuery) {
    console.log(` [llm.selectRelevant] selecting from ${candidates.length} memories for: "${currentQuery}"`);
    // Simple stub: return all (real impl would filter by semantic similarity)
    return candidates;
  },
};
110
+
111
// ── The pattern ───────────────────────────────────────────────────────────────

/**
 * Call this at the END of every agent session.
 * Compresses the conversation with the LLM, pushes the compressed JSON to IPFS,
 * and stamps the resulting CID on X1.
 *
 * Cost: 0.001 XNT + IPFS storage (often free tier)
 * Saves: ~90% of next session's context tokens
 *
 * @param {Array} conversationHistory Array of { role, content } turns
 * @param {string} [sessionLabel] Optional label (defaults to today's date)
 * @returns {Promise<{txSig: string, cid: string, memoryEntryPDA: string}>}
 */
async function saveSessionToChain(conversationHistory, sessionLabel) {
  const chainClient = new AgentClient({ wallet: humanKeypair, rpcUrl: RPC_URL });
  const todayStamp = new Date().toISOString().slice(0, 10);
  const topic = sessionLabel || `session-${todayStamp}`;

  console.log(`\n=== SAVING SESSION: ${topic} ===`);
  console.log(`Conversation turns: ${conversationHistory.length}`);

  // Step 1: Compress the conversation with your LLM
  console.log('\n1. Compressing conversation...');
  const compressed = await llm.compress(conversationHistory);
  const payload = JSON.stringify(compressed);
  console.log(` Compressed: ${payload.length} bytes (vs ${estimateTokens(conversationHistory)} tokens raw)`);

  // Step 2: Push compressed summary to IPFS
  console.log('\n2. Pushing to IPFS...');
  const cid = await ipfs.add(payload);
  console.log(` CID: ${cid}`);

  // Step 3: Stamp the CID on X1 — the drip
  console.log('\n3. Stamping CID on X1... (0.001 XNT)');
  const { txSig, memoryEntryPDA } = await chainClient.storeMemory(
    agentKeypair,
    humanKeypair.publicKey.toBase58(),
    topic,
    cid,
    ['session', 'compressed', 'daily'],
    false // not encrypted (encrypt before IPFS.add() if needed)
  );

  console.log(` ✓ Stamped on-chain!`);
  console.log(` TX: ${txSig}`);
  console.log(` MemoryEntry: ${memoryEntryPDA}`);
  console.log(` Explorer: https://explorer.x1.xyz/tx/${txSig}`);

  return { txSig, cid, memoryEntryPDA };
}
162
+
163
/**
 * Call this at the START of every agent session.
 * Loads relevant memories from chain → IPFS → returns a context string.
 *
 * Replaces: loading 100k tokens of conversation history
 * Result: 5–10k tokens of targeted, relevant context
 *
 * @param {string} currentQuery What the user is asking / current topic
 * @param {number} [limit=5] How many recent memories to consider
 * @returns {Promise<string>} Compressed context to inject into your LLM prompt
 */
async function loadContextFromChain(currentQuery, limit = 5) {
  const reader = new AgentClient({ rpcUrl: RPC_URL }); // read-only, no wallet needed

  console.log(`\n=== LOADING CONTEXT for: "${currentQuery}" ===`);

  // Step 1: Fetch recent memory CIDs from chain (one RPC call)
  console.log(`\n1. Fetching last ${limit} memories from X1...`);
  const onChain = await reader.listMemories(agentKeypair.publicKey.toBase58(), limit);
  console.log(` Found ${onChain.length} memories on-chain`);

  if (onChain.length === 0) {
    console.log(' No memories found. Starting fresh session.');
    return '';
  }

  // Step 2: Fetch content from IPFS (only for non-encrypted entries)
  console.log('\n2. Fetching content from IPFS...');
  const readable = onChain.filter((entry) => !entry.encrypted);
  const summaries = await Promise.all(
    readable.map(async (entry) => ({
      topic: entry.topic,
      timestamp: entry.timestamp,
      tags: entry.tags,
      content: await ipfs.get(entry.cid),
    }))
  );

  // Step 3: Semantic selection — pull only what's relevant
  console.log('\n3. Selecting relevant memories...');
  const relevant = await llm.selectRelevant(summaries, currentQuery);
  console.log(` Selected ${relevant.length} of ${summaries.length} memories as relevant`);

  // Step 4: Build context string for your LLM prompt.
  // NOTE(review): assumes entry.timestamp is a unix timestamp in seconds — confirm with SDK.
  const renderMemory = (entry) => {
    const day = new Date(entry.timestamp * 1000).toISOString().slice(0, 10);
    return `[${entry.topic} | ${day}]\n${JSON.stringify(entry.content, null, 2)}`;
  };
  const contextBlock = relevant.map(renderMemory).join('\n\n---\n\n');

  const tokenEstimate = Math.ceil(contextBlock.length / 4);
  console.log(`\n✓ Context ready: ~${tokenEstimate} tokens (vs ~100k for full history)`);
  console.log(` Cost savings: ~${Math.round((1 - tokenEstimate / 100000) * 100)}% fewer tokens`);

  return contextBlock;
}
216
+
217
// ── Utility ────────────────────────────────────────────────────────────────────

/**
 * Rough token estimate for a conversation (~4 characters per token).
 * Turns with a missing/empty `content` contribute zero characters.
 * @param {Array} conversationHistory Array of { role, content } turns
 * @returns {number} Estimated token count
 */
function estimateTokens(conversationHistory) {
  let totalChars = 0;
  for (const turn of conversationHistory) {
    totalChars += (turn.content || '').length;
  }
  return Math.ceil(totalChars / 4); // ~4 chars per token
}
223
+
224
// ── Demo run ───────────────────────────────────────────────────────────────────

/**
 * Walks through the save/load pattern with a simulated conversation, then
 * prints deterministic PDA addresses and the cost math. The on-chain calls
 * are left commented out so the demo runs without a funded wallet.
 */
async function demo() {
  console.log('Context Cost Reduction Demo');
  console.log('===========================\n');

  // Simulated conversation from a previous session
  const previousSession = [
    { role: 'user', content: 'I want to build an AI agent that trades XNT on xDEX.' },
    { role: 'assistant', content: 'Great idea. Let me help you design the strategy...' },
    { role: 'user', content: 'My budget is $50/day for API costs. That is the hard ceiling.' },
    { role: 'assistant', content: 'Understood. With on-chain memory you can cut that to $5/day...' },
    // ... imagine 200 more turns ...
  ];

  // END OF SESSION: save to chain
  // await saveSessionToChain(previousSession, 'trading-agent-design-2026-04-06');

  // START OF NEW SESSION: load context
  // const context = await loadContextFromChain('How do I configure the XNT trading bot?', 5);
  // Then inject `context` into your LLM's system prompt:
  // const response = await yourLLM.chat({ system: `Context from memory:\n${context}`, user: query });

  // Show PDA derivation (works without a wallet or RPC)
  console.log('Agent PDA addresses (deterministic, no RPC needed):');
  const agentPubkey = agentKeypair.publicKey;
  const { pda: recordPDA } = AgentClient.deriveAgentRecord(agentPubkey);
  const { pda: mem0PDA } = AgentClient.deriveMemoryEntry(agentPubkey, 0);
  const { pda: mem1PDA } = AgentClient.deriveMemoryEntry(agentPubkey, 1);
  console.log(' AgentRecord: ', recordPDA.toBase58());
  console.log(' Memory[0]: ', mem0PDA.toBase58());
  console.log(' Memory[1]: ', mem1PDA.toBase58());

  console.log('\n────────────────────────────────────────────────────────────────');
  console.log('The math:');
  console.log(' Before: 100k tokens/session × $3/1M × 300 sessions/day = $90/day');
  console.log(' After: 10k tokens/session × $3/1M × 300 sessions/day = $9/day');
  console.log(' Savings: $81/day on API costs');
  console.log(' On-chain cost: 300 × 0.001 XNT = 0.3 XNT/day (~$0.09/day at $0.30 XNT)');
  console.log(' NET SAVINGS: ~$80.91/day');
  console.log('────────────────────────────────────────────────────────────────');
  console.log('\nGet started: https://x1scroll.io | Get XNT: https://app.xdex.xyz');
}
266
+
267
// Run the demo; on failure, report the message and exit non-zero.
(async () => {
  try {
    await demo();
  } catch (err) {
    console.error('Error:', err.message);
    process.exit(1);
  }
})();

// Public API of this example module.
module.exports = { saveSessionToChain, loadContextFromChain };
package/package.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "name": "@x1scroll/agent-sdk",
3
+ "version": "1.0.0",
4
+ "description": "Agent identity and on-chain memory protocol for X1 blockchain",
5
+ "license": "BSL-1.1",
6
+ "main": "src/index.js",
7
+ "types": "src/index.d.ts",
8
+ "files": [
9
+ "src/",
10
+ "examples/",
11
+ "README.md",
12
+ "LICENSE"
13
+ ],
14
+ "keywords": [
15
+ "x1",
16
+ "x1scroll",
17
+ "solana",
18
+ "svm",
19
+ "agent",
20
+ "ai-agent",
21
+ "memory",
22
+ "on-chain",
23
+ "identity",
24
+ "blockchain",
25
+ "ipfs",
26
+ "llm"
27
+ ],
28
+ "author": "x1scroll.io (Arnett Esters)",
29
+ "homepage": "https://x1scroll.io",
30
+ "repository": {
31
+ "type": "git",
32
+ "url": "https://github.com/x1scroll/agent-identity-sdk.git"
33
+ },
34
+ "bugs": {
35
+ "url": "https://github.com/x1scroll/agent-identity-sdk/issues"
36
+ },
37
+ "engines": {
38
+ "node": ">=18.0.0"
39
+ },
40
+ "dependencies": {
41
+ "@solana/web3.js": "^1.98.0",
42
+ "bs58": "^5.0.0"
43
+ },
44
+ "devDependencies": {
45
+ "jest": "^29.0.0"
46
+ },
47
+ "scripts": {
48
+ "test": "jest",
49
+ "lint": "node --check src/index.js"
50
+ },
51
+ "publishConfig": {
52
+ "access": "public"
53
+ }
54
+ }