@nina-protocol/nina-db 0.0.108 → 0.0.110
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- package/dist/index.js +4 -0
- package/dist/models/Release.js +2 -1
- package/dist/redis/index.js +340 -0
- package/dist/redis/subscriptions.js +110 -0
- package/dist/utils/index.js +8 -0
- package/package.json +4 -2
- package/src/index.js +5 -1
- package/src/models/Release.js +2 -1
- package/src/redis/index.js +382 -0
- package/src/redis/subscriptions.js +132 -0
- package/src/utils/index.js +10 -0
- package/tsconfig.json +2 -1
package/dist/index.js
CHANGED

@@ -3,6 +3,8 @@ import Knex from 'knex';
  import { Model } from 'objection';
  import Models from './models/index.js';
  import knexConfig from './knexfile.js';
+ import RedisSubscriptions from './redis/subscriptions.js';
+ import IndexerRedis from './redis/index.js';
  export const initDb = async (config) => {
    const db = Knex(config.development);
    await db.raw(`SELECT 'CREATE DATABASE ${process.env.POSTGRES_DATABASE}'
@@ -28,3 +30,5 @@ export const Tag = Models.Tag;
  export const Transaction = Models.Transaction;
  export const Verification = Models.Verification;
  export const config = knexConfig;
+ export const redis = IndexerRedis;
+ export const SubscriptionsWithCache = RedisSubscriptions;
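
For downstream consumers, the two new exports surface the Redis layer alongside the existing models. A minimal, hypothetical usage sketch — the cache key and loader are illustrative, and it assumes a reachable REDIS_URL and that the exported knex config has a development block:

    // Hypothetical consumer sketch; fetchRecentReleases is a placeholder loader.
    import { initDb, config, redis, SubscriptionsWithCache } from '@nina-protocol/nina-db';

    await initDb(config);
    await redis.startRedis(); // kicks off pool initialization; note it resolves
                              // before initializePool() finishes, so very early
                              // getClient() calls can still throw

    // Cache an arbitrary async result under a key, using the default 2-hour TTL.
    const releases = await redis.withCache('releases:recent', () => fetchRecentReleases());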
package/dist/models/Release.js
CHANGED

@@ -133,7 +133,7 @@ export default class Release extends Model {
      return null;
    }
  };
- static createRelease = async ({ publicKey, mint, metadata, datetime, publisherId, releaseAccount, programId }) => {
+ static createRelease = async ({ publicKey, mint, metadata, datetime, publisherId, releaseAccount, programId, solanaAddress }) => {
    const slug = await this.generateSlug(metadata);
    const price = releaseAccount.account?.price?.toNumber() ||
      releaseAccount?.price?.toNumber() ||
@@ -151,6 +151,7 @@ export default class Release extends Model {
      paymentMint,
      archived: false,
      programId,
+     solanaAddress,
    });
    if (metadata.properties.tags) {
      for await (let tag of metadata.properties.tags) {
package/dist/redis/index.js
ADDED

@@ -0,0 +1,340 @@
+ import Redis from 'ioredis';
+ import dotenv from 'dotenv';
+ dotenv.config();
+ const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
+ const CACHE_TTL = 7200; // 2 hours in seconds
+ const SEARCH_CACHE_TTL = 300; // 5 minutes in seconds for search queries
+ const POOL_SIZE = 10;
+ const CONNECTION_TIMEOUT = 5000; // 5 seconds
+ const OPERATION_TIMEOUT = 10000; // 10 seconds
+ // Track pool health
+ let failedConnections = 0;
+ let lastErrorTime = null;
+ const MAX_FAILURES = 5;
+ const FAILURE_WINDOW = 60000; // 1 minute
+ // Alert thresholds
+ const ALERT_THRESHOLDS = {
+   MIN_ACTIVE_CONNECTIONS: 5, // Alert if less than 5 active connections
+   MAX_FAILED_CONNECTIONS: 3, // Alert if more than 3 failed connections
+   ERROR_WINDOW: 5 * 60 * 1000 // 5 minutes
+ };
+ // Get a client from the pool using round-robin with timeout
+ let currentClientIndex = 0;
+ let totalRequests = 0;
+ // Create Redis connection pool
+ const createRedisPool = (size = POOL_SIZE) => {
+   const pool = [];
+   let activeConnections = 0;
+   for (let i = 0; i < size; i++) {
+     const client = new Redis(REDIS_URL, {
+       retryStrategy: (times) => {
+         const delay = Math.min(times * 1000, 5000);
+         return delay;
+       },
+       maxRetriesPerRequest: 3,
+       connectTimeout: CONNECTION_TIMEOUT,
+       commandTimeout: OPERATION_TIMEOUT,
+       enableOfflineQueue: true,
+       enableReadyCheck: true,
+       reconnectOnError: (err) => {
+         console.error('[Redis] Reconnect on error:', err);
+         return true;
+       },
+       lazyConnect: true,
+       keepAlive: 10000
+     });
+     client.on('error', (error) => {
+       console.error(`[Redis Pool Client ${i}] Error:`, error);
+       failedConnections++;
+       // Check if we're having too many failures
+       const now = Date.now();
+       if (lastErrorTime && (now - lastErrorTime) < FAILURE_WINDOW) {
+         if (failedConnections >= MAX_FAILURES) {
+           console.error('[Redis] Too many failures in short time, removing client from pool');
+           const index = pool.indexOf(client);
+           if (index > -1) {
+             pool.splice(index, 1);
+             client.disconnect();
+           }
+         }
+       }
+       else {
+         // Reset failure count if outside window
+         failedConnections = 1;
+         lastErrorTime = now;
+       }
+     });
+     client.on('connect', () => {
+       activeConnections++;
+       console.log(`[Redis] Client ${i} connected. Active connections: ${activeConnections}`);
+     });
+     client.on('close', () => {
+       activeConnections--;
+       console.log(`[Redis] Client ${i} disconnected. Active connections: ${activeConnections}`);
+     });
+     client.on('timeout', () => {
+       console.error(`[Redis] Client ${i} timed out`);
+       client.disconnect();
+     });
+     pool.push(client);
+   }
+   return pool;
+ };
+ // Create the pool
+ const redisPool = createRedisPool();
+ let isPoolInitialized = false;
+ const getClient = () => {
+   if (!isPoolInitialized) {
+     throw new Error('Redis pool not initialized');
+   }
+   if (redisPool.length === 0) {
+     throw new Error('No available Redis connections in pool');
+   }
+   const client = redisPool[currentClientIndex];
+   currentClientIndex = (currentClientIndex + 1) % redisPool.length;
+   totalRequests++;
+   // Log every 1000 requests
+   if (totalRequests % 1000 === 0) {
+     console.log(`[Redis] Total requests processed: ${totalRequests}`);
+   }
+   return client;
+ };
+ const testRedisConnection = async (client) => {
+   try {
+     await client.set('test:connection', 'ok', 'EX', 10);
+     const result = await client.get('test:connection');
+     return result === 'ok';
+   }
+   catch (error) {
+     console.error('[Redis] Connection test failed:', error);
+     return false;
+   }
+ };
+ // Initialize all clients
+ const initializePool = async () => {
+   try {
+     console.log('[Redis] Initializing pool...');
+     const results = await Promise.all(redisPool.map(client => testRedisConnection(client)));
+     const successCount = results.filter(Boolean).length;
+     if (successCount === 0) {
+       throw new Error('Failed to initialize any Redis connections');
+     }
+     if (successCount < redisPool.length) {
+       console.warn(`[Redis] Only ${successCount}/${redisPool.length} connections initialized successfully`);
+     }
+     isPoolInitialized = true;
+     console.log('[Redis] Pool initialization completed');
+   }
+   catch (error) {
+     console.error('[Redis] Pool initialization failed:', error);
+     throw error;
+   }
+ };
+ // Health check function
+ export const checkPoolHealth = () => {
+   const health = {
+     totalConnections: redisPool.length,
+     activeConnections: redisPool.filter(client => client.status === 'ready').length,
+     failedConnections,
+     lastErrorTime,
+     timestamp: new Date().toISOString()
+   };
+   // Check for problems and log
+   if (health.activeConnections < ALERT_THRESHOLDS.MIN_ACTIVE_CONNECTIONS) {
+     console.error(`[Redis CRITICAL] Low active connections: ${health.activeConnections}/${POOL_SIZE}. Pool may be exhausted.`);
+   }
+   if (health.failedConnections > ALERT_THRESHOLDS.MAX_FAILED_CONNECTIONS) {
+     console.error(`[Redis CRITICAL] High number of failed connections: ${health.failedConnections}. Redis may be having issues.`);
+   }
+   if (health.lastErrorTime && (Date.now() - health.lastErrorTime) < ALERT_THRESHOLDS.ERROR_WINDOW) {
+     console.error(`[Redis WARNING] Recent Redis errors detected. Last error: ${new Date(health.lastErrorTime).toISOString()}`);
+   }
+   // Log health status
+   console.log('[Redis] Pool Health:', {
+     ...health,
+     lastErrorTime: health.lastErrorTime ? new Date(health.lastErrorTime).toISOString() : null
+   });
+   return health;
+ };
+ // Clear cache for a specific key
+ export const clearCache = async (key) => {
+   const client = getClient();
+   try {
+     await client.del(key);
+   }
+   catch (error) {
+     console.error('[Redis] Clear cache error:', error);
+   }
+ };
+ // Clear cache by pattern
+ export const clearCacheByPattern = async (pattern) => {
+   const client = getClient();
+   try {
+     const keys = await client.keys(pattern);
+     if (keys.length > 0) {
+       const pipeline = client.pipeline();
+       keys.forEach(key => pipeline.del(key));
+       await pipeline.exec();
+     }
+   }
+   catch (error) {
+     console.error('[Redis] Clear cache by pattern error:', error);
+   }
+ };
+ // Cleanup function to properly close all connections
+ export const cleanupPool = async () => {
+   console.log('[Redis] Starting pool cleanup...');
+   isPoolInitialized = false;
+   try {
+     const closePromises = redisPool.map(async (client, index) => {
+       try {
+         await client.quit();
+         console.log(`[Redis] Client ${index} closed successfully`);
+       }
+       catch (error) {
+         console.error(`[Redis] Error closing client ${index}:`, error);
+         // Force close if quit fails
+         try {
+           await client.disconnect();
+         }
+         catch (disconnectError) {
+           console.error(`[Redis] Error force disconnecting client ${index}:`, disconnectError);
+         }
+       }
+     });
+     await Promise.all(closePromises);
+     console.log('[Redis] Pool cleanup completed');
+   }
+   catch (error) {
+     console.error('[Redis] Error during pool cleanup:', error);
+     throw error;
+   }
+ };
+ // Cache wrapper function
+ export const withCache = async (key, fn, ttl = CACHE_TTL, override = false) => {
+   console.log('withCache', key, override);
+   const client = getClient();
+   try {
+     // Try to get from cache first
+     const cachedResult = await client.get(key);
+     if (cachedResult && !override) {
+       console.log('found cached result');
+       try {
+         const parsed = JSON.parse(cachedResult);
+         if (Array.isArray(parsed)) {
+           return parsed.map(id => {
+             if (typeof id === 'object' && id !== null) {
+               return id.id;
+             }
+             return typeof id === 'string' ? parseInt(id, 10) : id;
+           }).filter(id => !isNaN(id));
+         }
+         return parsed;
+       }
+       catch (parseError) {
+         await client.del(key);
+       }
+     }
+     const result = await fn();
+     if (result != null) {
+       console.log('no result - adding to cache for: ', key);
+       try {
+         const toCache = Array.isArray(result)
+           ? result.map(id => {
+             if (typeof id === 'object' && id !== null) {
+               return id.id;
+             }
+             return typeof id === 'string' ? parseInt(id, 10) : id;
+           }).filter(id => !isNaN(id))
+           : result;
+         await client.setex(key, ttl, JSON.stringify(toCache));
+       }
+       catch (cacheError) {
+         console.error('[Redis] Cache error:', cacheError);
+       }
+     }
+     return result;
+   }
+   catch (error) {
+     try {
+       return await fn();
+     }
+     catch (fnError) {
+       throw fnError;
+     }
+   }
+ };
+ const startRedis = async () => {
+   // Initialize the pool
+   initializePool().then(() => {
+     // Run health check after pool is initialized
+     checkPoolHealth();
+   }).catch(error => {
+     console.error('[Redis] Failed to initialize pool:', error);
+     process.exit(1);
+   });
+   // Handle process termination
+   process.on('SIGTERM', async () => {
+     console.log('[Redis] Received SIGTERM signal');
+     await cleanupPool();
+     process.exit(0);
+   });
+   process.on('SIGINT', async () => {
+     console.log('[Redis] Received SIGINT signal');
+     await cleanupPool();
+     process.exit(0);
+   });
+   // Handle PM2 restarts and crashes
+   process.on('uncaughtException', async (error) => {
+     console.error('[Redis] Uncaught Exception:', error);
+     await cleanupPool();
+     process.exit(1);
+   });
+   process.on('unhandledRejection', async (reason, promise) => {
+     console.error('[Redis] Unhandled Rejection at:', promise, 'reason:', reason);
+     await cleanupPool();
+     process.exit(1);
+   });
+   // Handle PM2 graceful shutdown
+   if (process.env.NODE_ENV === 'production') {
+     process.on('message', async (msg) => {
+       if (msg === 'shutdown') {
+         console.log('[Redis] Received PM2 shutdown message');
+         await cleanupPool();
+         process.exit(0);
+       }
+     });
+   }
+ };
+ async function deleteCacheMatchingPattern(pattern, { count = 1000, useUnlink = true } = {}) {
+   const client = getClient(); // <-- get a pooled client
+   let cursor = '0';
+   do {
+     // ioredis scan signature: scan(cursor, ...args) -> [nextCursor, keys[]]
+     const [nextCursor, keys] = await client.scan(cursor, 'MATCH', pattern, 'COUNT', count);
+     if (keys.length) {
+       console.log('deleteCacheMatchingPattern: deleting keys:', keys);
+       const pipeline = client.pipeline();
+       // Prefer UNLINK for async deletion (doesn't block Redis); fall back to DEL
+       if (useUnlink && typeof client.unlink === 'function') {
+         keys.forEach(k => pipeline.unlink(k));
+       }
+       else {
+         keys.forEach(k => pipeline.del(k));
+       }
+       await pipeline.exec();
+     }
+     cursor = nextCursor;
+   } while (cursor !== '0');
+ }
+ export default {
+   getClient,
+   testRedisConnection,
+   checkPoolHealth,
+   clearCache,
+   clearCacheByPattern,
+   cleanupPool,
+   withCache,
+   startRedis,
+   deleteCacheMatchingPattern,
+ };
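
Two behaviors of withCache above are easy to miss: array results are coerced to numeric ids (objects are reduced to their id property, strings are parseInt-ed, anything non-numeric is dropped) before caching, and any Redis error falls through to calling fn() directly, so a cache outage degrades to uncached reads rather than failures. A sketch of a call relying on those semantics — the binding name and loader are illustrative:

    // Illustrative; `cache` is this module's default export, loadReleaseIds a placeholder loader.
    import cache from './redis/index.js';

    const ids = await cache.withCache(
      'releases:ids:latest',   // cache key
      () => loadReleaseIds(),  // runs on a cache miss, or always when override is true
      300                      // per-call TTL in seconds, instead of the 2-hour default
    );
    // Only numeric ids survive the round-trip: the map/filter above strips
    // anything parseInt cannot turn into a number.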
package/dist/redis/subscriptions.js
ADDED

@@ -0,0 +1,110 @@
+ import redis from './index.js';
+ import Subscription from '../models/Subscription.js';
+ import Account from '../models/Account.js';
+ import { formatColumnForJsonFields, BIG_LIMIT } from '../utils/index.js';
+ const SUBSCRIPTION_TO = 'subscription:to';
+ const getFollowersForAccountWithCache = async (publicKeyOrHandle, query, override = false) => {
+   try {
+     let { offset = 0, limit = BIG_LIMIT, sort = 'desc', column = 'datetime' } = query;
+     return redis.withCache(`${SUBSCRIPTION_TO}:${publicKeyOrHandle}:${offset}:${limit}:${sort}:${column}`, async () => {
+       let account = await Account.query().findOne({ publicKey: publicKeyOrHandle });
+       if (!account) {
+         account = await Account.query().findOne({ handle: publicKeyOrHandle });
+         if (!account) {
+           throw new Error(`Account not found for publicKeyOrHandle ${publicKeyOrHandle}`);
+         }
+       }
+       const publicKey = account.publicKey;
+       column = formatColumnForJsonFields(column);
+       const subscriptions = await Subscription.query()
+         .where('to', publicKey)
+         .orderBy(column, sort)
+         .range(Number(offset), Number(offset) + Number(limit) - 1);
+       const followers = [];
+       for await (let subscription of subscriptions.results) {
+         if (subscription.subscriptionType === 'account') {
+           const account = await Account.findOrCreate(subscription.from);
+           await account.format();
+           delete subscription.id;
+           followers.push({
+             account,
+             subscription,
+           });
+         }
+       }
+       return {
+         followers,
+         total: subscriptions.total
+       };
+     }, undefined, override);
+   }
+   catch (error) {
+     console.log('getFollowersForAccountWithCache error', error);
+   }
+ };
+ const getUserFollowingAccountWithCache = async (publicKeyOrHandle, followingPublicKeyOrHandle, override = false) => {
+   try {
+     return redis.withCache(`following:${publicKeyOrHandle}:${followingPublicKeyOrHandle}`, async () => {
+       let account = await Account.query().findOne({ publicKey: publicKeyOrHandle });
+       if (!account) {
+         account = await Account.query().findOne({ handle: publicKeyOrHandle });
+         if (!account) {
+           ctx.status = 404;
+           ctx.body = {
+             success: false,
+             following: false,
+             message: `Account not found with publicKey: ${publicKeyOrHandle}`
+           };
+           return;
+         }
+       }
+       let followingAccount = await Account.query().findOne({ publicKey: followingPublicKeyOrHandle });
+       if (!followingAccount) {
+         followingAccount = await Account.query().findOne({ handle: followingPublicKeyOrHandle });
+         if (!followingAccount) {
+           followingAccount = await Hub.query().findOne({ publicKey: followingPublicKeyOrHandle });
+           if (!followingAccount) {
+             followingAccount = await Hub.query().findOne({ handle: followingPublicKeyOrHandle });
+           }
+           if (!followingAccount) {
+             ctx.status = 404;
+             ctx.body = {
+               success: false,
+               following: false,
+               message: `Account not found with publicKey: ${followingPublicKeyOrHandle}`
+             };
+             return;
+           }
+         }
+       }
+       const publicKey = account.publicKey;
+       const followingPublicKey = followingAccount.publicKey;
+       const subscriptions = await Subscription.query()
+         .where('from', publicKey)
+         .andWhere('to', followingPublicKey);
+       return subscriptions.length > 0;
+     }, undefined, override);
+   }
+   catch (error) {
+     console.log('getUserFollowingAccountWithCache error', error);
+   }
+ };
+ const deleteCacheAfterAccountFollow = async (toPublicKey, toHandle, fromPublicKey, fromHandle) => {
+   try {
+     console.log('deleteCacheAfterAccountFollow', toHandle, toPublicKey);
+     await redis.deleteCacheMatchingPattern(`${SUBSCRIPTION_TO}:${toPublicKey}*`);
+     await redis.deleteCacheMatchingPattern(`${SUBSCRIPTION_TO}:${toHandle}*`);
+     await redis.deleteCacheMatchingPattern(`following:${fromPublicKey}:${toPublicKey}`);
+     await redis.deleteCacheMatchingPattern(`following:${fromPublicKey}:${toHandle}`);
+     await redis.deleteCacheMatchingPattern(`following:${fromHandle}:${toPublicKey}`);
+     await redis.deleteCacheMatchingPattern(`following:${fromHandle}:${toHandle}`);
+   }
+   catch (error) {
+     console.log('deleteCacheAfterAccountFollow error: ', error);
+   }
+ };
+ export default {
+   getFollowersForAccountWithCache,
+   getUserFollowingAccountWithCache,
+   deleteCacheAfterAccountFollow,
+ };
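
The subscription helpers pair cached reads with targeted invalidation: follower lists are cached per account, offset, limit, sort, and column, and deleteCacheAfterAccountFollow evicts both the follower-list keys and the pairwise "following" flags for every publicKey/handle combination. An illustrative flow, assuming the pool has been initialized (all identifiers below are placeholders):

    // Placeholders throughout; assumes redis.startRedis() has completed.
    const { followers, total } = await SubscriptionsWithCache.getFollowersForAccountWithCache(
      'artistPublicKeyOrHandle',
      { offset: 0, limit: 50, sort: 'desc', column: 'datetime' }
    );

    // After indexing a follow, evict the stale entries so the next read repopulates them.
    await SubscriptionsWithCache.deleteCacheAfterAccountFollow(
      'toPublicKey', 'toHandle', 'fromPublicKey', 'fromHandle'
    );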
package/dist/utils/index.js
CHANGED

@@ -43,3 +43,11 @@ export const tweetNewRelease = async (metadata, publisherId, slug) => {
    }
  }
  };
+ export const BIG_LIMIT = 5000;
+ export const formatColumnForJsonFields = (column, fieldName = 'metadata') => {
+   if (column.includes(':')) {
+     column = fieldName + ':' + column.split(':')[1];
+     column = ref(column).castText();
+   }
+   return column;
+ };
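
The new formatColumnForJsonFields helper rewrites colon-delimited sort columns into references on a JSON field. Note that ref is called without being visible in this hunk; presumably it is Objection's ref helper, imported elsewhere in the file. A sketch of the intended behavior, assuming that import:

    import { ref } from 'objection'; // assumed — the import is not visible in the diffed hunk

    formatColumnForJsonFields('datetime');
    // -> 'datetime' (no colon, returned unchanged)

    formatColumnForJsonFields('metadata:name');
    // -> ref('metadata:name').castText(), i.e. a JSON-field reference
    //    that Objection's orderBy() can cast and sort as text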
package/package.json
CHANGED

@@ -1,6 +1,6 @@
  {
    "name": "@nina-protocol/nina-db",
-   "version": "0.0.108",
+   "version": "0.0.110",
    "description": "",
    "source": "src/index.js",
    "main": "dist/index.js",
@@ -22,10 +22,12 @@
    "dependencies": {
      "@babel/plugin-proposal-object-rest-spread": "^7.20.7",
      "@babel/preset-es2015": "^7.0.0-beta.53",
-     "@metaplex-foundation/js": "^0.18.1",
      "@coral-xyz/anchor": "^0.31.0",
+     "@metaplex-foundation/js": "^0.18.1",
+     "@project-serum/anchor": "^0.26.0",
      "@solana/spl-token": "^0.4.9",
      "axios": "^0.27.2",
+     "ioredis": "^5.7.0",
      "knex": "^2.2.0",
      "nanoid": "^4.0.2",
      "objection": "2.2.15",
package/src/index.js
CHANGED

@@ -3,6 +3,8 @@ import Knex from 'knex'
  import { Model } from 'objection'
  import Models from './models/index.js'
  import knexConfig from './knexfile.js'
+ import RedisSubscriptions from './redis/subscriptions.js'
+ import IndexerRedis from './redis/index.js'

  export const initDb = async (config) => {
    const db = Knex(config.development)
@@ -34,4 +36,6 @@ export const Subscription = Models.Subscription
  export const Tag = Models.Tag
  export const Transaction = Models.Transaction
  export const Verification = Models.Verification
- export const config = knexConfig
+ export const config = knexConfig
+ export const redis = IndexerRedis
+ export const SubscriptionsWithCache = RedisSubscriptions
package/src/models/Release.js
CHANGED

@@ -161,7 +161,7 @@ export default class Release extends Model {
    }
  };

- static createRelease = async ({publicKey, mint, metadata, datetime, publisherId, releaseAccount, programId}) => {
+ static createRelease = async ({publicKey, mint, metadata, datetime, publisherId, releaseAccount, programId, solanaAddress}) => {
    const slug = await this.generateSlug(metadata);
    const price =
      releaseAccount.account?.price?.toNumber() ||
@@ -181,6 +181,7 @@ export default class Release extends Model {
      paymentMint,
      archived: false,
      programId,
+     solanaAddress,
    })
    if (metadata.properties.tags) {
      for await (let tag of metadata.properties.tags) {
package/src/redis/index.js
ADDED

@@ -0,0 +1,382 @@
+ import Redis from 'ioredis';
+ import dotenv from 'dotenv';
+
+ dotenv.config();
+
+ const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
+ const CACHE_TTL = 7200; // 2 hours in seconds
+ const SEARCH_CACHE_TTL = 300; // 5 minutes in seconds for search queries
+ const POOL_SIZE = 10;
+ const CONNECTION_TIMEOUT = 5000; // 5 seconds
+ const OPERATION_TIMEOUT = 10000; // 10 seconds
+
+ // Track pool health
+ let failedConnections = 0;
+ let lastErrorTime = null;
+ const MAX_FAILURES = 5;
+ const FAILURE_WINDOW = 60000; // 1 minute
+
+ // Alert thresholds
+ const ALERT_THRESHOLDS = {
+   MIN_ACTIVE_CONNECTIONS: 5, // Alert if less than 5 active connections
+   MAX_FAILED_CONNECTIONS: 3, // Alert if more than 3 failed connections
+   ERROR_WINDOW: 5 * 60 * 1000 // 5 minutes
+ };
+
+ // Get a client from the pool using round-robin with timeout
+ let currentClientIndex = 0;
+ let totalRequests = 0;
+
+ // Create Redis connection pool
+ const createRedisPool = (size = POOL_SIZE) => {
+   const pool = [];
+   let activeConnections = 0;
+
+   for (let i = 0; i < size; i++) {
+     const client = new Redis(REDIS_URL, {
+       retryStrategy: (times) => {
+         const delay = Math.min(times * 1000, 5000);
+         return delay;
+       },
+       maxRetriesPerRequest: 3,
+       connectTimeout: CONNECTION_TIMEOUT,
+       commandTimeout: OPERATION_TIMEOUT,
+       enableOfflineQueue: true,
+       enableReadyCheck: true,
+       reconnectOnError: (err) => {
+         console.error('[Redis] Reconnect on error:', err);
+         return true;
+       },
+       lazyConnect: true,
+       keepAlive: 10000
+     });
+
+     client.on('error', (error) => {
+       console.error(`[Redis Pool Client ${i}] Error:`, error);
+       failedConnections++;
+
+       // Check if we're having too many failures
+       const now = Date.now();
+       if (lastErrorTime && (now - lastErrorTime) < FAILURE_WINDOW) {
+         if (failedConnections >= MAX_FAILURES) {
+           console.error('[Redis] Too many failures in short time, removing client from pool');
+           const index = pool.indexOf(client);
+           if (index > -1) {
+             pool.splice(index, 1);
+             client.disconnect();
+           }
+         }
+       } else {
+         // Reset failure count if outside window
+         failedConnections = 1;
+         lastErrorTime = now;
+       }
+     });
+
+     client.on('connect', () => {
+       activeConnections++;
+       console.log(`[Redis] Client ${i} connected. Active connections: ${activeConnections}`);
+     });
+
+     client.on('close', () => {
+       activeConnections--;
+       console.log(`[Redis] Client ${i} disconnected. Active connections: ${activeConnections}`);
+     });
+
+     client.on('timeout', () => {
+       console.error(`[Redis] Client ${i} timed out`);
+       client.disconnect();
+     });
+
+     pool.push(client);
+   }
+   return pool;
+ };
+
+ // Create the pool
+ const redisPool = createRedisPool();
+ let isPoolInitialized = false;
+
+ const getClient = () => {
+   if (!isPoolInitialized) {
+     throw new Error('Redis pool not initialized');
+   }
+
+   if (redisPool.length === 0) {
+     throw new Error('No available Redis connections in pool');
+   }
+
+   const client = redisPool[currentClientIndex];
+   currentClientIndex = (currentClientIndex + 1) % redisPool.length;
+   totalRequests++;
+
+   // Log every 1000 requests
+   if (totalRequests % 1000 === 0) {
+     console.log(`[Redis] Total requests processed: ${totalRequests}`);
+   }
+
+   return client;
+ };
+
+ const testRedisConnection = async (client) => {
+   try {
+     await client.set('test:connection', 'ok', 'EX', 10);
+     const result = await client.get('test:connection');
+     return result === 'ok';
+   } catch (error) {
+     console.error('[Redis] Connection test failed:', error);
+     return false;
+   }
+ };
+
+ // Initialize all clients
+ const initializePool = async () => {
+   try {
+     console.log('[Redis] Initializing pool...');
+     const results = await Promise.all(redisPool.map(client => testRedisConnection(client)));
+     const successCount = results.filter(Boolean).length;
+
+     if (successCount === 0) {
+       throw new Error('Failed to initialize any Redis connections');
+     }
+
+     if (successCount < redisPool.length) {
+       console.warn(`[Redis] Only ${successCount}/${redisPool.length} connections initialized successfully`);
+     }
+
+     isPoolInitialized = true;
+     console.log('[Redis] Pool initialization completed');
+   } catch (error) {
+     console.error('[Redis] Pool initialization failed:', error);
+     throw error;
+   }
+ };
+
+
+ // Health check function
+ export const checkPoolHealth = () => {
+   const health = {
+     totalConnections: redisPool.length,
+     activeConnections: redisPool.filter(client => client.status === 'ready').length,
+     failedConnections,
+     lastErrorTime,
+     timestamp: new Date().toISOString()
+   };
+
+   // Check for problems and log
+   if (health.activeConnections < ALERT_THRESHOLDS.MIN_ACTIVE_CONNECTIONS) {
+     console.error(`[Redis CRITICAL] Low active connections: ${health.activeConnections}/${POOL_SIZE}. Pool may be exhausted.`);
+   }
+
+   if (health.failedConnections > ALERT_THRESHOLDS.MAX_FAILED_CONNECTIONS) {
+     console.error(`[Redis CRITICAL] High number of failed connections: ${health.failedConnections}. Redis may be having issues.`);
+   }
+
+   if (health.lastErrorTime && (Date.now() - health.lastErrorTime) < ALERT_THRESHOLDS.ERROR_WINDOW) {
+     console.error(`[Redis WARNING] Recent Redis errors detected. Last error: ${new Date(health.lastErrorTime).toISOString()}`);
+   }
+
+   // Log health status
+   console.log('[Redis] Pool Health:', {
+     ...health,
+     lastErrorTime: health.lastErrorTime ? new Date(health.lastErrorTime).toISOString() : null
+   });
+
+   return health;
+ };
+
+
+ // Clear cache for a specific key
+ export const clearCache = async (key) => {
+   const client = getClient();
+   try {
+     await client.del(key);
+   } catch (error) {
+     console.error('[Redis] Clear cache error:', error);
+   }
+ };
+
+ // Clear cache by pattern
+ export const clearCacheByPattern = async (pattern) => {
+   const client = getClient();
+   try {
+     const keys = await client.keys(pattern);
+     if (keys.length > 0) {
+       const pipeline = client.pipeline();
+       keys.forEach(key => pipeline.del(key));
+       await pipeline.exec();
+     }
+   } catch (error) {
+     console.error('[Redis] Clear cache by pattern error:', error);
+   }
+ };
+
+ // Cleanup function to properly close all connections
+ export const cleanupPool = async () => {
+   console.log('[Redis] Starting pool cleanup...');
+   isPoolInitialized = false;
+
+   try {
+     const closePromises = redisPool.map(async (client, index) => {
+       try {
+         await client.quit();
+         console.log(`[Redis] Client ${index} closed successfully`);
+       } catch (error) {
+         console.error(`[Redis] Error closing client ${index}:`, error);
+         // Force close if quit fails
+         try {
+           await client.disconnect();
+         } catch (disconnectError) {
+           console.error(`[Redis] Error force disconnecting client ${index}:`, disconnectError);
+         }
+       }
+     });
+
+     await Promise.all(closePromises);
+     console.log('[Redis] Pool cleanup completed');
+   } catch (error) {
+     console.error('[Redis] Error during pool cleanup:', error);
+     throw error;
+   }
+ };
+
+ // Cache wrapper function
+ export const withCache = async (key, fn, ttl = CACHE_TTL, override = false) => {
+   console.log('withCache', key, override)
+   const client = getClient();
+   try {
+     // Try to get from cache first
+     const cachedResult = await client.get(key);
+
+     if (cachedResult && !override) {
+       console.log('found cached result')
+       try {
+         const parsed = JSON.parse(cachedResult);
+         if (Array.isArray(parsed)) {
+           return parsed.map(id => {
+             if (typeof id === 'object' && id !== null) {
+               return id.id;
+             }
+             return typeof id === 'string' ? parseInt(id, 10) : id;
+           }).filter(id => !isNaN(id));
+         }
+         return parsed;
+       } catch (parseError) {
+         await client.del(key);
+       }
+     }
+
+     const result = await fn();
+
+     if (result != null) {
+       console.log('no result - adding to cache for: ', key)
+       try {
+         const toCache = Array.isArray(result)
+           ? result.map(id => {
+             if (typeof id === 'object' && id !== null) {
+               return id.id;
+             }
+             return typeof id === 'string' ? parseInt(id, 10) : id;
+           }).filter(id => !isNaN(id))
+           : result;
+
+         await client.setex(key, ttl, JSON.stringify(toCache));
+       } catch (cacheError) {
+         console.error('[Redis] Cache error:', cacheError);
+       }
+     }
+
+     return result;
+   } catch (error) {
+     try {
+       return await fn();
+     } catch (fnError) {
+       throw fnError;
+     }
+   }
+ };
+
+ const startRedis = async() => {
+   // Initialize the pool
+   initializePool().then(() => {
+     // Run health check after pool is initialized
+     checkPoolHealth();
+   }).catch(error => {
+     console.error('[Redis] Failed to initialize pool:', error);
+     process.exit(1);
+   });
+
+   // Handle process termination
+   process.on('SIGTERM', async () => {
+     console.log('[Redis] Received SIGTERM signal');
+     await cleanupPool();
+     process.exit(0);
+   });
+
+   process.on('SIGINT', async () => {
+     console.log('[Redis] Received SIGINT signal');
+     await cleanupPool();
+     process.exit(0);
+   });
+
+   // Handle PM2 restarts and crashes
+   process.on('uncaughtException', async (error) => {
+     console.error('[Redis] Uncaught Exception:', error);
+     await cleanupPool();
+     process.exit(1);
+   });
+
+   process.on('unhandledRejection', async (reason, promise) => {
+     console.error('[Redis] Unhandled Rejection at:', promise, 'reason:', reason);
+     await cleanupPool();
+     process.exit(1);
+   });
+
+   // Handle PM2 graceful shutdown
+   if (process.env.NODE_ENV === 'production') {
+     process.on('message', async (msg) => {
+       if (msg === 'shutdown') {
+         console.log('[Redis] Received PM2 shutdown message');
+         await cleanupPool();
+         process.exit(0);
+       }
+     });
+   }
+ }
+
+ async function deleteCacheMatchingPattern(pattern, { count = 1000, useUnlink = true } = {}) {
+   const client = getClient(); // <-- get a pooled client
+   let cursor = '0';
+
+   do {
+     // ioredis scan signature: scan(cursor, ...args) -> [nextCursor, keys[]]
+     const [nextCursor, keys] = await client.scan(cursor, 'MATCH', pattern, 'COUNT', count);
+
+     if (keys.length) {
+       console.log('deleteCacheMatchingPattern: deleting keys:', keys)
+       const pipeline = client.pipeline();
+       // Prefer UNLINK for async deletion (doesn't block Redis); fall back to DEL
+       if (useUnlink && typeof client.unlink === 'function') {
+         keys.forEach(k => pipeline.unlink(k));
+       } else {
+         keys.forEach(k => pipeline.del(k));
+       }
+       await pipeline.exec();
+     }
+
+     cursor = nextCursor;
+   } while (cursor !== '0');
+ }
+
+
+ export default {
+   getClient,
+   testRedisConnection,
+   checkPoolHealth,
+   clearCache,
+   clearCacheByPattern,
+   cleanupPool,
+   withCache,
+   startRedis,
+   deleteCacheMatchingPattern,
+ };
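
The module now carries two pattern-deletion paths: clearCacheByPattern collects every match in one shot with KEYS, which scans the whole keyspace and blocks the server while it runs, whereas deleteCacheMatchingPattern iterates with SCAN and prefers UNLINK, so large deletions happen in non-blocking batches. Illustrative calls through the module's default export (the binding name and pattern are placeholders):

    // Illustrative only; `cache` is this module's default export.
    import cache from './redis/index.js';

    await cache.clearCacheByPattern('subscription:to:*');   // KEYS + DEL in one pipeline

    await cache.deleteCacheMatchingPattern('subscription:to:*', {
      count: 1000,     // SCAN COUNT hint per iteration
      useUnlink: true  // UNLINK reclaims memory asynchronously; falls back to DEL
    });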
package/src/redis/subscriptions.js
ADDED

@@ -0,0 +1,132 @@
+ import redis from './index.js';
+ import Subscription from '../models/Subscription.js';
+ import Account from '../models/Account.js';
+ import { formatColumnForJsonFields, BIG_LIMIT } from '../utils/index.js';
+
+ const SUBSCRIPTION_TO = 'subscription:to'
+ const getFollowersForAccountWithCache = async (publicKeyOrHandle, query, override=false) => {
+   try {
+     let { offset=0, limit=BIG_LIMIT, sort='desc', column='datetime' } = query;
+     return redis.withCache(`${SUBSCRIPTION_TO}:${publicKeyOrHandle}:${offset}:${limit}:${sort}:${column}`, async () => {
+       let account = await Account.query().findOne({publicKey: publicKeyOrHandle});
+       if (!account) {
+         account = await Account.query().findOne({handle: publicKeyOrHandle});
+         if (!account) {
+           throw new Error(`Account not found for publicKeyOrHandle ${publicKeyOrHandle}`)
+         }
+       }
+       const publicKey = account.publicKey
+       column = formatColumnForJsonFields(column);
+       const subscriptions = await Subscription.query()
+         .where('to', publicKey)
+         .orderBy(column, sort)
+         .range(Number(offset), Number(offset) + Number(limit) - 1);
+
+       const followers = []
+       for await (let subscription of subscriptions.results) {
+         if (subscription.subscriptionType === 'account') {
+           const account = await Account.findOrCreate(subscription.from);
+           await account.format();
+           delete subscription.id
+
+           followers.push({
+             account,
+             subscription,
+           })
+         }
+       }
+       return {
+         followers,
+         total: subscriptions.total
+       }
+     },
+     undefined,
+     override
+     );
+   } catch (error) {
+     console.log('getFollowersForAccountWithCache error', error)
+   }
+ };
+
+ const getUserFollowingAccountWithCache = async (
+   publicKeyOrHandle,
+   followingPublicKeyOrHandle,
+   override = false,
+ ) => {
+   try {
+     return redis.withCache(
+       `following:${publicKeyOrHandle}:${followingPublicKeyOrHandle}`, async () => {
+       let account = await Account.query().findOne({publicKey: publicKeyOrHandle});
+       if (!account) {
+         account = await Account.query().findOne({handle: publicKeyOrHandle});
+         if (!account) {
+           ctx.status = 404
+           ctx.body = {
+             success: false,
+             following:false,
+             message: `Account not found with publicKey: ${publicKeyOrHandle}`
+           }
+           return;
+         }
+       }
+       let followingAccount = await Account.query().findOne({publicKey: followingPublicKeyOrHandle});
+       if (!followingAccount) {
+         followingAccount = await Account.query().findOne({handle: followingPublicKeyOrHandle});
+         if (!followingAccount) {
+           followingAccount = await Hub.query().findOne({publicKey: followingPublicKeyOrHandle});
+           if (!followingAccount) {
+             followingAccount = await Hub.query().findOne({handle: followingPublicKeyOrHandle});
+           }
+           if (!followingAccount) {
+             ctx.status = 404
+             ctx.body = {
+               success: false,
+               following:false,
+               message: `Account not found with publicKey: ${followingPublicKeyOrHandle}`
+             }
+             return;
+           }
+         }
+       }
+       const publicKey = account.publicKey
+       const followingPublicKey = followingAccount.publicKey
+       const subscriptions = await Subscription.query()
+         .where('from', publicKey)
+         .andWhere('to', followingPublicKey)
+
+       return subscriptions.length > 0
+
+     },
+     undefined,
+     override
+     )
+   } catch (error) {
+     console.log('getUserFollowingAccountWithCache error', error)
+   }
+ }
+
+ const deleteCacheAfterAccountFollow = async(
+   toPublicKey,
+   toHandle,
+   fromPublicKey,
+   fromHandle,
+ ) => {
+   try {
+     console.log('deleteCacheAfterAccountFollow', toHandle, toPublicKey)
+     await redis.deleteCacheMatchingPattern(`${SUBSCRIPTION_TO}:${toPublicKey}*`)
+     await redis.deleteCacheMatchingPattern(`${SUBSCRIPTION_TO}:${toHandle}*`)
+     await redis.deleteCacheMatchingPattern(`following:${fromPublicKey}:${toPublicKey}`)
+     await redis.deleteCacheMatchingPattern(`following:${fromPublicKey}:${toHandle}`)
+     await redis.deleteCacheMatchingPattern(`following:${fromHandle}:${toPublicKey}`)
+     await redis.deleteCacheMatchingPattern(`following:${fromHandle}:${toHandle}`)
+
+   } catch (error) {
+     console.log('deleteCacheAfterAccountFollow error: ', error)
+   }
+ }
+
+ export default {
+   getFollowersForAccountWithCache,
+   getUserFollowingAccountWithCache,
+   deleteCacheAfterAccountFollow,
+ }
package/src/utils/index.js
CHANGED

@@ -45,4 +45,14 @@ export const tweetNewRelease = async (metadata, publisherId, slug) => {
      console.warn('error sending new release tweet: ', error, metadata)
    }
  }
+ }
+
+ export const BIG_LIMIT = 5000;
+
+ export const formatColumnForJsonFields = (column, fieldName='metadata') => {
+   if (column.includes(':')) {
+     column = fieldName + ':' + column.split(':')[1]
+     column = ref(column).castText()
+   }
+   return column
  }