api-ape 2.2.2 → 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +88 -17
- package/client/browser.js +7 -7
- package/client/connectSocket.js +257 -22
- package/client/index.js +3 -3
- package/dist/ape.js +1 -1
- package/dist/ape.js.map +3 -3
- package/dist/api-ape.min.js +1 -1
- package/dist/api-ape.min.js.map +3 -3
- package/index.d.ts +183 -19
- package/package.json +2 -2
- package/server/README.md +311 -5
- package/server/adapters/README.md +275 -0
- package/server/adapters/firebase.js +172 -0
- package/server/adapters/index.js +144 -0
- package/server/adapters/mongo.js +161 -0
- package/server/adapters/postgres.js +177 -0
- package/server/adapters/redis.js +154 -0
- package/server/adapters/supabase.js +199 -0
- package/server/index.js +3 -3
- package/server/lib/broadcast.js +115 -49
- package/server/lib/bun.js +4 -4
- package/server/lib/fileTransfer.js +129 -0
- package/server/lib/longPolling.js +22 -13
- package/server/lib/main.js +40 -8
- package/server/lib/wiring.js +23 -19
- package/server/socket/receive.js +46 -0
- package/server/socket/send.js +7 -0
|
/**
 * MongoDB Adapter for APE Cluster
 *
 * Uses MongoDB Change Streams for real-time inter-server messaging.
 * Requires replica set for change stream support.
 */

/**
 * Create MongoDB adapter
 * @param {MongoClient} mongoClient - Connected MongoDB client
 * @param {object} opts
 * @param {string} opts.serverId - This server's unique ID
 * @param {string} [opts.namespace='ape'] - Database/collection prefix
 * @returns {Promise<AdapterInstance>}
 */
async function createMongoAdapter(mongoClient, { serverId, namespace = 'ape' }) {
  if (!serverId) throw new Error('serverId required');

  // State machine: INIT -> JOINED -> LEFT (rejoin after leave is not allowed)
  let state = 'INIT';
  const ownedClients = new Set(); // clientIds registered by THIS server instance
  const handlers = new Map();     // targetServerId ('' = broadcast) -> message handler
  let changeStream = null;

  // Use dedicated database for APE cluster so bookkeeping stays out of app data
  const db = mongoClient.db(`${namespace}_cluster`);
  const clientsCol = db.collection('clients');
  const eventsCol = db.collection('events');

  // Ensure indexes: unique client mapping, per-server lookup, and a TTL index
  // so stale event documents are garbage-collected automatically.
  async function ensureIndexes() {
    await clientsCol.createIndex({ clientId: 1 }, { unique: true });
    await clientsCol.createIndex({ serverId: 1 });
    // Events TTL - auto-delete after 1 hour
    await eventsCol.createIndex({ createdAt: 1 }, { expireAfterSeconds: 3600 });
    await eventsCol.createIndex({ targetServerId: 1, createdAt: 1 });
  }

  const adapter = {
    get serverId() { return serverId; },

    /**
     * Join the cluster: create indexes and start watching the events
     * collection for inserts addressed to this server or broadcast ('').
     * @param {string} [id] - Optional override of the configured serverId
     * @throws {Error} on empty id, double join, or rejoin after leave
     */
    async join(id) {
      const sid = id || serverId;
      if (!sid?.trim()) throw new Error('serverId required');
      if (state === 'JOINED') throw new Error('already joined');
      if (state === 'LEFT') throw new Error('cannot rejoin after leave');

      await ensureIndexes();

      // Watch for events targeted to this server or broadcast
      try {
        changeStream = eventsCol.watch([
          {
            $match: {
              'fullDocument.targetServerId': { $in: [sid, ''] },
              operationType: 'insert'
            }
          }
        ], { fullDocument: 'updateLookup' });

        changeStream.on('change', (change) => {
          if (change.operationType === 'insert') {
            const doc = change.fullDocument;
            // Prefer the exact-match handler, fall back to the broadcast one
            const handler = handlers.get(doc.targetServerId) || handlers.get('');
            if (handler) {
              handler(doc.message, doc.senderServerId);
            }
          }
        });

        changeStream.on('error', (err) => {
          console.error('📛 Mongo adapter: change stream error', err.message);
        });

      } catch (e) {
        console.warn('⚠️ Mongo adapter: Change streams not available (requires replica set). Falling back to polling.');
        // Could implement polling fallback here
      }

      state = 'JOINED';
      console.log(`✅ Mongo adapter: joined as ${sid}`);
    },

    /**
     * Leave the cluster: stop the change stream and delete every client
     * mapping this server owns. Idempotent — safe to call more than once.
     */
    async leave() {
      if (state !== 'JOINED') return;
      state = 'LEFT';

      console.log(`🔴 Mongo adapter: leaving, cleaning up ${ownedClients.size} clients`);

      // Close change stream. A close failure must NOT prevent the client
      // mapping cleanup below (stale mappings would misroute messages),
      // so log and continue instead of letting the error propagate.
      if (changeStream) {
        try {
          await changeStream.close();
        } catch (e) {
          console.error('📛 Mongo adapter: failed to close change stream', e.message);
        }
        changeStream = null;
      }

      // Remove all owned client mappings
      if (ownedClients.size > 0) {
        await clientsCol.deleteMany({
          clientId: { $in: Array.from(ownedClients) }
        });
      }
      ownedClients.clear();
    },

    lookup: {
      /** Register clientId -> this server (upsert; takes over ownership). */
      async add(clientId) {
        await clientsCol.updateOne(
          { clientId },
          { $set: { clientId, serverId, updatedAt: new Date() } },
          { upsert: true }
        );
        ownedClients.add(clientId);
        console.log(`📍 Mongo adapter: registered client ${clientId} -> ${serverId}`);
      },

      /** @returns {Promise<string|null>} serverId owning clientId, or null. */
      async read(clientId) {
        const doc = await clientsCol.findOne({ clientId });
        return doc?.serverId || null;
      },

      /**
       * Remove a mapping; only allowed for clients this server owns.
       * @throws {Error} when this server is not the owner
       */
      async remove(clientId) {
        if (!ownedClients.has(clientId)) {
          throw new Error(`not owner: cannot remove client ${clientId}`);
        }
        await clientsCol.deleteOne({ clientId });
        ownedClients.delete(clientId);
        console.log(`🗑️ Mongo adapter: removed client ${clientId}`);
      }
    },

    channels: {
      /**
       * Publish a message to one server, or to all servers when
       * targetServerId is falsy (stored as '' = broadcast).
       */
      async push(targetServerId, message) {
        await eventsCol.insertOne({
          targetServerId: targetServerId || '',
          senderServerId: serverId,
          message,
          createdAt: new Date()
        });

        if (targetServerId) {
          console.log(`📤 Mongo adapter: pushed to server ${targetServerId}`);
        } else {
          console.log(`📢 Mongo adapter: broadcast to all servers`);
        }
      },

      /**
       * Register a handler for messages addressed to targetServerId
       * ('' = broadcast). Returns an async unsubscribe function.
       */
      async pull(targetServerId, handler) {
        handlers.set(targetServerId || '', handler);

        // Return unsubscribe function
        return async () => {
          handlers.delete(targetServerId || '');
        };
      }
    }
  };

  return adapter;
}

// Guarded so the file also parses under ESM (no-op outside CommonJS)
if (typeof module !== 'undefined') module.exports = { createMongoAdapter };
/**
 * PostgreSQL Adapter for APE Cluster
 *
 * Uses PostgreSQL LISTEN/NOTIFY for real-time inter-server messaging.
 * Client mappings stored in a dedicated table.
 */

/**
 * Create PostgreSQL adapter
 * @param {pg.Pool} pool - PostgreSQL connection pool
 * @param {object} opts
 * @param {string} opts.serverId - This server's unique ID
 * @param {string} [opts.namespace='ape'] - Table prefix. NOTE: interpolated
 *   directly into SQL identifiers — must be a trusted, valid identifier,
 *   never user input.
 * @returns {Promise<AdapterInstance>}
 */
async function createPostgresAdapter(pool, { serverId, namespace = 'ape' }) {
  if (!serverId) throw new Error('serverId required');

  // State machine: INIT -> JOINED -> LEFT (rejoin after leave is not allowed)
  let state = 'INIT';
  const ownedClients = new Set(); // clientIds registered by THIS server instance
  const handlers = new Map();     // targetServerId ('' = broadcast) -> message handler
  let listenerClient = null;      // dedicated connection held for LISTEN

  // Table and channel names
  const clientsTable = `${namespace}_clients`;
  const eventsChannel = `${namespace}_events`;

  // Ensure schema: the client-mapping table plus a per-server index.
  async function ensureSchema() {
    await pool.query(`
      CREATE TABLE IF NOT EXISTS ${clientsTable} (
        client_id VARCHAR(255) PRIMARY KEY,
        server_id VARCHAR(255) NOT NULL,
        updated_at TIMESTAMP DEFAULT NOW()
      );
      CREATE INDEX IF NOT EXISTS idx_${clientsTable}_server_id
        ON ${clientsTable}(server_id);
    `);
  }

  const adapter = {
    get serverId() { return serverId; },

    /**
     * Join the cluster: create the schema, check out a dedicated
     * connection, and LISTEN for notifications on the events channel.
     * @param {string} [id] - Optional override of the configured serverId
     * @throws {Error} on empty id, double join, or rejoin after leave
     */
    async join(id) {
      const sid = id || serverId;
      if (!sid?.trim()) throw new Error('serverId required');
      if (state === 'JOINED') throw new Error('already joined');
      if (state === 'LEFT') throw new Error('cannot rejoin after leave');

      await ensureSchema();

      // Create dedicated client for LISTEN (pooled clients lose
      // subscriptions when returned to the pool)
      listenerClient = await pool.connect();

      // Subscribe to NOTIFY events
      await listenerClient.query(`LISTEN ${eventsChannel}`);

      // A dedicated pg client emits 'error' if its connection drops;
      // without a listener that is an unhandled 'error' event and would
      // crash the process.
      listenerClient.on('error', (err) => {
        console.error('📛 Postgres adapter: listener connection error', err.message);
      });

      listenerClient.on('notification', (msg) => {
        try {
          const data = JSON.parse(msg.payload);

          // Check if message is for us or broadcast
          if (data.targetServerId === sid || data.targetServerId === '') {
            const handler = handlers.get(data.targetServerId) || handlers.get('');
            if (handler) {
              handler(data.message, data.senderServerId);
            }
          }
        } catch (e) {
          console.error('📛 Postgres adapter: failed to parse notification', e.message);
        }
      });

      state = 'JOINED';
      console.log(`✅ Postgres adapter: joined as ${sid}`);
    },

    /**
     * Leave the cluster: UNLISTEN, release the dedicated connection, and
     * delete every client mapping this server owns. Idempotent.
     */
    async leave() {
      if (state !== 'JOINED') return;
      state = 'LEFT';

      console.log(`🔴 Postgres adapter: leaving, cleaning up ${ownedClients.size} clients`);

      // Unlisten and release client; ignore errors from a dead connection
      if (listenerClient) {
        try {
          await listenerClient.query(`UNLISTEN ${eventsChannel}`);
          listenerClient.release();
        } catch (e) {
          // Ignore disconnect errors
        }
        listenerClient = null;
      }

      // Remove all owned client mappings in one statement
      if (ownedClients.size > 0) {
        const ids = Array.from(ownedClients);
        const placeholders = ids.map((_, i) => `$${i + 1}`).join(',');
        await pool.query(
          `DELETE FROM ${clientsTable} WHERE client_id IN (${placeholders})`,
          ids
        );
      }
      ownedClients.clear();
    },

    lookup: {
      /** Register clientId -> this server (upsert; takes over ownership). */
      async add(clientId) {
        await pool.query(
          `INSERT INTO ${clientsTable} (client_id, server_id, updated_at)
           VALUES ($1, $2, NOW())
           ON CONFLICT (client_id) DO UPDATE SET server_id = $2, updated_at = NOW()`,
          [clientId, serverId]
        );
        ownedClients.add(clientId);
        console.log(`📍 Postgres adapter: registered client ${clientId} -> ${serverId}`);
      },

      /** @returns {Promise<string|null>} serverId owning clientId, or null. */
      async read(clientId) {
        const result = await pool.query(
          `SELECT server_id FROM ${clientsTable} WHERE client_id = $1`,
          [clientId]
        );
        return result.rows[0]?.server_id || null;
      },

      /**
       * Remove a mapping; only allowed for clients this server owns.
       * @throws {Error} when this server is not the owner
       */
      async remove(clientId) {
        if (!ownedClients.has(clientId)) {
          throw new Error(`not owner: cannot remove client ${clientId}`);
        }
        await pool.query(
          `DELETE FROM ${clientsTable} WHERE client_id = $1`,
          [clientId]
        );
        ownedClients.delete(clientId);
        console.log(`🗑️ Postgres adapter: removed client ${clientId}`);
      }
    },

    channels: {
      /**
       * Publish a message to one server, or to all servers when
       * targetServerId is falsy (sent as '' = broadcast).
       */
      async push(targetServerId, message) {
        const payload = JSON.stringify({
          targetServerId: targetServerId || '',
          senderServerId: serverId,
          message
        });

        // NOTIFY payloads are capped at ~8000 BYTES. Measure bytes, not
        // UTF-16 code units, so multi-byte characters are counted correctly.
        if (Buffer.byteLength(payload, 'utf8') > 7500) {
          console.warn('⚠️ Postgres adapter: payload too large for NOTIFY, consider using smaller messages');
        }

        await pool.query(`SELECT pg_notify($1, $2)`, [eventsChannel, payload]);

        if (targetServerId) {
          console.log(`📤 Postgres adapter: pushed to server ${targetServerId}`);
        } else {
          console.log(`📢 Postgres adapter: broadcast to all servers`);
        }
      },

      /**
       * Register a handler for messages addressed to targetServerId
       * ('' = broadcast). Returns an async unsubscribe function.
       */
      async pull(targetServerId, handler) {
        handlers.set(targetServerId || '', handler);

        // Return unsubscribe function
        return async () => {
          handlers.delete(targetServerId || '');
        };
      }
    }
  };

  return adapter;
}

// Guarded so the file also parses under ESM (no-op outside CommonJS)
if (typeof module !== 'undefined') module.exports = { createPostgresAdapter };
/**
 * Redis Adapter for APE Cluster
 *
 * Uses Redis PUB/SUB for real-time inter-server messaging.
 * Client mappings stored as simple key-value pairs.
 */

/**
 * Create Redis adapter
 * @param {object} redis - Redis client (node-redis or ioredis)
 * @param {object} opts
 * @param {string} opts.serverId - This server's unique ID
 * @param {string} [opts.namespace='ape'] - Key prefix
 * @returns {Promise<AdapterInstance>}
 */
async function createRedisAdapter(redis, { serverId, namespace = 'ape' }) {
  if (!serverId) throw new Error('serverId required');

  // State machine: INIT -> JOINED -> LEFT (rejoin after leave is not allowed)
  let state = 'INIT';
  const ownedClients = new Set(); // clientIds registered by THIS server instance
  const handlers = new Map();     // targetServerId ('' = broadcast) -> message handler

  // Dedicated pub/sub connections: a subscribed Redis connection cannot
  // issue regular commands, so commands go through `pub`
  const pub = redis.duplicate();
  const sub = redis.duplicate();

  // Key helpers ('' maps to the shared broadcast channel)
  const key = {
    client: (id) => `${namespace}:client:${id}`,
    channel: (id) => `${namespace}:channel:${id || 'ALL'}`,
  };

  // Connect pub/sub clients (node-redis v4 requires an explicit connect)
  if (typeof pub.connect === 'function' && pub.isOpen === false) {
    await pub.connect();
  }
  if (typeof sub.connect === 'function' && sub.isOpen === false) {
    await sub.connect();
  }

  // Handle incoming messages (node-redis v4 style)
  if (typeof sub.on === 'function') {
    sub.on('message', (channel, rawMessage) => {
      try {
        const data = JSON.parse(rawMessage);
        // Strip the transport-only sender marker so handlers receive the
        // same message shape the other adapters deliver.
        const { _senderServerId, ...message } = data;

        // Deliver only to the handler registered for THIS channel.
        // (Matching every handler against the broadcast channel would make
        // direct-message handlers fire on broadcasts too — duplicate
        // delivery, and inconsistent with the other adapters.)
        for (const [pattern, handler] of handlers) {
          if (channel === key.channel(pattern)) {
            handler(message, _senderServerId || serverId);
          }
        }
      } catch (e) {
        console.error('📛 Redis adapter: failed to parse message', e.message);
      }
    });
  }

  const adapter = {
    get serverId() { return serverId; },

    /**
     * Join the cluster: subscribe to this server's channel plus the
     * shared broadcast channel.
     * @param {string} [id] - Optional override of the configured serverId
     * @throws {Error} on empty id, double join, or rejoin after leave
     */
    async join(id) {
      const sid = id || serverId;
      if (!sid?.trim()) throw new Error('serverId required');
      if (state === 'JOINED') throw new Error('already joined');
      if (state === 'LEFT') throw new Error('cannot rejoin after leave');

      // Subscribe to this server's channel + broadcast channel
      await sub.subscribe(key.channel(sid));
      await sub.subscribe(key.channel(''));

      state = 'JOINED';
      console.log(`✅ Redis adapter: joined as ${sid}`);
    },

    /**
     * Leave the cluster: delete owned client mappings, then unsubscribe
     * and close both connections. Idempotent.
     */
    async leave() {
      if (state !== 'JOINED') return;
      state = 'LEFT';

      console.log(`🔴 Redis adapter: leaving, cleaning up ${ownedClients.size} clients`);

      // Remove all owned client mappings (best-effort, one at a time)
      for (const clientId of ownedClients) {
        try {
          await pub.del(key.client(clientId));
        } catch (e) {
          console.error(`📛 Redis adapter: failed to remove client ${clientId}`, e.message);
        }
      }
      ownedClients.clear();

      // Unsubscribe and disconnect
      try {
        await sub.unsubscribe();
        await pub.quit();
        await sub.quit();
      } catch (e) {
        // Ignore disconnect errors
      }
    },

    lookup: {
      /** Register clientId -> this server (overwrites any prior owner). */
      async add(clientId) {
        await pub.set(key.client(clientId), serverId);
        ownedClients.add(clientId);
        console.log(`📍 Redis adapter: registered client ${clientId} -> ${serverId}`);
      },

      /** @returns {Promise<string|null>} serverId owning clientId, or null. */
      async read(clientId) {
        const result = await pub.get(key.client(clientId));
        return result || null;
      },

      /**
       * Remove a mapping; only allowed for clients this server owns.
       * @throws {Error} when this server is not the owner
       */
      async remove(clientId) {
        if (!ownedClients.has(clientId)) {
          throw new Error(`not owner: cannot remove client ${clientId}`);
        }
        await pub.del(key.client(clientId));
        ownedClients.delete(clientId);
        console.log(`🗑️ Redis adapter: removed client ${clientId}`);
      }
    },

    channels: {
      /**
       * Publish a message to one server, or to all servers when
       * targetServerId is falsy (broadcast channel).
       */
      async push(targetServerId, message) {
        const channel = key.channel(targetServerId);
        const payload = JSON.stringify({
          ...message,
          _senderServerId: serverId
        });
        await pub.publish(channel, payload);

        if (targetServerId) {
          console.log(`📤 Redis adapter: pushed to server ${targetServerId}`);
        } else {
          console.log(`📢 Redis adapter: broadcast to all servers`);
        }
      },

      /**
       * Register a handler for messages addressed to targetServerId
       * ('' = broadcast). Returns an async unsubscribe function.
       */
      async pull(targetServerId, handler) {
        handlers.set(targetServerId || '', handler);

        // Return unsubscribe function
        return async () => {
          handlers.delete(targetServerId || '');
        };
      }
    }
  };

  return adapter;
}

// Guarded so the file also parses under ESM (no-op outside CommonJS)
if (typeof module !== 'undefined') module.exports = { createRedisAdapter };
/**
 * Supabase Adapter for APE Cluster
 *
 * Uses Supabase Realtime for inter-server messaging.
 * Client mappings stored in a dedicated table.
 *
 * Supabase is Postgres under the hood with a simpler Realtime API.
 */

/**
 * Create Supabase adapter
 * @param {SupabaseClient} supabase - Supabase client from @supabase/supabase-js
 * @param {object} opts
 * @param {string} opts.serverId - This server's unique ID
 * @param {string} [opts.namespace='ape'] - Table prefix
 * @returns {Promise<AdapterInstance>}
 */
async function createSupabaseAdapter(supabase, { serverId, namespace = 'ape' }) {
  if (!serverId) throw new Error('serverId required');

  // State machine: INIT -> JOINED -> LEFT (rejoin after leave is not allowed)
  let state = 'INIT';
  const ownedClients = new Set(); // clientIds registered by THIS server instance
  const handlers = new Map();     // targetServerId ('' = broadcast) -> message handler
  let realtimeChannel = null;     // per-server channel
  let broadcastChannel = null;    // shared ALL channel (kept so leave() can remove it)

  // Table names
  const clientsTable = `${namespace}_clients`;
  const eventsTable = `${namespace}_events`;

  // Ensure tables exist (Supabase requires pre-created tables via migrations)
  // This is a validation check, not creation
  async function validateTables() {
    const { error: clientsError } = await supabase
      .from(clientsTable)
      .select('client_id')
      .limit(1);

    if (clientsError && clientsError.code === '42P01') {
      throw new Error(
        `Table "${clientsTable}" does not exist. ` +
        `Create it with: CREATE TABLE ${clientsTable} (client_id TEXT PRIMARY KEY, server_id TEXT NOT NULL);`
      );
    }
  }

  const adapter = {
    get serverId() { return serverId; },

    /**
     * Join the cluster: validate the schema, then subscribe to this
     * server's Realtime channel and the shared broadcast channel.
     * @param {string} [id] - Optional override of the configured serverId
     * @throws {Error} on empty id, double join, rejoin, or missing table
     */
    async join(id) {
      const sid = id || serverId;
      if (!sid?.trim()) throw new Error('serverId required');
      if (state === 'JOINED') throw new Error('already joined');
      if (state === 'LEFT') throw new Error('cannot rejoin after leave');

      await validateTables();

      // Subscribe to Realtime channel for this server
      realtimeChannel = supabase
        .channel(`${namespace}:${sid}`)
        .on('broadcast', { event: 'message' }, ({ payload }) => {
          const { targetServerId, message, senderServerId } = payload;

          // Check if message is for us or broadcast
          if (targetServerId === sid || targetServerId === '') {
            const handler = handlers.get(targetServerId) || handlers.get('');
            if (handler) {
              handler(message, senderServerId);
            }
          }
        })
        .subscribe();

      // Also subscribe to the broadcast channel — keep the reference so
      // leave() can remove it too (otherwise the subscription leaks).
      broadcastChannel = supabase
        .channel(`${namespace}:ALL`)
        .on('broadcast', { event: 'message' }, ({ payload }) => {
          const { message, senderServerId } = payload;
          const handler = handlers.get('');
          if (handler) {
            handler(message, senderServerId);
          }
        })
        .subscribe();

      state = 'JOINED';
      console.log(`✅ Supabase adapter: joined as ${sid}`);
    },

    /**
     * Leave the cluster: remove BOTH Realtime channels and delete every
     * client mapping this server owns. Idempotent.
     */
    async leave() {
      if (state !== 'JOINED') return;
      state = 'LEFT';

      console.log(`🔴 Supabase adapter: leaving, cleaning up ${ownedClients.size} clients`);

      // Unsubscribe from both channels (per-server AND broadcast)
      if (realtimeChannel) {
        await supabase.removeChannel(realtimeChannel);
        realtimeChannel = null;
      }
      if (broadcastChannel) {
        await supabase.removeChannel(broadcastChannel);
        broadcastChannel = null;
      }

      // Remove all owned client mappings
      if (ownedClients.size > 0) {
        const ids = Array.from(ownedClients);
        await supabase
          .from(clientsTable)
          .delete()
          .in('client_id', ids);
      }
      ownedClients.clear();
    },

    lookup: {
      /**
       * Register clientId -> this server (upsert; takes over ownership).
       * @throws {Error} when the upsert fails
       */
      async add(clientId) {
        const { error } = await supabase
          .from(clientsTable)
          .upsert({
            client_id: clientId,
            server_id: serverId,
            updated_at: new Date().toISOString()
          });

        if (error) throw new Error(`Supabase lookup.add failed: ${error.message}`);

        ownedClients.add(clientId);
        console.log(`📍 Supabase adapter: registered client ${clientId} -> ${serverId}`);
      },

      /** @returns {Promise<string|null>} serverId owning clientId, or null. */
      async read(clientId) {
        const { data, error } = await supabase
          .from(clientsTable)
          .select('server_id')
          .eq('client_id', clientId)
          .single();

        if (error && error.code !== 'PGRST116') { // PGRST116 = not found
          throw new Error(`Supabase lookup.read failed: ${error.message}`);
        }

        return data?.server_id || null;
      },

      /**
       * Remove a mapping; only allowed for clients this server owns.
       * @throws {Error} when not owner or when the delete fails
       */
      async remove(clientId) {
        if (!ownedClients.has(clientId)) {
          throw new Error(`not owner: cannot remove client ${clientId}`);
        }

        const { error } = await supabase
          .from(clientsTable)
          .delete()
          .eq('client_id', clientId);

        if (error) throw new Error(`Supabase lookup.remove failed: ${error.message}`);

        ownedClients.delete(clientId);
        console.log(`🗑️ Supabase adapter: removed client ${clientId}`);
      }
    },

    channels: {
      /**
       * Publish a message to one server, or to all servers when
       * targetServerId is falsy (ALL channel).
       * NOTE(review): this sends on a freshly created, unsubscribed channel
       * each call — presumably relying on supabase-js REST broadcast;
       * confirm against the supabase-js version in use.
       */
      async push(targetServerId, message) {
        const channelName = targetServerId
          ? `${namespace}:${targetServerId}`
          : `${namespace}:ALL`;

        const channel = supabase.channel(channelName);

        await channel.send({
          type: 'broadcast',
          event: 'message',
          payload: {
            targetServerId: targetServerId || '',
            senderServerId: serverId,
            message
          }
        });

        if (targetServerId) {
          console.log(`📤 Supabase adapter: pushed to server ${targetServerId}`);
        } else {
          console.log(`📢 Supabase adapter: broadcast to all servers`);
        }
      },

      /**
       * Register a handler for messages addressed to targetServerId
       * ('' = broadcast). Returns an async unsubscribe function.
       */
      async pull(targetServerId, handler) {
        handlers.set(targetServerId || '', handler);

        // Return unsubscribe function
        return async () => {
          handlers.delete(targetServerId || '');
        };
      }
    }
  };

  return adapter;
}

// Guarded so the file also parses under ESM (no-op outside CommonJS)
if (typeof module !== 'undefined') module.exports = { createSupabaseAdapter };