@teamplay/backend 0.1.5

package/CHANGELOG.md ADDED
@@ -0,0 +1,9 @@
+ # v0.1.5 (Mon May 27 2024)
+
+ #### 🚀 Enhancement
+
+ - feat: move backend implementation from startupjs. Add an example app. ([@cray0000](https://github.com/cray0000))
+
+ #### Authors: 1
+
+ - Pavel Zhukov ([@cray0000](https://github.com/cray0000))
package/README.md ADDED
@@ -0,0 +1,70 @@
+ # Teamplay Backend
+
+ > Create a new ShareDB backend instance
+
+ ## Installation
+
+ ```sh
+ yarn add @teamplay/backend
+ ```
+
+ ## Configuration
+
+ The package can be configured using the following environment variables:
+
+ - `MONGO_URL`: The URL for the MongoDB connection. MongoDB is used as the primary database unless overridden by setting `NO_MONGO` to `true`.
+ - `NO_MONGO`: When set to `true`, disables MongoDB. The package will instead use a Mingo database, persisting data to SQLite.
+ - `DB_PATH`: The file path for the SQLite database. Only relevant when `NO_MONGO` is `true`. Defaults to `local.db` if not specified.
+ - `DB_LOAD_SNAPSHOT`: Optional path to a SQLite snapshot file. Only relevant when `NO_MONGO` is `true`. If provided, the SQLite database will be initialized from this snapshot.
+ - `DB_READONLY`: Set to `true` to disable persistence to SQLite.
+ - `REDIS_URL`: URL for the Redis connection.
+ - `NO_REDIS`: Set to `true` to use a mocked Redis client.
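+
+ For example, a fully offline development setup (no MongoDB, no Redis, data persisted to a local SQLite file) could be configured like this (illustrative values; normally you would set these in the shell, they are set programmatically here only to keep the example self-contained):
+
+ ```js
+ // hypothetical dev bootstrap: must run before '@teamplay/backend' is imported,
+ // since the database and redis clients are created at import time
+ process.env.NO_MONGO = 'true'
+ process.env.NO_REDIS = 'true'
+ process.env.DB_PATH = './dev.db'
+ ```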
+
+ ### Database Initialization
+
+ The backend chooses between MongoDB and Mingo for database operations based on the environment settings:
+
+ - **MongoDB**: Used when `MONGO_URL` is set and `NO_MONGO` is not set.
+ - **Mingo and SQLite**: Activated by setting `NO_MONGO` to `true`. Mingo handles operations, while SQLite is used solely for data persistence, loaded from `DB_PATH` if it exists.
+ - **SQLite Snapshot**: When `DB_LOAD_SNAPSHOT` is set, SQLite is initialized from this pre-populated data snapshot and the data is pulled into Mingo.
+
+ Data persistence can be disabled by setting `DB_READONLY` to `true`; in that case Mingo runs purely in memory. A sketch of the selection logic follows below.
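+
+ The sketch below mirrors the selection logic in `db/index.js` (simplified, not the verbatim implementation):
+
+ ```js
+ // simplified sketch of how the backend picks a database adapter
+ if (process.env.MONGO_URL && !process.env.NO_MONGO) {
+   // MongoDB (sharedb-mongo)
+ } else if (process.env.DB_READONLY) {
+   // Mingo in memory only, no persistence (sharedb-mingo-memory)
+ } else {
+   // Mingo with persistence to a local SQLite file
+ }
+ ```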
+
+ ### Cloning Existing SQLite Snapshots
+
+ When you need to create a new SQLite database snapshot from an existing one, combine the `DB_LOAD_SNAPSHOT` and `DB_PATH` environment variables. This is particularly useful for initializing new environments or for testing.
+
+ To clone an existing snapshot, set `DB_LOAD_SNAPSHOT` to the path of the existing snapshot file (e.g., `snapshot.db`) and specify a new file name in `DB_PATH` for the cloned database (e.g., `clone-snapshot.db`).
+
+ For example:
+
+ ```bash
+ NO_MONGO=true DB_LOAD_SNAPSHOT=snapshot.db DB_PATH=clone-snapshot.db startupjs server
+ ```
+
+ On startup the server clones the data from `snapshot.db` into a new database file named `clone-snapshot.db` and then continues running against `clone-snapshot.db` as its database source.
+
+ ## Usage
+
+ To use the backend package in your StartupJS project, import and initialize it as follows:
+
+ ```js
+ import getBackend from '@teamplay/backend'
+
+ export default async function runServer () {
+   const backend = await getBackend(options)
+   // ...
+ }
+ ```
+
+ where `options` are:
+
+ - `pollDebounce`: the minimum delay in milliseconds between subsequent database query polls, applied individually to each collection. Polling is batched within this window to reduce load on the database. An example follows below.
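+
+ For example, to poll each collection at most once every 100 ms (a minimal sketch; the value is illustrative):
+
+ ```js
+ const backend = await getBackend({ pollDebounce: 100 })
+ ```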
+
+ ## License
+
+ MIT
+
+ (c) Decision Mapper - http://decisionmapper.com
package/db/index.js ADDED
@@ -0,0 +1,24 @@
+ export const {
+   db,
+   mongo, // (optional, only if mongo is used) mongoClient.db()
+   mongoClient, // (optional, only if mongo is used)
+   createMongoIndex = () => {}, // (optional, only if mongo is used; mock provided) create mongo indexes
+   sqlite // (optional, only if mingo-sqlite is used) sqlite3 db instance
+ } = await getDb({
+   mongoUrl: process.env.MONGO_URL,
+   disableMongo: process.env.NO_MONGO,
+   isReadonly: process.env.DB_READONLY
+ })
+
+ async function getDb ({ mongoUrl, disableMongo, isReadonly }) {
+   if (mongoUrl && !disableMongo) {
+     console.log('Database: mongo')
+     return await import('./mongo.js')
+   } else if (isReadonly) {
+     console.log('Database: mingo-memory (no data persistency)')
+     return await import('./mingo-memory.js')
+   } else {
+     console.log('Database: mingo-sqlite (persist data to a local SQLite file)')
+     return await import('./mingo-sqlite.js')
+   }
+ }
package/db/mingo-memory.js ADDED
@@ -0,0 +1,17 @@
+ import ShareDbMingoMemory from 'sharedb-mingo-memory'
+ import { getExistingSqliteDb, loadSqliteDbToMingo } from './utils.js'
+
+ export const db = await getMingoDb({
+   loadSnapshotPath: process.env.DB_LOAD_SNAPSHOT
+ })
+
+ async function getMingoDb ({ loadSnapshotPath }) {
+   const db = new ShareDbMingoMemory()
+
+   if (loadSnapshotPath) {
+     const sqliteDb = getExistingSqliteDb(loadSnapshotPath)
+     await loadSqliteDbToMingo(sqliteDb, db)
+   }
+
+   return db
+ }
package/db/mingo-sqlite.js ADDED
@@ -0,0 +1,167 @@
+ import ShareDbMingoMemory from 'sharedb-mingo-memory'
+ import { resolve } from 'path'
+ import sqlite3 from 'sqlite3'
+ import { v4 as uuid } from 'uuid'
+ import { loadSqliteDbToMingo, getExistingSqliteDb } from './utils.js'
+
+ const DEFAULT_DB_PATH = './local.db'
+
+ export const { db, sqlite } = await getMingoSqliteDb({
+   dbPath: process.env.DB_PATH,
+   loadSnapshotPath: process.env.DB_LOAD_SNAPSHOT
+ })
+
+ async function getMingoSqliteDb ({ dbPath, loadSnapshotPath }) {
+   const db = new ShareDbMingoMemory()
+   dbPath = resolve(dbPath || DEFAULT_DB_PATH)
+   const sqliteDb = await getOrCreateSqliteDb(dbPath)
+
+   await deleteExpiredDocumentsOps(sqliteDb)
+
+   if (loadSnapshotPath) {
+     const snapshotSqliteDb = getExistingSqliteDb(loadSnapshotPath)
+     await cloneSqliteDb(snapshotSqliteDb, sqliteDb)
+   }
+
+   await loadSqliteDbToMingo(sqliteDb, db)
+
+   patchMingoForSQLitePersistence(sqliteDb, db)
+
+   return { db, sqlite: sqliteDb }
+ }
+
+ // override the commit method to save changes to SQLite
+ function patchMingoForSQLitePersistence (sqliteDb, shareDbMingo) {
+   const originalCommit = shareDbMingo.commit
+
+   shareDbMingo.commit = function (collection, docId, op, snapshot, options, callback) {
+     originalCommit.call(this, collection, docId, op, snapshot, options, async err => {
+       if (err) return callback(err)
+
+       try {
+         await new Promise((resolve, reject) => {
+           sqliteDb.run(`
+             REPLACE INTO documents (collection, id, data) VALUES (?, ?, ?)
+           `, [collection, docId, JSON.stringify(snapshot)], err => err ? reject(err) : resolve())
+         })
+         await new Promise((resolve, reject) => {
+           sqliteDb.run(`
+             INSERT INTO ops (id, collection, documentId, op) VALUES (?, ?, ?, ?)
+           `, [uuid(), collection, docId, JSON.stringify(op)], err => err ? reject(err) : resolve())
+         })
+       } catch (err) {
+         // pass the error to the callback instead of throwing inside the async callback
+         return callback(Error('Error saving to SQLite:\n' + err.message))
+       }
+
+       callback(null, true)
+     })
+   }
+ }
+
+ // delete ops that are older than 24 hours and already behind the latest document version
+ async function deleteExpiredDocumentsOps (sqliteDb) {
+   return await new Promise((resolve, reject) => {
+     sqliteDb.run(`
+       DELETE FROM ops
+       WHERE
+         json_extract(op, '$.m.ts') < (strftime('%s', 'now') - 24 * 60 * 60) * 1000
+       AND
+         json_extract(op, '$.v') < (
+           SELECT (json_extract(data, '$.v') - 1)
+           FROM documents
+           WHERE documents.id = ops.documentId AND documents.collection = ops.collection
+         );
+     `, err => err ? reject(err) : resolve())
+   })
+ }
+
+ async function getOrCreateSqliteDb (dbPath) {
+   const sqliteDb = new sqlite3.Database(dbPath)
+
+   try {
+     await new Promise((resolve, reject) => {
+       sqliteDb.run(`
+         CREATE TABLE IF NOT EXISTS documents (
+           collection TEXT,
+           id TEXT,
+           data TEXT,
+           PRIMARY KEY (collection, id)
+         )
+       `, err => err ? reject(err) : resolve())
+     })
+     await new Promise((resolve, reject) => {
+       sqliteDb.run(`
+         CREATE TABLE IF NOT EXISTS ops (
+           id UUID PRIMARY KEY,
+           collection TEXT,
+           documentId TEXT,
+           op TEXT
+         )
+       `, err => err ? reject(err) : resolve())
+     })
+     await new Promise((resolve, reject) => {
+       sqliteDb.run(`
+         CREATE TABLE IF NOT EXISTS files (
+           id TEXT PRIMARY KEY,
+           data BLOB,
+           createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+         )
+       `, err => err ? reject(err) : resolve())
+     })
+   } catch (err) {
+     throw Error('Error creating SQLite file db and/or tables:\n' + err.message)
+   }
+   console.log('Using SQLite DB from file:', dbPath)
+   return sqliteDb
+ }
+
+ // clone each table from source to target
+ // TODO: Do it in batches of 100 to avoid memory issues.
+ async function cloneSqliteDb (source, target) {
+   try {
+     { // clone 'documents'
+       const rows = await new Promise((resolve, reject) => {
+         source.all('SELECT * FROM documents', [], (err, rows) => err ? reject(err) : resolve(rows))
+       })
+       const promises = []
+       for (const row of rows) {
+         promises.push(new Promise((resolve, reject) => {
+           target.run(`
+             INSERT INTO documents (collection, id, data) VALUES (?, ?, ?)
+           `, [row.collection, row.id, row.data], err => err ? reject(err) : resolve())
+         }))
+       }
+       await Promise.all(promises)
+     }
+     { // clone 'ops'
+       const rows = await new Promise((resolve, reject) => {
+         source.all('SELECT * FROM ops', [], (err, rows) => err ? reject(err) : resolve(rows))
+       })
+       const promises = []
+       for (const row of rows) {
+         promises.push(new Promise((resolve, reject) => {
+           target.run(`
+             INSERT INTO ops (id, collection, documentId, op) VALUES (?, ?, ?, ?)
+           `, [row.id, row.collection, row.documentId, row.op], err => err ? reject(err) : resolve())
+         }))
+       }
+       await Promise.all(promises)
+     }
+     { // clone 'files'
+       // TODO: Clone them one by one to avoid memory issues, since files are large.
+       const rows = await new Promise((resolve, reject) => {
+         source.all('SELECT * FROM files', [], (err, rows) => err ? reject(err) : resolve(rows))
+       })
+       const promises = []
+       for (const row of rows) {
+         promises.push(new Promise((resolve, reject) => {
+           target.run(`
+             INSERT INTO files (id, data) VALUES (?, ?)
+           `, [row.id, row.data], err => err ? reject(err) : resolve())
+         }))
+       }
+       await Promise.all(promises)
+     }
+   } catch (err) {
+     throw Error('Error cloning SQLite DB:\n' + err.message)
+   }
+ }
package/db/mongo.js ADDED
@@ -0,0 +1,40 @@
+ import fs from 'fs'
+ import { MongoClient } from 'mongodb'
+ import ShareDbMongo from 'sharedb-mongo'
+
+ export const { db, mongo, mongoClient, createMongoIndex } = getMongoDb({
+   url: process.env.MONGO_URL,
+   optsString: process.env.MONGO_OPTS,
+   sslKeyPath: process.env.MONGO_SSL_KEY_PATH,
+   sslCertPath: process.env.MONGO_SSL_CERT_PATH,
+   sslCaPath: process.env.MONGO_SSL_CA_PATH
+ })
+
+ function getMongoDb ({ url, optsString, sslKeyPath, sslCertPath, sslCaPath }) {
+   const options = { useUnifiedTopology: true }
+
+   if (typeof optsString === 'string') {
+     const { key, cert, ca } = JSON.parse(optsString)
+     options.sslKey = fs.readFileSync(key)
+     options.sslCert = fs.readFileSync(cert)
+     options.sslCA = fs.readFileSync(ca)
+   } else if (sslKeyPath) {
+     options.sslKey = fs.readFileSync(sslKeyPath)
+     options.sslCert = fs.readFileSync(sslCertPath)
+     options.sslCA = fs.readFileSync(sslCaPath)
+   }
+
+   const mongoClient = new MongoClient(url, options)
+   const mongo = mongoClient.db()
+   return {
+     db: ShareDbMongo({
+       mongo: callback => callback(null, mongoClient),
+       allowAllQueries: true
+     }),
+     mongo,
+     mongoClient,
+     createMongoIndex (collection, keys, options) {
+       return mongo.collection(collection).createIndex(keys, options)
+     }
+   }
+ }
package/db/utils.js ADDED
@@ -0,0 +1,45 @@
+ import sqlite3 from 'sqlite3'
+ import { existsSync } from 'fs'
+ import { resolve } from 'path'
+
+ export async function loadSqliteDbToMingo (sqliteDb, mingo) {
+   return new Promise((resolve, reject) => {
+     sqliteDb.all('SELECT collection, id, data FROM documents', [], (err, docsRows) => {
+       if (err) return reject(err)
+
+       sqliteDb.all('SELECT collection, documentId, op FROM ops', [], (err, opsRows) => {
+         if (err) return reject(err)
+
+         const docsOpsByDocId = opsRows.reduce((byId, row) => {
+           const values = byId[`${row.collection}.${row.documentId}`] || []
+           const op = JSON.parse(row.op)
+           return { ...byId, [`${row.collection}.${row.documentId}`]: [...values, op] }
+         }, {})
+
+         for (const row of docsRows) {
+           if (!mingo.docs[row.collection]) {
+             mingo.docs[row.collection] = {}
+             mingo.ops[row.collection] = {}
+           }
+
+           const docOps = (docsOpsByDocId[`${row.collection}.${row.id}`] || []).sort((a, b) => a.v - b.v)
+           const firstOp = docOps[0]
+
+           // pad the ops list so that each op's index matches its version number
+           mingo.ops[row.collection][row.id] = firstOp?.v > 0 ? [...new Array(firstOp.v), ...docOps] : docOps
+           mingo.docs[row.collection][row.id] = JSON.parse(row.data)
+         }
+         console.log('DB data was loaded from SQLite to shareDbMingo')
+         resolve()
+       })
+     })
+   })
+ }
+
+ export function getExistingSqliteDb (dbPath) {
+   dbPath = resolve(dbPath)
+   console.log('[mingo] Getting existing sqlite db from:', dbPath)
+   if (!existsSync(dbPath)) {
+     throw Error('[mingo] SQLite db file doesn\'t exist: ' + dbPath)
+   }
+   return new sqlite3.Database(dbPath)
+ }
package/features/serverAggregate.js ADDED
@@ -0,0 +1,38 @@
+ import _serverAggregate from '@teamplay/server-aggregate'
+ import { isAggregationFunction } from '@teamplay/utils/aggregation'
+
+ export default function serverAggregate (backend, { customCheck, models = {} } = {}) {
+   _serverAggregate(backend, { customCheck })
+
+   for (const modelPattern in models) {
+     for (const aggregationName in models[modelPattern]) {
+       const aggregation = models[modelPattern][aggregationName]
+       if (!isAggregationFunction(aggregation)) continue
+       // support only top-level collections
+       const collection = modelPattern
+       if (/\./.test(collection)) throw Error(ERRORS.onlyTopLevelCollections(modelPattern, aggregationName))
+       backend.addAggregate(
+         collection,
+         aggregationName,
+         (queryParams, shareRequest) => {
+           const session = shareRequest.agent.connectSession
+           // TODO: rewrite to use $ here, or create a separate root $ for each user
+           // const model = global.__clients[userId].model
+           const context = { session, collection }
+           return aggregation(queryParams, context)
+         }
+       )
+     }
+   }
+
+   console.log('✓ Security: only server-side aggregations are allowed')
+ }
+
+ const ERRORS = {
+   onlyTopLevelCollections: (modelPattern, aggregationName) => `
+     serverAggregate: you can only define aggregations in the top-level collection models
+     (i.e. 'models/items.js')
+     Found aggregation '${aggregationName}' in '${modelPattern}'.
+     Move it to the top-level collection model: 'models/${modelPattern.split('.')[0]}.js'
+   `
+ }
package/features/validateSchema.js ADDED
@@ -0,0 +1,41 @@
+ import sharedbSchema from '@teamplay/sharedb-schema'
+ import { transformSchema } from '@teamplay/schema'
+
+ export default function validateSchema (backend, { models = {} } = {}) {
+   const schemaPerCollection = { schemas: {}, formats: {}, validators: {} }
+
+   for (const modelPattern in models) {
+     let { schema, factory } = models[modelPattern]
+
+     if (factory) {
+       // TODO: implement getting schema from factory
+       // schemaPerCollection.schemas[modelPattern.replace('.*', '')] = ORM[path].OrmEntity
+       throw Error('factory model: NOT IMPLEMENTED')
+     } else if (schema) {
+       const collectionName = modelPattern
+       if (/\./.test(collectionName)) throw Error(ERRORS.onlyTopLevelCollections(modelPattern))
+       // transform schema from simplified format to full format
+       schema = transformSchema(schema, { collectionName })
+       schemaPerCollection.schemas[collectionName] = schema
+     }
+   }
+
+   // allow any 'service' collection structure
+   // since the 'service' collection is used in our startupjs libraries
+   // and we don't have a tool to collect schemas from all packages right now
+   schemaPerCollection.schemas.service = transformSchema({
+     type: 'object', properties: {}, additionalProperties: true
+   })
+
+   sharedbSchema(backend, schemaPerCollection)
+   console.log('✓ Security: JSON-schema validation of DB collections on backend is enabled')
+ }
+
+ const ERRORS = {
+   onlyTopLevelCollections: (modelPattern) => `
+     validateSchema: you can only define schema in the top-level collection models
+     (i.e. 'models/items.js')
+     Found schema in '${modelPattern}'.
+     Move it to the top-level collection model: 'models/${modelPattern.split('.')[0]}.js'
+   `
+ }
package/index.d.ts ADDED
@@ -0,0 +1,48 @@
+ import { type Redis } from 'ioredis'
+ import { type Db, type MongoClient, type CreateIndexesOptions } from 'mongodb'
+ import { type Database as SqliteDatabase } from 'sqlite3'
+
+ export interface BackendOptions {
+   secure?: boolean
+   ee?: any // EventEmitter instance, replace 'any' with a more specific type if available
+   pollDebounce?: number
+   flushRedis?: boolean
+   extraDbs?: any // Replace 'any' with the actual type
+   hooks?: (backend: any) => void // Replace 'any' with the Backend class type
+   accessControl?: boolean
+   serverAggregate?: boolean | { customCheck: () => any } // TODO: remove customCheck support
+   validateSchema?: boolean
+   models?: any // map of collection model patterns (used by serverAggregate and validateSchema)
+   verbose?: boolean // log WebSocket connect/disconnect events (defaults to true)
+   silentLogs?: boolean
+ }
+
+ // Accommodate both the specific MongoDB index creation signature and a dummy function
+ interface CreateMongoIndexFunction {
+   (collection: string, keys: Record<string, number | string>, options?: CreateIndexesOptions): void
+   (): void
+ }
+
+ export const createMongoIndex: CreateMongoIndexFunction
+
+ export interface RedisExports {
+   redis: Redis
+   redlock: any
+   Redlock: any
+ }
+
+ export interface DbExports {
+   db: any // This refers to the ShareDB database adapter, not a MongoDB connection
+   mongo: Db // MongoDB Db instance from MongoClient.db()
+   mongoClient: MongoClient
+   sqlite: SqliteDatabase // sqlite3 Database instance
+ }
+
+ // createBackend is the package's default export (see index.js)
+ export default function createBackend (options?: BackendOptions): any // Replace 'any' with the Backend class type
+
+ // Exports instances and constructors for Redis and database connections
+ export const redis: RedisExports['redis']
+ export const redlock: RedisExports['redlock']
+ export const Redlock: RedisExports['Redlock']
+ export const db: DbExports['db']
+ export const mongo: DbExports['mongo'] | undefined
+ export const mongoClient: DbExports['mongoClient'] | undefined
+ export const sqlite: DbExports['sqlite'] | undefined
package/index.js ADDED
@@ -0,0 +1,115 @@
+ import ShareDbAccess, {
+   registerOrmRules
+ } from '@teamplay/sharedb-access'
+ import ShareDB from 'sharedb'
+ import shareDbHooks from 'sharedb-hooks'
+ import { pubsub } from './redis/index.js'
+ import { db } from './db/index.js'
+ import maybeFlushRedis from './redis/maybeFlushRedis.js'
+ import initValidateSchema from './features/validateSchema.js'
+ import initServerAggregate from './features/serverAggregate.js'
+
+ export { redis, redlock, Redlock } from './redis/index.js'
+ export { db, mongo, mongoClient, createMongoIndex, sqlite } from './db/index.js'
+
+ const usersConnectionCounter = {}
+ global.__clients = {}
+
+ export default function createBackend ({
+   secure = false,
+   pollDebounce,
+   flushRedis = true,
+   extraDbs,
+   hooks,
+   accessControl = secure,
+   serverAggregate = secure,
+   validateSchema = secure,
+   models,
+   verbose = true
+ } = {}) {
+   // pollDebounce is the minimum time in ms between query polls in sharedb
+   if (pollDebounce) db.pollDebounce = pollDebounce
+
+   // Maybe flush redis when starting the app.
+   // When running in cluster this should only run on one instance and once a day
+   // so redlock is used to guarantee that.
+   if (flushRedis) maybeFlushRedis()
+
+   const backend = new ShareDB({
+     db,
+     pubsub,
+     extraDbs
+   })
+
+   // sharedb-hooks
+   shareDbHooks(backend)
+
+   if (hooks) hooks(backend)
+
+   const ORM = global.STARTUP_JS_ORM || {}
+
+   // sharedb-access
+   if (accessControl) {
+     // eslint-disable-next-line
+     new ShareDbAccess(backend, { dontUseOldDocs: true })
+
+     for (const path in ORM) {
+       const ormEntity = ORM[path].OrmEntity
+
+       const { access } = ormEntity
+       const isFactory = !!ormEntity.factory
+
+       // TODO:
+       // - move registerOrmRulesFromFactory and registerOrmRules to this library
+       // - rewrite factories check to not use model anymore
+       if (isFactory) {
+         throw Error('Sharedb-access does not support ORM factories yet')
+         // registerOrmRulesFromFactory(backend, path, ormEntity)
+       } else if (access) {
+         registerOrmRules(backend, path, access)
+       }
+     }
+
+     console.log('sharedb-access is working')
+   }
+
+   if (serverAggregate) {
+     initServerAggregate(backend, { customCheck: serverAggregate?.customCheck, models })
+   }
+
+   if (validateSchema && process.env.NODE_ENV !== 'production') {
+     initValidateSchema(backend, { models })
+   }
+
+   backend.on('client', (client, reject) => {
+     const req = client.upgradeReq
+     if (!req) return
+
+     const userId = client.session?.userId || req.session?.userId
+
+     // TODO: rewrite to use $ here, or create a separate root $ for each user
+     // if (!global.__clients[userId]) {
+     //   const model = backend.createModel()
+     //   global.__clients[userId] = { model }
+     // }
+
+     usersConnectionCounter[userId] = ~~usersConnectionCounter[userId] + 1
+
+     const userAgent = req.headers && req.headers['user-agent']
+     if (verbose) console.log('[WS OPENED]:', userId, userAgent)
+
+     client.once('close', () => {
+       if (verbose) console.log('[WS CLOSED]', userId)
+
+       usersConnectionCounter[userId] -= 1
+
+       // TODO: rewrite to use $ here, or create a separate root $ for each user
+       // if (usersConnectionCounter[userId] <= 0) {
+       //   global.__clients[userId].model.close()
+       //   delete global.__clients[userId]
+       // }
+     })
+   })
+
+   return backend
+ }
package/package.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "name": "@teamplay/backend",
+   "version": "0.1.5",
+   "description": "Create new ShareDB backend instance",
+   "type": "module",
+   "main": "index.js",
+   "license": "MIT",
+   "publishConfig": {
+     "access": "public"
+   },
+   "exports": {
+     ".": "./index.js"
+   },
+   "dependencies": {
+     "@teamplay/schema": "^0.1.5",
+     "@teamplay/server-aggregate": "^0.1.5",
+     "@teamplay/sharedb-access": "^0.1.5",
+     "@teamplay/sharedb-schema": "^0.1.5",
+     "@teamplay/utils": "^0.1.5",
+     "@types/ioredis-mock": "^8.2.5",
+     "ioredis": "^5.3.2",
+     "ioredis-mock": "^8.9.0",
+     "mongodb": "^6.0.0",
+     "redlock": "^3.0.0",
+     "sharedb": "^5.0.0",
+     "sharedb-hooks": "~4.0.0",
+     "sharedb-mingo-memory": "^3.0.0",
+     "sharedb-mongo": "^4.1.2",
+     "sharedb-redis-pubsub": "^2.0.1",
+     "sqlite3": "^5.1.6",
+     "uuid": "^9.0.1"
+   },
+   "gitHead": "ad85edf62088d1e8d28e9b5e22f2dfb5dfc00bec"
+ }
package/redis/index.js ADDED
@@ -0,0 +1,94 @@
+ import { readFileSync } from 'fs'
+ import Redis from 'ioredis'
+ import RedisMock from 'ioredis-mock'
+ import Redlock from 'redlock'
+ import redisPubSub from 'sharedb-redis-pubsub'
+
+ export const {
+   redis,
+   redisObserver
+ } = getUniversalRedis({
+   disableRedis: process.env.NO_REDIS,
+   redisOpts: process.env.REDIS_OPTS,
+   redisUrl: process.env.REDIS_URL,
+   keyPrefix: generatePrefix({
+     mongoUrl: process.env.MONGO_URL,
+     baseUrl: process.env.BASE_URL
+   })
+ })
+
+ export const pubsub = redisPubSub({
+   client: redis,
+   observer: redisObserver
+ })
+
+ export const redlock = getRedlock(redis)
+
+ export { Redlock }
+
+ function getUniversalRedis ({ disableRedis, redisOpts, redisUrl, keyPrefix }) {
+   if (!disableRedis) {
+     if (typeof redisOpts === 'string') {
+       redisOpts = JSON.parse(redisOpts)
+       let tls = {}
+
+       if (redisOpts.key) {
+         tls = {
+           key: readFileSync(redisOpts.key),
+           cert: readFileSync(redisOpts.cert),
+           ca: readFileSync(redisOpts.ca)
+         }
+       }
+
+       const options = {
+         sentinels: redisOpts.sentinels,
+         sslPort: redisOpts.ssl_port || '6380',
+         tls,
+         name: 'mymaster',
+         db: redisOpts.db || 0,
+         password: redisOpts.password,
+         keyPrefix
+       }
+
+       return {
+         redis: new Redis(options),
+         redisObserver: new Redis(options)
+       }
+     } else if (redisUrl) {
+       return {
+         redis: new Redis(redisUrl, { keyPrefix }),
+         redisObserver: new Redis(redisUrl, { keyPrefix })
+       }
+     }
+   }
+   return {
+     redis: new RedisMock({ keyPrefix }),
+     redisObserver: new RedisMock({ keyPrefix })
+   }
+ }
+
+ function getRedlock (redis) {
+   return new Redlock([redis], {
+     driftFactor: 0.01,
+     retryCount: 2,
+     retryDelay: 10,
+     retryJitter: 10
+   })
+ }
+
+ // Use prefix for ShareDB's pubsub. This prevents issues with multiple
+ // projects using the same redis db.
+ // We use a combination of MONGO_URL and BASE_URL to generate a simple
+ // hash because together they are going to be unique no matter whether
+ // it's run on localhost or on the production server.
+ // ref: https://github.com/share/sharedb/issues/420
+ function generatePrefix ({ mongoUrl, baseUrl }) {
+   return '_' + simpleNumericHash('' + mongoUrl + baseUrl)
+ }
+
+ // ref: https://gist.github.com/hyamamoto/fd435505d29ebfa3d9716fd2be8d42f0?permalink_comment_id=2694461#gistcomment-2694461
+ function simpleNumericHash (s) {
+   let i, h
+   for (i = 0, h = 0; i < s.length; i++) h = Math.imul(31, h) + s.charCodeAt(i) | 0
+   return h
+ }
package/redis/maybeFlushRedis.js ADDED
@@ -0,0 +1,54 @@
+ import { redis, redlock, Redlock } from './index.js'
+
+ export default function maybeFlushRedis () {
+   redis.once('connect', _maybeFlushRedis)
+ }
+
+ async function _maybeFlushRedis () {
+   // Always flush redis db in development or if a force env flag is specified.
+   if (process.env.NODE_ENV !== 'production' || process.env.FORCE_REDIS_FLUSH) {
+     await flushRedis()
+     return
+   }
+
+   // In production we flush redis db once a day using locking in redis itself.
+   const ONE_DAY = 1000 * 60 * 60 * 24
+   const LOCK_FLUSH_DB_KEY = 'startupjs_service_flushdb'
+
+   try {
+     try {
+       await redlock.lock(LOCK_FLUSH_DB_KEY, ONE_DAY)
+     } catch (err) {
+       if (err instanceof Redlock.LockError) {
+         console.log('>> No need to do Redis Flush DB yet (lock for one day is still present)')
+         return
+       } else { throw err }
+     }
+
+     console.log('>>> FLUSHING REDIS DB (this should happen only once a day)')
+     await flushRedis()
+
+     // Re-lock right away.
+     console.log('>>> RE-LOCK REDIS DB FLUSH CHECK (for one day)')
+     try {
+       await redlock.lock(LOCK_FLUSH_DB_KEY, ONE_DAY)
+     } catch (err) {
+       console.error('Error while re-locking flushdb redis lock!\n' + err)
+     }
+   } catch (err) {
+     console.error('Error while performing redis DB flushing!\n' + err)
+   }
+ }
+
+ function flushRedis () {
+   return new Promise(resolve => {
+     redis.flushdb((err, didSucceed) => {
+       if (err) {
+         console.error('Redis flushdb err:', err)
+       } else {
+         console.log('Redis flushdb success:', didSucceed)
+       }
+       resolve()
+     })
+   })
+ }