wingbot-mongodb 4.2.1 → 4.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mocharc.js +1 -1
- package/baseStorage.d.ts +31 -0
- package/package.json +1 -1
- package/src/compact.js +173 -0
package/.mocharc.js
CHANGED
package/baseStorage.d.ts
ADDED
@@ -0,0 +1,31 @@
+'use strict';
+
+declare namespace wingbotmongodb {
+
+    type Collection<T> = import('mongodb').Collection<T>;
+    type Db = import('mongodb').Db;
+    type CreateIndexesOptions = import('mongodb').CreateIndexesOptions;
+    type ObjectId = import('mongodb').ObjectId;
+
+    export class BaseStorage<T = {}> {
+
+        constructor (mongoDb: Db|{():Promise<Db>}, collectionName: string, log?: {error:Function, log:Function}, isCosmo?: boolean)
+
+        _getCollection (forRead?: boolean): Promise<Collection<T>>
+
+        public addFixtureDoc (...any: any)
+
+        public addIndex (index: object, options: CreateIndexesOptions)
+
+        protected _id (id: string): ObjectId
+
+        protected _expandObjectToSet (attr: string|null, obj: {[key: string]: any}, nested?: boolean): {[key: string]: any}
+
+        protected _log: { log: Function, error: Function };
+
+        public preHeat(): Promise<void>
+
+        public drop (): Promise<void>
+    }
+
+}
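The declaration above describes the public and protected surface of BaseStorage. Below is a minimal usage sketch of how a storage subclass might build on it, assuming BaseStorage is re-exported from the package root and that indexes registered via addIndex are ensured before _getCollection resolves; the TokenStorage class, collection name and index are illustrative, not part of the package.

'use strict';

// Illustrative subclass only; names are assumptions, not package API.
const { BaseStorage } = require('wingbot-mongodb');

class TokenStorage extends BaseStorage {

    constructor (mongoDb, collectionName = 'tokens', log = console, isCosmo = false) {
        super(mongoDb, collectionName, log, isCosmo);

        // register an index definition (presumably ensured lazily by BaseStorage)
        this.addIndex({ senderId: 1 }, { name: 'senderId_1' });
    }

    async findBySenderId (senderId) {
        // _getCollection(true) resolves the read-side collection handle
        const collection = await this._getCollection(true);
        return collection.findOne({ senderId });
    }
}

module.exports = TokenStorage;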
package/package.json
CHANGED
package/src/compact.js
ADDED
@@ -0,0 +1,173 @@
+/*
+ * @author Vojtěch Jedlička
+ */
+'use strict';
+
+const { MongoClient } = require('mongodb');
+const defaultLogger = require('./defaultLogger');
+
+/** @typedef {import('mongodb').Db} Db */
+
+/**
+ * @param {Db} db
+ * @returns {Promise<string[]>}
+ */
+async function getCompactableCollections (db) {
+    const collections = await db
+        .listCollections({ type: 'collection' })
+        .toArray();
+    return collections
+        .map((v) => v.name)
+        .filter(
+            (name) => !name.startsWith('system.')
+                && !name.startsWith('admin.')
+                && !name.includes('.chunks')
+                && !name.includes('.files')
+                && name !== 'oplog.rs'
+        );
+}
+
+/**
+ * Returns host addresses of replica set nodes
+ *
+ * @param {Db} db
+ * @returns {Promise<{primary: string, secondaries: string[]}>}
+ */
+async function getReplicaSetNodes (db) {
+    const admin = db.admin();
+    const status = await admin.command({ replSetGetStatus: 1 });
+
+    const primary = status.members.find((member) => member.state === 1);
+    const secondaries = status.members.filter((member) => member.state === 2);
+
+    return {
+        primary: primary ? primary.name : null,
+        secondaries: secondaries.map((member) => member.name)
+    };
+}
+
+/**
+ * Compacts all collections on a given replica set node
+ *
+ * @param {object} props
+ * @param {string} props.nodeUrl
+ * @param {boolean} [props.force]
+ * @param {string} props.dbUrl
+ * @param {string} props.dbName
+ * @param {string[]} props.collections
+ * @param {any} props.log
+ * @returns {Promise<{bytesFreed:number}>}
+ */
+async function compactNode ({
+    nodeUrl, collections, force = false, dbUrl, dbName, log = defaultLogger
+}) {
+    log.info(`Starting compaction for node: ${nodeUrl}`);
+
+    const nodeHost = nodeUrl.replace('mongodb://', '');
+
+    const originalUrl = new URL(
+        dbUrl.replace('mongodb+srv://', 'https://')
+    );
+    const { username } = originalUrl;
+    const { password } = originalUrl;
+
+    const authPart = username && password ? `${username}:${password}@` : '';
+    const directConnectionUrl = `mongodb://${authPart}${nodeHost}/${dbName}?authSource=admin&ssl=true`;
+
+    const client = new MongoClient(directConnectionUrl, {
+        directConnection: true,
+        serverSelectionTimeoutMS: 30000,
+        connectTimeoutMS: 30000
+    });
+
+    try {
+        await client.connect();
+        const db = client.db(dbName);
+
+        const compactionPromises = collections.map(async (collectionName) => {
+            try {
+                const result = await db.command({
+                    compact: collectionName,
+                    force
+                });
+                log.info(
+                    `Successfully compacted ${collectionName} on ${nodeUrl}:`,
+                    result
+                );
+
+                return result;
+            } catch (error) {
+                log.error(`Failed to compact ${collectionName} on ${nodeUrl}:`, error);
+                throw error;
+            }
+        });
+
+        const results = await Promise.allSettled(compactionPromises);
+
+        const bytesFreed = results
+            .filter((result) => result.status === 'fulfilled')
+            .reduce((a, p) => a + p.value.bytesFreed, 0);
+
+        log.info(`Completed compaction for node: ${nodeUrl}`);
+
+        return { bytesFreed };
+    } catch (e) {
+        log.error(e);
+        return { bytesFreed: 0 };
+    } finally {
+        await client.close();
+    }
+}
+
+/**
+ * @param {Db} db
+ * @param {string} dbUrl
+ * @param {string} dbName
+ * @param {any} log
+ * @returns {Promise<{bytesFreed: number}>}
+ */
+async function compact (db, dbUrl, dbName, log = defaultLogger) {
+    const collections = await getCompactableCollections(db);
+    const replicaSetNodes = await getReplicaSetNodes(db);
+
+    log.info('Starting replica set compaction...');
+    log.info(`Collections to compact: ${collections.join(', ')}`);
+    log.info(`Primary: ${replicaSetNodes.primary}`);
+    log.info(`Secondaries: ${replicaSetNodes.secondaries.join(', ')}`);
+
+    let bytesFreed = 0;
+
+    for (const secondary of replicaSetNodes.secondaries) {
+        const result = await compactNode({
+            nodeUrl: secondary, dbUrl, dbName, collections, log
+        });
+
+        bytesFreed += result.bytesFreed;
+    }
+
+    if (replicaSetNodes.primary) {
+        log.info(
+            `Compacting primary ${replicaSetNodes.primary}`
+        );
+
+        const result = await compactNode({
+            nodeUrl: replicaSetNodes.primary,
+            collections,
+            force: true,
+            dbUrl,
+            dbName,
+            log
+        });
+
+        bytesFreed += result.bytesFreed;
+    } else {
+        log.warn('No primary replica set node!');
+    }
+
+    return { bytesFreed };
+}
+
+module.exports = compact;
+module.exports.compactNode = compactNode;
+module.exports.getCompactableCollections = getCompactableCollections;
+module.exports.getReplicaSetNodes = getReplicaSetNodes;
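The new module compacts every eligible collection on each secondary first and then on the primary with force: true, summing the reported bytesFreed. A minimal invocation sketch follows; the deep require path, environment variables and use of console as the logger are assumptions for illustration, since the diff does not show the module being exported from the package index.

'use strict';

// Illustrative script only; connection details and the require path are assumptions.
const { MongoClient } = require('mongodb');
const compact = require('wingbot-mongodb/src/compact');

const DB_URL = process.env.MONGODB_URL; // e.g. a mongodb+srv:// connection string
const DB_NAME = process.env.MONGODB_DB || 'wingbot';

async function run () {
    const client = new MongoClient(DB_URL);
    await client.connect();

    try {
        // secondaries are compacted first, then the primary with { force: true }
        const { bytesFreed } = await compact(client.db(DB_NAME), DB_URL, DB_NAME, console);
        console.log(`Compaction finished, bytes freed: ${bytesFreed}`);
    } finally {
        await client.close();
    }
}

run().catch((e) => { console.error(e); process.exit(1); });

Note that compact() needs a user with rights to run replSetGetStatus and compact, and a replica set deployment; on a standalone server getReplicaSetNodes would fail.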