ueberdb2 5.0.38 → 5.0.40
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/databases/cassandra_db.js +190 -0
- package/dist/databases/couch_db.js +133 -0
- package/dist/databases/dirty_db.js +62 -0
- package/dist/databases/dirty_git_db.js +68 -0
- package/dist/databases/elasticsearch_db.js +269 -0
- package/dist/databases/memory_db.js +37 -0
- package/dist/databases/mock_db.js +40 -0
- package/dist/databases/mongodb_db.js +95 -0
- package/dist/databases/mssql_db.js +132 -0
- package/dist/databases/mysql_db.js +149 -0
- package/dist/databases/postgres_db.js +125 -0
- package/dist/databases/postgrespool_db.js +10 -0
- package/dist/databases/redis_db.js +93 -0
- package/dist/databases/rethink_db.js +92 -0
- package/dist/databases/rusty_db.js +49 -0
- package/dist/databases/sqlite_db.js +75 -0
- package/dist/databases/surrealdb_db.js +135 -0
- package/dist/index.d.ts +1 -17
- package/dist/index.js +32 -2280
- package/dist/lib/AbstractDatabase.js +36 -0
- package/dist/lib/CacheAndBufferLayer.js +484 -0
- package/dist/lib/logging.js +20 -0
- package/package.json +1 -1
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
const require_runtime = require("../_virtual/_rolldown/runtime.js");
|
|
2
|
+
const require_AbstractDatabase = require("../lib/AbstractDatabase.js");
|
|
3
|
+
let assert = require("assert");
|
|
4
|
+
assert = require_runtime.__toESM(assert);
|
|
5
|
+
let buffer = require("buffer");
|
|
6
|
+
let crypto = require("crypto");
|
|
7
|
+
let _elastic_elasticsearch = require("@elastic/elasticsearch");
|
|
8
|
+
//#region databases/elasticsearch_db.ts
|
|
9
|
+
/**
|
|
10
|
+
* 2015 Visionist, Inc.
|
|
11
|
+
*
|
|
12
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
13
|
+
* you may not use this file except in compliance with the License.
|
|
14
|
+
* You may obtain a copy of the License at
|
|
15
|
+
*
|
|
16
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
17
|
+
*
|
|
18
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
19
|
+
* distributed under the License is distributed on an "AS-IS" BASIS,
|
|
20
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
21
|
+
* See the License for the specific language governing permissions and
|
|
22
|
+
* limitations under the License.
|
|
23
|
+
*/
|
|
24
|
+
// Version tag of the on-disk document schema; embedded in the index name
// (`<base_index>_s2`) so v1 and v2 data can coexist during migration.
const schema = "2";
|
|
25
|
+
/**
 * Derives an Elasticsearch document id from a record key.
 *
 * Elasticsearch rejects ids longer than 512 bytes, so oversized keys are
 * replaced by the SHA-512 hex digest of their bytes; anything that fits is
 * used verbatim.
 *
 * @param {string} key Record key.
 * @returns {string} The key itself, or a 128-character hex digest when the
 *     UTF-8 encoding of the key exceeds 512 bytes.
 */
const keyToId = (key) => {
  const raw = buffer.Buffer.from(key);
  if (raw.length <= 512) return key;
  return (0, crypto.createHash)("sha512").update(raw).digest("hex");
};
|
|
29
|
+
// Index mapping for schema-v2 documents: the record key is stored as a
// searchable `wildcard` field (used by findKeys), while the record value is
// stored but not indexed (`enabled: false`) — it is only fetched, never
// queried against.
const mappings = { properties: {
  key: { type: "wildcard" },
  value: {
    type: "object",
    enabled: false
  }
} };
|
|
36
|
+
const legacyDocToSchema2Key = (index, id, type, v1BaseIndex) => {
|
|
37
|
+
const legacyType = typeof type === "string" && type !== "" && type !== "_doc" ? type : null;
|
|
38
|
+
if (v1BaseIndex && index !== v1BaseIndex) {
|
|
39
|
+
const parts = index.slice(v1BaseIndex.length + 1).split("-");
|
|
40
|
+
if (parts.length !== 2) throw new Error(`unable to migrate records from index ${index} due to data ambiguity`);
|
|
41
|
+
if (legacyType != null) return `${parts[0]}:${decodeURIComponent(legacyType)}:${parts[1]}:${id}`;
|
|
42
|
+
const idParts = id.split(":");
|
|
43
|
+
if (idParts.length !== 2) throw new Error(`unable to migrate records from index ${index} due to data ambiguity`);
|
|
44
|
+
return `${parts[0]}:${idParts[0]}:${parts[1]}:${idParts[1]}`;
|
|
45
|
+
}
|
|
46
|
+
if (legacyType != null) return `${legacyType}:${id}`;
|
|
47
|
+
const idParts = id.split(":");
|
|
48
|
+
if (idParts.length !== 2) throw new Error(`unable to migrate records from index ${index} due to missing legacy type metadata`);
|
|
49
|
+
return `${idParts[0]}:${idParts[1]}`;
|
|
50
|
+
};
|
|
51
|
+
/**
 * Copies every record from the legacy (schema v1) indices into the new
 * schema-v2 index. v1 spread records across `v1BaseIndex` plus per-type
 * indices named `${v1BaseIndex}-<a>-<b>`; v2 keeps everything in `v2Index`,
 * keyed by the key reconstructed via legacyDocToSchema2Key().
 *
 * Uses the scroll API: one scroll context per source index, processed
 * round-robin through a work queue, writing each page into `v2Index` with a
 * bulk request. Scroll contexts are always released, even on failure.
 *
 * @param client Connected Elasticsearch client.
 * @param v1BaseIndex Base name of the legacy indices to read from.
 * @param v2Index Destination index (must already exist with v2 mappings).
 * @param logger Logger whose info() is used for progress reporting.
 */
const migrateToSchema2 = async (client, v1BaseIndex, v2Index, logger) => {
  let recordsMigratedLastLogged = 0;
  let recordsMigrated = 0;
  // Per-index total hit counts, used only for progress log messages.
  const totals = /* @__PURE__ */ new Map();
  logger.info(`Attempting elasticsearch record migration from schema v1 at base index ${v1BaseIndex} to schema v2 at index ${v2Index}...`);
  // Legacy data may live in the base index and/or the per-type indices.
  const indices = await client.indices.get({ index: [v1BaseIndex, `${v1BaseIndex}-*-*`] });
  const scrollIds = /* @__PURE__ */ new Map();
  // Work queue of { index, res } pairs; each entry is one page of hits.
  const q = [];
  try {
    // Open one scroll per source index and seed the queue with its first page.
    for (const index of Object.keys(indices)) {
      const res = await client.search({
        index,
        scroll: "10m"
      });
      scrollIds.set(index, res._scroll_id);
      q.push({
        index,
        res
      });
    }
    while (q.length) {
      const { index, res: { hits: { hits, total: { value: total } } } } = q.shift();
      // An empty page means this index's scroll is exhausted; drop it.
      if (hits.length === 0) continue;
      totals.set(index, total);
      const body = [];
      for (const { _id, _type, _source: { val } } of hits) {
        // Rebuild the schema-v2 record key from the legacy index/type/id.
        const key = legacyDocToSchema2Key(index, _id, _type, v1BaseIndex);
        body.push({ index: { _id: keyToId(key) } }, {
          key,
          value: JSON.parse(val)
        });
      }
      await client.bulk({
        index: v2Index,
        body
      });
      recordsMigrated += hits.length;
      // Log progress at most once per 100 migrated records.
      if (Math.floor(recordsMigrated / 100) > Math.floor(recordsMigratedLastLogged / 100)) {
        const total = [...totals.values()].reduce((a, b) => a + b, 0);
        logger.info(`Migrated ${recordsMigrated} records out of ${total}`);
        recordsMigratedLastLogged = recordsMigrated;
      }
      // Fetch the next page for this index and requeue it.
      q.push({
        index,
        res: await client.scroll({
          scroll: "5m",
          scroll_id: scrollIds.get(index)
        })
      });
    }
    logger.info(`Finished migrating ${recordsMigrated} records`);
  } finally {
    // Always release the server-side scroll contexts.
    await Promise.all([...scrollIds.values()].map((scrollId) => client.clearScroll({ scroll_id: scrollId })));
  }
};
|
|
106
|
+
/**
 * ueberdb adapter backed by Elasticsearch (schema v2): all records live in a
 * single aliased index whose documents are `{ key, value }`, with `key`
 * searchable via wildcard queries and `value` stored but not indexed.
 */
var elasticsearch_db_default = class extends require_AbstractDatabase.default {
  _client;     // Elasticsearch Client; null until init() and after close().
  _index;      // Alias name of the schema-v2 index (`<base_index>_s2`).
  _indexClean; // False while writes may not yet be visible to searches.
  _q;          // Base request options ({ index: this._index }).
  /**
   * @param {Object} settings Optional overrides for host, port, base_index,
   *     migrate_to_newer_schema, and api; `json` is always forced to false
   *     because values are stored as native JSON documents.
   */
  constructor(settings) {
    super(settings);
    this._client = null;
    this.settings = {
      host: "127.0.0.1",
      port: "9200",
      base_index: "ueberes",
      migrate_to_newer_schema: false,
      api: "7.6",
      ...settings || {},
      json: false
    };
    this._index = `${this.settings.base_index}_s${schema}`;
    this._q = { index: this._index };
    this._indexClean = true;
  }
  get isAsync() {
    return true;
  }
  // Forces an index refresh before searches so recent writes become visible;
  // no-op when nothing has been written since the last refresh.
  async _refreshIndex() {
    if (this._indexClean) return;
    this._indexClean = true;
    await this._client.indices.refresh(this._q);
  }
  /**
   * Initialize the elasticsearch client, then ping the server to ensure that a
   * connection was made. Creates the schema-v2 index (behind an alias) if it
   * does not exist yet, migrating legacy schema-v1 data into it when
   * `migrate_to_newer_schema` is enabled.
   */
  async init() {
    const client = new _elastic_elasticsearch.Client({ node: `http://${this.settings.host}:${this.settings.port}` });
    await client.ping();
    if (!await client.indices.exists({ index: this._index })) {
      let tmpIndex;
      const exists = await client.indices.exists({ index: this.settings.base_index });
      if (exists && !this.settings.migrate_to_newer_schema) throw new Error(`Data exists under the legacy index (schema) named ${this.settings.base_index}. Set migrate_to_newer_schema to true to copy the existing data to a new index named ${this._index}.`);
      // Find an unused name for the backing index; `this._index` is an alias.
      let attempt = 0;
      while (true) {
        tmpIndex = `${this._index}_${exists ? "migrate_attempt_" : "i"}${attempt++}`;
        if (!await client.indices.exists({ index: tmpIndex })) break;
      }
      await client.indices.create({
        index: tmpIndex,
        mappings
      });
      if (exists) await migrateToSchema2(client, this.settings.base_index, tmpIndex, this.logger);
      // Publish the new index under the stable alias only once it is ready.
      await client.indices.putAlias({
        index: tmpIndex,
        name: this._index
      });
    }
    const indices = Object.values(await client.indices.get({ index: this._index }));
    (0, assert.equal)(indices.length, 1);
    try {
      assert.default.deepEqual(indices[0].mappings, mappings);
    } catch (err) {
      // Mapping drift is tolerated (warn only) rather than refusing to start.
      this.logger.warn(`Index ${this._index} mappings does not match expected; attempting to use index anyway. Details: ${err}`);
    }
    this._client = client;
  }
  /**
   * This function provides read functionality to the database.
   *
   * @param {String} key Key
   * @returns The stored value, or null when no such record exists (404s are
   *     swallowed via the `ignore` option).
   */
  async get(key) {
    const res = await this._client.get({
      ...this._q,
      id: keyToId(key)
    }, { ignore: [404] });
    if (!res.found) return null;
    return res._source.value;
  }
  /**
   * @param key Search key, which uses an asterisk (*) as the wild card.
   * @param notKey Used to filter the result set
   *
   * NOTE(review): search() is issued without an explicit `size`, so at most
   * the server's default number of hits (typically 10) is returned — confirm
   * whether findKeys is expected to return every match.
   */
  async findKeys(key, notKey) {
    await this._refreshIndex();
    const q = {
      ...this._q,
      body: { query: { bool: {
        filter: { wildcard: { key: { value: key } } },
        ...notKey == null ? {} : { must_not: { wildcard: { key: { value: notKey } } } }
      } } }
    };
    const { hits } = await this._client.search(q);
    return hits.hits.map((h) => h._source.key);
  }
  /**
   * This function provides write functionality to the database.
   *
   * @param {String} key Record identifier.
   * @param {JSON|String} value The value to store in the database.
   */
  async set(key, value) {
    this._indexClean = false;
    await this._client.index({
      ...this._q,
      id: keyToId(key),
      body: {
        key,
        value
      }
    });
  }
  /**
   * This function provides delete functionality to the database.
   *
   * Deleting a nonexistent record is a no-op (404 responses are ignored).
   *
   * @param {String} key Record identifier.
   */
  async remove(key) {
    this._indexClean = false;
    await this._client.delete({
      ...this._q,
      id: keyToId(key)
    }, { ignore: [404] });
  }
  /**
   * This uses the bulk upload functionality of elasticsearch (url:port/_bulk).
   *
   * The CacheAndBufferLayer will periodically (every this.settings.writeInterval)
   * flush writes that have already been done in the local cache out to the database.
   *
   * @param {Array} bulk An array of JSON data in the format:
   * {"type":type, "key":key, "value":value}
   */
  async doBulk(bulk) {
    const operations = [];
    for (const { type, key, value } of bulk) {
      this._indexClean = false;
      switch (type) {
        case "set":
          // Bulk API format: action line followed by the document line.
          operations.push({ index: { _id: keyToId(key) } });
          operations.push({
            key,
            value
          });
          break;
        case "remove":
          operations.push({ delete: { _id: keyToId(key) } });
          break;
        default:
      }
    }
    await this._client.bulk({
      ...this._q,
      body: operations
    });
  }
  // NOTE(review): client.close() returns a promise that is not awaited, so
  // close() may resolve before the connection is fully torn down — confirm.
  async close() {
    if (this._client != null) this._client.close();
    this._client = null;
  }
};
//#endregion
exports.default = elasticsearch_db_default;
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
const require_AbstractDatabase = require("../lib/AbstractDatabase.js");
|
|
2
|
+
//#region databases/memory_db.ts
|
|
3
|
+
/**
 * In-memory key/value backend backed by a `Map`. Nothing is persisted, so it
 * is mainly useful for tests. Caching and write buffering are switched off
 * because the store itself is already memory-resident.
 */
var MemoryDB = class extends require_AbstractDatabase.default {
  _data;
  constructor(settings) {
    super(settings);
    this.settings = settings;
    // The caller's settings object is deliberately mutated: values are kept
    // raw (no JSON round-trip) and the cache/buffer layer is bypassed.
    settings.json = false;
    settings.cache = 0;
    settings.writeInterval = 0;
    this._data = null;
  }
  get isAsync() {
    return true;
  }
  init() {
    // Tests may inject a pre-populated Map via settings.data.
    this._data = this.settings.data || /* @__PURE__ */ new Map();
  }
  close() {
    this._data = null;
  }
  get(key) {
    return this._data.get(key);
  }
  set(key, value) {
    this._data.set(key, value);
  }
  remove(key) {
    this._data.delete(key);
  }
  findKeys(key, notKey) {
    const matcher = this.createFindRegex(key, notKey);
    const matches = [];
    for (const candidate of this._data.keys()) {
      if (matcher.test(candidate)) matches.push(candidate);
    }
    return matches;
  }
};
//#endregion
exports.default = MemoryDB;
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
const require_runtime = require("../_virtual/_rolldown/runtime.js");
|
|
2
|
+
let events = require("events");
|
|
3
|
+
events = require_runtime.__toESM(events);
|
|
4
|
+
//#region databases/mock_db.ts
|
|
5
|
+
/**
 * Test double for a database driver: every operation simply re-emits its
 * arguments as an event, letting tests observe the calls and invoke the
 * callbacks themselves. The instance registers itself on the settings object
 * (`settings.mock`) so tests can reach it.
 */
var mock_db_default = class extends events.default.EventEmitter {
  settings;
  mock;
  constructor(settings) {
    super();
    this.settings = {
      writeInterval: 1,
      ...settings
    };
    settings.mock = this;
    // NOTE(review): this overwrites the merged object assigned above, so the
    // writeInterval default is discarded unless the caller supplied one.
    // Looks unintentional — confirm against the TypeScript source.
    this.settings = settings;
  }
  close(cb) {
    this.emit("close", cb);
  }
  doBulk(ops, cb) {
    this.emit("doBulk", ops, cb);
  }
  findKeys(key, notKey, cb) {
    this.emit("findKeys", key, notKey, cb);
  }
  get(key, cb) {
    this.emit("get", key, cb);
  }
  async init(cb) {
    // NOTE(review): unlike every other method, cb() is invoked immediately
    // and its RETURN VALUE (not the callback) is emitted — verify intent.
    this.emit("init", cb());
  }
  remove(key, cb) {
    this.emit("remove", key, cb);
  }
  set(key, value, cb) {
    this.emit("set", key, value, cb);
  }
};
//#endregion
exports.default = mock_db_default;
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
require("../_virtual/_rolldown/runtime.js");
|
|
2
|
+
const require_AbstractDatabase = require("../lib/AbstractDatabase.js");
|
|
3
|
+
let mongodb = require("mongodb");
|
|
4
|
+
//#region databases/mongodb_db.ts
|
|
5
|
+
/**
|
|
6
|
+
* 2020 Sylchauf
|
|
7
|
+
*
|
|
8
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
9
|
+
* you may not use this file except in compliance with the License.
|
|
10
|
+
* You may obtain a copy of the License at
|
|
11
|
+
*
|
|
12
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
13
|
+
*
|
|
14
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
15
|
+
* distributed under the License is distributed on an "AS-IS" BASIS,
|
|
16
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
17
|
+
* See the License for the specific language governing permissions and
|
|
18
|
+
* limitations under the License.
|
|
19
|
+
*/
|
|
20
|
+
/**
 * ueberdb adapter backed by MongoDB. Records are stored as
 * `{ _id: key, value }` documents in a single collection, and a 10-second
 * keepalive ping is rescheduled after every operation.
 */
var mongodb_db_default = class extends require_AbstractDatabase.default {
  interval;    // Keepalive timer handle; see schedulePing()/clearPing().
  database;
  client;
  collection;
  /**
   * @param {Object} settings Must include `url`. `database` (or its legacy
   *     alias `dbName`) selects the database; `collection` defaults to
   *     "ueberdb".
   */
  constructor(settings) {
    super(settings);
    this.settings = settings;
    if (!this.settings.url) throw new Error("You must specify a mongodb url");
    // Historical alias: some callers configure `dbName` instead of `database`.
    if (this.settings.database == null) this.settings.database = this.settings.dbName;
    if (!this.settings.collection) this.settings.collection = "ueberdb";
  }
  clearPing() {
    // clearInterval accepts the Timeout object directly; no need to convert
    // it to its numeric id via Symbol.toPrimitive first.
    if (this.interval) clearInterval(this.interval);
  }
  /** (Re)starts the 10s keepalive ping; called after every operation. */
  schedulePing() {
    this.clearPing();
    this.interval = setInterval(() => {
      // Best-effort keepalive: swallow failures so a transient network error
      // does not become an unhandled promise rejection (fatal in Node >= 15).
      this.database.command({ ping: 1 }).catch(() => {});
    }, 1e4);
  }
  /** Connects the client and resolves the collection, then calls back. */
  init(callback) {
    mongodb.MongoClient.connect(this.settings.url).then((v) => {
      this.client = v;
      this.database = v.db(this.settings.database);
      this.schedulePing();
      this.collection = this.database.collection(this.settings.collection);
      callback(null);
    }).catch((v) => {
      callback(v);
    });
  }
  /** Reads one record; missing documents yield `null`, not an error. */
  get(key, callback) {
    this.collection.findOne({ _id: key }).then((v) => {
      callback(null, v && v.value);
    }).catch((v) => {
      console.log(v);
      callback(v);
    });
    this.schedulePing();
  }
  /**
   * @param {String} key Wildcard pattern; asterisks are stripped and the rest
   *     is used as a substring $regex match.
   * @param {String} notKey Optional exclusion pattern, same translation.
   */
  findKeys(key, notKey, callback) {
    const selector = { $and: [{ _id: { $regex: `${key.replace(/\*/g, "")}` } }] };
    if (notKey) selector.$and.push({ _id: { $not: { $regex: `${notKey.replace(/\*/g, "")}` } } });
    this.collection.find(selector).map((i) => i._id).toArray().then((r) => {
      callback(null, r);
    }).catch((v) => callback(v));
    this.schedulePing();
  }
  /** Upserts one record; keys are limited to 100 characters. */
  set(key, value, callback) {
    if (key.length > 100) callback("Your Key can only be 100 chars");
    else this.collection.updateMany({ _id: key }, { $set: { value } }, { upsert: true }).then(() => callback(null)).catch((v) => callback(v));
    this.schedulePing();
  }
  remove(key, callback) {
    this.collection.deleteOne({ _id: key }).then((r) => callback(null, r)).catch((v) => callback(v));
    this.schedulePing();
  }
  /**
   * Applies a batch of {type, key, value} operations with an ordered bulk op.
   */
  doBulk(bulk, callback) {
    const bulkMongo = this.collection.initializeOrderedBulkOp();
    for (const i in bulk) if (bulk[i].type === "set") bulkMongo.find({ _id: bulk[i].key }).upsert().updateOne({ $set: { value: bulk[i].value } });
    else if (bulk[i].type === "remove") bulkMongo.find({ _id: bulk[i].key }).deleteOne();
    bulkMongo.execute().then((res) => {
      callback(null, res);
    }).catch((error) => {
      callback(error);
    });
    this.schedulePing();
  }
  close(callback) {
    this.clearPing();
    this.client.close().then((r) => callback(r));
  }
};
//#endregion
exports.default = mongodb_db_default;
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
const require_runtime = require("../_virtual/_rolldown/runtime.js");
|
|
2
|
+
const require_AbstractDatabase = require("../lib/AbstractDatabase.js");
|
|
3
|
+
let async = require("async");
|
|
4
|
+
async = require_runtime.__toESM(async);
|
|
5
|
+
let mssql = require("mssql");
|
|
6
|
+
mssql = require_runtime.__toESM(mssql);
|
|
7
|
+
//#region databases/mssql_db.ts
|
|
8
|
+
/**
|
|
9
|
+
* 2019 - exspecto@gmail.com
|
|
10
|
+
*
|
|
11
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
12
|
+
* you may not use this file except in compliance with the License.
|
|
13
|
+
* You may obtain a copy of the License at
|
|
14
|
+
*
|
|
15
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
16
|
+
*
|
|
17
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
18
|
+
* distributed under the License is distributed on an "AS-IS" BASIS,
|
|
19
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
20
|
+
* See the License for the specific language governing permissions and
|
|
21
|
+
* limitations under the License.
|
|
22
|
+
*
|
|
23
|
+
*
|
|
24
|
+
* Note: This requires MS SQL Server >= 2008 due to the usage of the MERGE statement
|
|
25
|
+
*
|
|
26
|
+
*/
|
|
27
|
+
/**
 * ueberdb adapter backed by MS SQL Server (>= 2008, because set()/doBulk()
 * rely on the MERGE statement). Records live in a `[store]` table with an
 * NVARCHAR(100) key and NTEXT value.
 */
var MSSQL = class extends require_AbstractDatabase.default {
  db; // mssql ConnectionPool, set once init() succeeds.
  constructor(settings) {
    super(settings);
    settings = settings || {};
    // Map ueberdb option names onto the names the mssql driver expects.
    if (settings.json != null) settings.parseJSON = settings.json;
    settings.requestTimeout = 3e5;
    settings.server = settings.host;
    this.settings = settings;
    // Caching/write-buffering is disabled; every operation hits the server.
    this.settings.cache = 0;
    this.settings.writeInterval = 0;
  }
  /** Connects the pool and creates the [store] table if it doesn't exist. */
  init(callback) {
    const sqlCreate = "IF OBJECT_ID(N'dbo.store', N'U') IS NULL BEGIN CREATE TABLE [store] ( [key] NVARCHAR(100) PRIMARY KEY, [value] NTEXT NOT NULL ); END";
    new mssql.default.ConnectionPool(this.settings).connect().then((pool) => {
      this.db = pool;
      new mssql.default.Request(this.db).query(sqlCreate, (err) => {
        callback(err);
      });
      this.db.on("error", (err) => {
        console.log(err);
      });
    }).catch((err) => {
      // Fix: a failed connection previously left init() hanging and surfaced
      // as an unhandled promise rejection; report it through the callback.
      callback(err);
    });
  }
  /** Reads one record; yields null when exactly one row was not found. */
  get(key, callback) {
    const request = new mssql.default.Request(this.db);
    request.input("key", mssql.default.NVarChar(100), key);
    request.query("SELECT [value] FROM [store] WHERE [key] = @key", (err, results) => {
      let value = null;
      if (!err && results && results.rowsAffected[0] === 1) value = results.recordset[0].value;
      callback(err, value);
    });
  }
  /**
   * @param {String} key Wildcard pattern; asterisks become SQL `%` for LIKE.
   * @param {String} notKey Optional exclusion pattern, same translation.
   */
  findKeys(key, notKey, callback) {
    const request = new mssql.default.Request(this.db);
    let query = "SELECT [key] FROM [store] WHERE [key] LIKE @key";
    key = key.replace(/\*/g, "%");
    request.input("key", mssql.default.NVarChar(100), key);
    if (notKey != null) {
      notKey = notKey.replace(/\*/g, "%");
      request.input("notkey", mssql.default.NVarChar(100), notKey);
      query += " AND [key] NOT LIKE @notkey";
    }
    request.query(query, (err, results) => {
      const value = [];
      if (!err && results && results.rowsAffected[0] > 0) for (let i = 0; i < results.recordset.length; i++) value.push(results.recordset[i].key);
      callback(err, value);
    });
  }
  /** Upserts one record via MERGE; keys are limited to 100 characters. */
  set(key, value, callback) {
    const request = new mssql.default.Request(this.db);
    if (key.length > 100) callback("Your Key can only be 100 chars");
    else {
      const query = "MERGE [store] t USING (SELECT @key [key], @value [value]) s ON t.[key] = s.[key] WHEN MATCHED AND s.[value] IS NOT NULL THEN UPDATE SET t.[value] = s.[value] WHEN NOT MATCHED THEN INSERT ([key], [value]) VALUES (s.[key], s.[value]);";
      request.input("key", mssql.default.NVarChar(100), key);
      request.input("value", mssql.default.NText, value);
      request.query(query, (err, info) => {
        // Success is signalled with an empty (falsy) string, not null.
        callback(err ? err.toString() : "");
      });
    }
  }
  remove(key, callback) {
    const request = new mssql.default.Request(this.db);
    request.input("key", mssql.default.NVarChar(100), key);
    request.query("DELETE FROM [store] WHERE [key] = @key", callback);
  }
  /**
   * Applies a batch of {type, key, value} operations: sets become MERGE
   * statements batched 100 per transaction, removes become one DELETE ... IN.
   *
   * FIXME: keys and values are interpolated directly into the SQL below,
   * which breaks (and is injectable) for values containing single quotes.
   * They should use parameter binding like set()/remove() do.
   */
  doBulk(bulk, callback) {
    const maxInserts = 100;
    const request = new mssql.default.Request(this.db);
    let firstReplace = true;
    let firstRemove = true;
    const replacements = [];
    let removeSQL = "DELETE FROM [store] WHERE [key] IN (";
    for (const i in bulk) if (bulk[i].type === "set") {
      if (firstReplace) {
        replacements.push("BEGIN TRANSACTION;");
        firstReplace = false;
      } else if (Number(i) % maxInserts === 0) replacements.push("\nCOMMIT TRANSACTION;\nBEGIN TRANSACTION;\n");
      replacements.push(`MERGE [store] t USING (SELECT '${bulk[i].key}' [key], '${bulk[i].value}' [value]) s`, "ON t.[key] = s.[key]", "WHEN MATCHED AND s.[value] IS NOT NULL THEN UPDATE SET t.[value] = s.[value]", "WHEN NOT MATCHED THEN INSERT ([key], [value]) VALUES (s.[key], s.[value]);");
    } else if (bulk[i].type === "remove") {
      if (!firstRemove) removeSQL += ",";
      firstRemove = false;
      removeSQL += `'${bulk[i].key}'`;
    }
    removeSQL += ");";
    replacements.push("COMMIT TRANSACTION;");
    async.default.parallel([(callback) => {
      if (!firstReplace) request.batch(replacements.join("\n"), (err, results) => {
        // Fix: the callback used to be invoked twice when err was set.
        if (err) return callback(err);
        callback(null, results);
      });
      else callback();
    }, (callback) => {
      if (!firstRemove) request.query(removeSQL, callback);
      else callback();
    }], (err, results) => {
      // Fix: the callback used to be invoked twice when err was set.
      if (err) return callback(err);
      callback(null, results);
    });
  }
  close(callback) {
    this.db && this.db.close(callback);
  }
};
//#endregion
exports.default = MSSQL;
|