mongodb 6.10.0-dev.20241106.sha.dc3fe957 → 6.10.0-dev.20241107.sha.e5582ed7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/admin.js +3 -2
- package/lib/admin.js.map +1 -1
- package/lib/beta.d.ts +558 -38
- package/lib/bulk/common.js +4 -4
- package/lib/bulk/common.js.map +1 -1
- package/lib/change_stream.js +111 -51
- package/lib/change_stream.js.map +1 -1
- package/lib/client-side-encryption/auto_encrypter.js +8 -5
- package/lib/client-side-encryption/auto_encrypter.js.map +1 -1
- package/lib/client-side-encryption/client_encryption.js +48 -18
- package/lib/client-side-encryption/client_encryption.js.map +1 -1
- package/lib/client-side-encryption/state_machine.js +43 -29
- package/lib/client-side-encryption/state_machine.js.map +1 -1
- package/lib/cmap/connection.js +78 -6
- package/lib/cmap/connection.js.map +1 -1
- package/lib/cmap/connection_pool.js +14 -9
- package/lib/cmap/connection_pool.js.map +1 -1
- package/lib/cmap/wire_protocol/on_data.js +5 -1
- package/lib/cmap/wire_protocol/on_data.js.map +1 -1
- package/lib/cmap/wire_protocol/responses.js +30 -0
- package/lib/cmap/wire_protocol/responses.js.map +1 -1
- package/lib/collection.js +62 -3
- package/lib/collection.js.map +1 -1
- package/lib/connection_string.js +2 -0
- package/lib/connection_string.js.map +1 -1
- package/lib/cursor/abstract_cursor.js +218 -38
- package/lib/cursor/abstract_cursor.js.map +1 -1
- package/lib/cursor/aggregation_cursor.js +29 -7
- package/lib/cursor/aggregation_cursor.js.map +1 -1
- package/lib/cursor/change_stream_cursor.js +2 -2
- package/lib/cursor/change_stream_cursor.js.map +1 -1
- package/lib/cursor/client_bulk_write_cursor.js +1 -1
- package/lib/cursor/client_bulk_write_cursor.js.map +1 -1
- package/lib/cursor/find_cursor.js +18 -8
- package/lib/cursor/find_cursor.js.map +1 -1
- package/lib/cursor/list_collections_cursor.js +1 -1
- package/lib/cursor/list_collections_cursor.js.map +1 -1
- package/lib/cursor/list_indexes_cursor.js +1 -1
- package/lib/cursor/list_indexes_cursor.js.map +1 -1
- package/lib/cursor/run_command_cursor.js +6 -4
- package/lib/cursor/run_command_cursor.js.map +1 -1
- package/lib/db.js +63 -3
- package/lib/db.js.map +1 -1
- package/lib/error.js +27 -2
- package/lib/error.js.map +1 -1
- package/lib/explain.js +57 -1
- package/lib/explain.js.map +1 -1
- package/lib/gridfs/download.js +31 -3
- package/lib/gridfs/download.js.map +1 -1
- package/lib/gridfs/index.js +49 -14
- package/lib/gridfs/index.js.map +1 -1
- package/lib/gridfs/upload.js +80 -22
- package/lib/gridfs/upload.js.map +1 -1
- package/lib/index.js +9 -5
- package/lib/index.js.map +1 -1
- package/lib/mongo_client.js +70 -1
- package/lib/mongo_client.js.map +1 -1
- package/lib/operations/aggregate.js +2 -2
- package/lib/operations/aggregate.js.map +1 -1
- package/lib/operations/bulk_write.js +7 -2
- package/lib/operations/bulk_write.js.map +1 -1
- package/lib/operations/client_bulk_write/client_bulk_write.js +3 -3
- package/lib/operations/client_bulk_write/client_bulk_write.js.map +1 -1
- package/lib/operations/client_bulk_write/executor.js +14 -3
- package/lib/operations/client_bulk_write/executor.js.map +1 -1
- package/lib/operations/command.js +5 -2
- package/lib/operations/command.js.map +1 -1
- package/lib/operations/count.js +2 -2
- package/lib/operations/count.js.map +1 -1
- package/lib/operations/create_collection.js +8 -7
- package/lib/operations/create_collection.js.map +1 -1
- package/lib/operations/delete.js +6 -6
- package/lib/operations/delete.js.map +1 -1
- package/lib/operations/distinct.js +2 -2
- package/lib/operations/distinct.js.map +1 -1
- package/lib/operations/drop.js +8 -8
- package/lib/operations/drop.js.map +1 -1
- package/lib/operations/estimated_document_count.js +2 -2
- package/lib/operations/estimated_document_count.js.map +1 -1
- package/lib/operations/execute_operation.js +16 -10
- package/lib/operations/execute_operation.js.map +1 -1
- package/lib/operations/find.js +6 -3
- package/lib/operations/find.js.map +1 -1
- package/lib/operations/find_and_modify.js +2 -2
- package/lib/operations/find_and_modify.js.map +1 -1
- package/lib/operations/get_more.js +2 -1
- package/lib/operations/get_more.js.map +1 -1
- package/lib/operations/indexes.js +6 -6
- package/lib/operations/indexes.js.map +1 -1
- package/lib/operations/insert.js +6 -6
- package/lib/operations/insert.js.map +1 -1
- package/lib/operations/kill_cursors.js +5 -2
- package/lib/operations/kill_cursors.js.map +1 -1
- package/lib/operations/list_collections.js +2 -2
- package/lib/operations/list_collections.js.map +1 -1
- package/lib/operations/list_databases.js +2 -2
- package/lib/operations/list_databases.js.map +1 -1
- package/lib/operations/operation.js.map +1 -1
- package/lib/operations/profiling_level.js +2 -2
- package/lib/operations/profiling_level.js.map +1 -1
- package/lib/operations/remove_user.js +2 -2
- package/lib/operations/remove_user.js.map +1 -1
- package/lib/operations/rename.js +2 -2
- package/lib/operations/rename.js.map +1 -1
- package/lib/operations/run_command.js +6 -4
- package/lib/operations/run_command.js.map +1 -1
- package/lib/operations/search_indexes/create.js +5 -2
- package/lib/operations/search_indexes/create.js.map +1 -1
- package/lib/operations/search_indexes/drop.js +2 -2
- package/lib/operations/search_indexes/drop.js.map +1 -1
- package/lib/operations/search_indexes/update.js +2 -2
- package/lib/operations/search_indexes/update.js.map +1 -1
- package/lib/operations/set_profiling_level.js +2 -2
- package/lib/operations/set_profiling_level.js.map +1 -1
- package/lib/operations/stats.js +2 -2
- package/lib/operations/stats.js.map +1 -1
- package/lib/operations/update.js +8 -8
- package/lib/operations/update.js.map +1 -1
- package/lib/operations/validate_collection.js +2 -2
- package/lib/operations/validate_collection.js.map +1 -1
- package/lib/sdam/server.js +4 -1
- package/lib/sdam/server.js.map +1 -1
- package/lib/sdam/server_description.js +2 -0
- package/lib/sdam/server_description.js.map +1 -1
- package/lib/sdam/topology.js +38 -11
- package/lib/sdam/topology.js.map +1 -1
- package/lib/sessions.js +145 -74
- package/lib/sessions.js.map +1 -1
- package/lib/timeout.js +217 -16
- package/lib/timeout.js.map +1 -1
- package/lib/utils.js +31 -17
- package/lib/utils.js.map +1 -1
- package/lib/write_concern.js.map +1 -1
- package/mongodb.d.ts +558 -38
- package/package.json +2 -2
- package/src/admin.ts +6 -2
- package/src/bulk/common.ts +17 -5
- package/src/change_stream.ts +127 -52
- package/src/client-side-encryption/auto_encrypter.ts +12 -5
- package/src/client-side-encryption/client_encryption.ts +103 -20
- package/src/client-side-encryption/state_machine.ts +66 -32
- package/src/cmap/connection.ts +105 -8
- package/src/cmap/connection_pool.ts +14 -14
- package/src/cmap/wire_protocol/on_data.ts +11 -1
- package/src/cmap/wire_protocol/responses.ts +35 -1
- package/src/collection.ts +81 -9
- package/src/connection_string.ts +2 -0
- package/src/cursor/abstract_cursor.ts +286 -39
- package/src/cursor/aggregation_cursor.ts +54 -8
- package/src/cursor/change_stream_cursor.ts +6 -2
- package/src/cursor/client_bulk_write_cursor.ts +6 -2
- package/src/cursor/find_cursor.ts +40 -9
- package/src/cursor/list_collections_cursor.ts +1 -1
- package/src/cursor/list_indexes_cursor.ts +1 -1
- package/src/cursor/run_command_cursor.ts +50 -5
- package/src/db.ts +75 -7
- package/src/error.ts +26 -1
- package/src/explain.ts +85 -0
- package/src/gridfs/download.ts +43 -4
- package/src/gridfs/index.ts +64 -16
- package/src/gridfs/upload.ts +152 -45
- package/src/index.ts +26 -4
- package/src/mongo_client.ts +75 -3
- package/src/operations/aggregate.ts +10 -2
- package/src/operations/bulk_write.ts +9 -2
- package/src/operations/client_bulk_write/client_bulk_write.ts +11 -3
- package/src/operations/client_bulk_write/executor.ts +15 -3
- package/src/operations/command.ts +18 -8
- package/src/operations/count.ts +10 -3
- package/src/operations/create_collection.ts +14 -7
- package/src/operations/delete.ts +15 -6
- package/src/operations/distinct.ts +7 -2
- package/src/operations/drop.ts +18 -8
- package/src/operations/estimated_document_count.ts +7 -2
- package/src/operations/execute_operation.ts +22 -13
- package/src/operations/find.ts +17 -5
- package/src/operations/find_and_modify.ts +7 -2
- package/src/operations/get_more.ts +4 -1
- package/src/operations/indexes.ts +20 -7
- package/src/operations/insert.ts +13 -6
- package/src/operations/kill_cursors.ts +10 -2
- package/src/operations/list_collections.ts +10 -1
- package/src/operations/list_databases.ts +9 -2
- package/src/operations/operation.ts +16 -2
- package/src/operations/profiling_level.ts +7 -2
- package/src/operations/remove_user.ts +7 -2
- package/src/operations/rename.ts +7 -2
- package/src/operations/run_command.ts +23 -4
- package/src/operations/search_indexes/create.ts +10 -2
- package/src/operations/search_indexes/drop.ts +7 -2
- package/src/operations/search_indexes/update.ts +7 -2
- package/src/operations/set_profiling_level.ts +4 -2
- package/src/operations/stats.ts +7 -2
- package/src/operations/update.ts +16 -8
- package/src/operations/validate_collection.ts +7 -2
- package/src/sdam/server.ts +14 -4
- package/src/sdam/server_description.ts +4 -0
- package/src/sdam/topology.ts +43 -18
- package/src/sessions.ts +193 -89
- package/src/timeout.ts +310 -23
- package/src/transactions.ts +1 -1
- package/src/utils.ts +42 -28
- package/src/write_concern.ts +6 -3
package/src/gridfs/index.ts
CHANGED
```diff
@@ -2,10 +2,12 @@ import type { ObjectId } from '../bson';
 import type { Collection } from '../collection';
 import type { FindCursor } from '../cursor/find_cursor';
 import type { Db } from '../db';
-import { MongoRuntimeError } from '../error';
+import { MongoOperationTimeoutError, MongoRuntimeError } from '../error';
 import { type Filter, TypedEventEmitter } from '../mongo_types';
 import type { ReadPreference } from '../read_preference';
 import type { Sort } from '../sort';
+import { CSOTTimeoutContext } from '../timeout';
+import { resolveOptions } from '../utils';
 import { WriteConcern, type WriteConcernOptions } from '../write_concern';
 import type { FindOptions } from './../operations/find';
 import {
@@ -36,7 +38,11 @@ export interface GridFSBucketOptions extends WriteConcernOptions {
   chunkSizeBytes?: number;
   /** Read preference to be passed to read operations */
   readPreference?: ReadPreference;
-  /**
+  /**
+   * @experimental
+   * Specifies the lifetime duration of a gridFS stream. If any async operations are in progress
+   * when this timeout expires, the stream will throw a timeout error.
+   */
   timeoutMS?: number;
 }
 
@@ -48,6 +54,7 @@ export interface GridFSBucketPrivate {
     chunkSizeBytes: number;
     readPreference?: ReadPreference;
     writeConcern: WriteConcern | undefined;
+    timeoutMS?: number;
   };
   _chunksCollection: Collection<GridFSChunk>;
   _filesCollection: Collection<GridFSFile>;
@@ -81,11 +88,11 @@ export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
   constructor(db: Db, options?: GridFSBucketOptions) {
     super();
     this.setMaxListeners(0);
-    const privateOptions = {
+    const privateOptions = resolveOptions(db, {
       ...DEFAULT_GRIDFS_BUCKET_OPTIONS,
       ...options,
       writeConcern: WriteConcern.fromOptions(options)
-    };
+    });
     this.s = {
       db,
       options: privateOptions,
@@ -109,7 +116,10 @@ export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
     filename: string,
     options?: GridFSBucketWriteStreamOptions
   ): GridFSBucketWriteStream {
-    return new GridFSBucketWriteStream(this, filename,
+    return new GridFSBucketWriteStream(this, filename, {
+      timeoutMS: this.s.options.timeoutMS,
+      ...options
+    });
   }
 
   /**
@@ -122,7 +132,11 @@ export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
     filename: string,
     options?: GridFSBucketWriteStreamOptions
   ): GridFSBucketWriteStream {
-    return new GridFSBucketWriteStream(this, filename, {
+    return new GridFSBucketWriteStream(this, filename, {
+      timeoutMS: this.s.options.timeoutMS,
+      ...options,
+      id
+    });
   }
 
   /** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. */
@@ -135,7 +149,7 @@ export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
       this.s._filesCollection,
       this.s.options.readPreference,
       { _id: id },
-      options
+      { timeoutMS: this.s.options.timeoutMS, ...options }
     );
   }
 
@@ -144,11 +158,27 @@
    *
    * @param id - The id of the file doc
    */
-  async delete(id: ObjectId): Promise<void> {
-    const {
+  async delete(id: ObjectId, options?: { timeoutMS: number }): Promise<void> {
+    const { timeoutMS } = resolveOptions(this.s.db, options);
+    let timeoutContext: CSOTTimeoutContext | undefined = undefined;
 
+    if (timeoutMS) {
+      timeoutContext = new CSOTTimeoutContext({
+        timeoutMS,
+        serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS
+      });
+    }
+
+    const { deletedCount } = await this.s._filesCollection.deleteOne(
+      { _id: id },
+      { timeoutMS: timeoutContext?.remainingTimeMS }
+    );
+
+    const remainingTimeMS = timeoutContext?.remainingTimeMS;
+    if (remainingTimeMS != null && remainingTimeMS <= 0)
+      throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`);
     // Delete orphaned chunks before returning FileNotFound
-    await this.s._chunksCollection.deleteMany({ files_id: id });
+    await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS });
 
     if (deletedCount === 0) {
       // TODO(NODE-3483): Replace with more appropriate error
@@ -188,7 +218,7 @@ export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
       this.s._filesCollection,
       this.s.options.readPreference,
       { filename },
-      { ...options, sort, skip }
+      { timeoutMS: this.s.options.timeoutMS, ...options, sort, skip }
     );
   }
 
@@ -198,18 +228,36 @@
    * @param id - the id of the file to rename
    * @param filename - new name for the file
    */
-  async rename(id: ObjectId, filename: string): Promise<void> {
+  async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise<void> {
     const filter = { _id: id };
     const update = { $set: { filename } };
-    const { matchedCount } = await this.s._filesCollection.updateOne(filter, update);
+    const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options);
     if (matchedCount === 0) {
       throw new MongoRuntimeError(`File with id ${id} not found`);
     }
   }
 
   /** Removes this bucket's files collection, followed by its chunks collection. */
-  async drop(): Promise<void> {
-
-
+  async drop(options?: { timeoutMS: number }): Promise<void> {
+    const { timeoutMS } = resolveOptions(this.s.db, options);
+    let timeoutContext: CSOTTimeoutContext | undefined = undefined;
+
+    if (timeoutMS) {
+      timeoutContext = new CSOTTimeoutContext({
+        timeoutMS,
+        serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS
+      });
+    }
+
+    if (timeoutContext) {
+      await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS });
+      const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow(
+        `Timed out after ${timeoutMS}ms`
+      );
+      await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS });
+    } else {
+      await this.s._filesCollection.drop();
+      await this.s._chunksCollection.drop();
+    }
  }
 }
```
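
The GridFSBucket changes above thread a single CSOT budget through `delete`, `drop`, and the stream factories. A minimal usage sketch, assuming a local mongod at `mongodb://localhost:27017`; the connection string, database name, bucket name, and timeout values are illustrative and not part of the diff:

```ts
import { GridFSBucket, MongoClient, MongoOperationTimeoutError } from 'mongodb';

// Illustrative connection string; adjust for your deployment.
const client = new MongoClient('mongodb://localhost:27017');

async function cleanupBucket(): Promise<void> {
  await client.connect();
  // Bucket-level timeoutMS (experimental) is inherited by streams and cursors the bucket creates.
  const bucket = new GridFSBucket(client.db('test'), { bucketName: 'fs', timeoutMS: 5_000 });

  try {
    // The cursor returned by find() picks up the bucket-level timeoutMS.
    for (const file of await bucket.find({}).toArray()) {
      // A per-call timeoutMS overrides the bucket-level budget for this delete.
      await bucket.delete(file._id, { timeoutMS: 2_000 });
    }
    // drop() shares one budget across the files and chunks collections
    // (assumes the bucket's collections exist).
    await bucket.drop({ timeoutMS: 2_000 });
  } catch (error) {
    if (error instanceof MongoOperationTimeoutError) {
      console.warn('GridFS cleanup exceeded its time budget:', error.message);
    } else {
      throw error;
    }
  } finally {
    await client.close();
  }
}

cleanupBucket().catch(console.error);
```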
package/src/gridfs/upload.ts
CHANGED
```diff
@@ -2,8 +2,15 @@ import { Writable } from 'stream';
 
 import { type Document, ObjectId } from '../bson';
 import type { Collection } from '../collection';
-import {
-import {
+import { CursorTimeoutMode } from '../cursor/abstract_cursor';
+import {
+  MongoAPIError,
+  MONGODB_ERROR_CODES,
+  MongoError,
+  MongoOperationTimeoutError
+} from '../error';
+import { CSOTTimeoutContext } from '../timeout';
+import { type Callback, resolveTimeoutOptions, squashError } from '../utils';
 import type { WriteConcernOptions } from '../write_concern';
 import { WriteConcern } from './../write_concern';
 import type { GridFSFile } from './download';
@@ -35,7 +42,10 @@ export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions {
    * @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead.
    */
   aliases?: string[];
-  /**
+  /**
+   * @experimental
+   * Specifies the time an operation will run until it throws a timeout error
+   */
   timeoutMS?: number;
 }
 
@@ -97,6 +107,8 @@ export class GridFSBucketWriteStream extends Writable {
    * ```
    */
   gridFSFile: GridFSFile | null = null;
+  /** @internal */
+  timeoutContext?: CSOTTimeoutContext;
 
   /**
    * @param bucket - Handle for this stream's corresponding bucket
@@ -131,14 +143,12 @@
       aborted: false
     };
 
-    if (
-      this.
-
-
-
-
-      }, squashError);
-    }
+    if (options.timeoutMS != null)
+      this.timeoutContext = new CSOTTimeoutContext({
+        timeoutMS: options.timeoutMS,
+        serverSelectionTimeoutMS: resolveTimeoutOptions(this.bucket.s.db.client, {})
+          .serverSelectionTimeoutMS
+      });
   }
 
   /**
@@ -147,10 +157,26 @@
    * The stream is considered constructed when the indexes are done being created
    */
   override _construct(callback: (error?: Error | null) => void): void {
-    if (this.bucket.s.
+    if (!this.bucket.s.calledOpenUploadStream) {
+      this.bucket.s.calledOpenUploadStream = true;
+
+      checkIndexes(this).then(
+        () => {
+          this.bucket.s.checkedIndexes = true;
+          this.bucket.emit('index');
+          callback();
+        },
+        error => {
+          if (error instanceof MongoOperationTimeoutError) {
+            return handleError(this, error, callback);
+          }
+          squashError(error);
+          callback();
+        }
+      );
+    } else {
       return process.nextTick(callback);
     }
-    this.bucket.once('index', callback);
   }
 
   /**
@@ -194,7 +220,10 @@
     }
 
     this.state.aborted = true;
-
+    const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow(
+      `Upload timed out after ${this.timeoutContext?.timeoutMS}ms`
+    );
+    await this.chunks.deleteMany({ files_id: this.id, timeoutMS: remainingTimeMS });
   }
 }
 
@@ -219,9 +248,19 @@ function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk
 async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise<void> {
   const index = { files_id: 1, n: 1 };
 
+  let remainingTimeMS;
+  remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
+    `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+  );
+
   let indexes;
   try {
-    indexes = await stream.chunks
+    indexes = await stream.chunks
+      .listIndexes({
+        timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined,
+        timeoutMS: remainingTimeMS
+      })
+      .toArray();
   } catch (error) {
     if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) {
       indexes = [];
@@ -239,10 +278,14 @@ async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise<void>
   });
 
   if (!hasChunksIndex) {
+    remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
+      `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+    );
     await stream.chunks.createIndex(index, {
       ...stream.writeConcern,
       background: true,
-      unique: true
+      unique: true,
+      timeoutMS: remainingTimeMS
     });
   }
 }
@@ -270,13 +313,28 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void {
       return;
     }
 
-
-
-
-
-
-
-
+    const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
+    if (remainingTimeMS != null && remainingTimeMS <= 0) {
+      return handleError(
+        stream,
+        new MongoOperationTimeoutError(
+          `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+        ),
+        callback
+      );
+    }
+
+    stream.files
+      .insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
+      .then(
+        () => {
+          stream.gridFSFile = gridFSFile;
+          callback();
+        },
+        error => {
+          return handleError(stream, error, callback);
+        }
+      );
     return;
   }
 
@@ -284,7 +342,16 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void {
 }
 
 async function checkIndexes(stream: GridFSBucketWriteStream): Promise<void> {
-
+  let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
+    `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+  );
+  const doc = await stream.files.findOne(
+    {},
+    {
+      projection: { _id: 1 },
+      timeoutMS: remainingTimeMS
+    }
+  );
   if (doc != null) {
     // If at least one document exists assume the collection has the required index
     return;
@@ -293,8 +360,15 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise<void> {
   const index = { filename: 1, uploadDate: 1 };
 
   let indexes;
+  remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
+    `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+  );
+  const listIndexesOptions = {
+    timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined,
+    timeoutMS: remainingTimeMS
+  };
   try {
-    indexes = await stream.files.listIndexes().toArray();
+    indexes = await stream.files.listIndexes(listIndexesOptions).toArray();
   } catch (error) {
     if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) {
       indexes = [];
@@ -312,7 +386,11 @@
   });
 
   if (!hasFileIndex) {
-
+    remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
+      `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+    );
+
+    await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS });
   }
 
   await checkChunksIndex(stream);
@@ -386,6 +464,18 @@ function doWrite(
   let doc: GridFSChunk;
   if (spaceRemaining === 0) {
     doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore));
+
+    const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
+    if (remainingTimeMS != null && remainingTimeMS <= 0) {
+      return handleError(
+        stream,
+        new MongoOperationTimeoutError(
+          `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+        ),
+        callback
+      );
+    }
+
     ++stream.state.outstandingRequests;
     ++outstandingRequests;
 
@@ -393,17 +483,21 @@ function doWrite(
       return;
     }
 
-    stream.chunks
-      (
-
-
-
-
-
+    stream.chunks
+      .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
+      .then(
+        () => {
+          --stream.state.outstandingRequests;
+          --outstandingRequests;
+
+          if (!outstandingRequests) {
+            checkDone(stream, callback);
+          }
+        },
+        error => {
+          return handleError(stream, error, callback);
         }
-
-      error => handleError(stream, error, callback)
-    );
+      );
 
     spaceRemaining = stream.chunkSizeBytes;
     stream.pos = 0;
@@ -420,8 +514,6 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void
     return checkDone(stream, callback);
   }
 
-  ++stream.state.outstandingRequests;
-
   // Create a new buffer to make sure the buffer isn't bigger than it needs
   // to be.
   const remnant = Buffer.alloc(stream.pos);
@@ -433,13 +525,28 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void
     return;
   }
 
-
-
-
-
-
-
-
+  const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
+  if (remainingTimeMS != null && remainingTimeMS <= 0) {
+    return handleError(
+      stream,
+      new MongoOperationTimeoutError(
+        `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
+      ),
+      callback
+    );
+  }
+  ++stream.state.outstandingRequests;
+  stream.chunks
+    .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
+    .then(
+      () => {
+        --stream.state.outstandingRequests;
+        checkDone(stream, callback);
+      },
+      error => {
+        return handleError(stream, error, callback);
+      }
+    );
 }
 
 function isAborted(stream: GridFSBucketWriteStream, callback: Callback<void>): boolean {
```
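
The write-stream changes give every index check, chunk insert, and the final files-collection insert one shared deadline. A hedged sketch of how an upload might opt in, assuming a local mongod and illustrative names; if the budget runs out mid-upload, the stream surfaces a MongoOperationTimeoutError through the pipeline promise:

```ts
import { Readable } from 'node:stream';
import { pipeline } from 'node:stream/promises';
import { GridFSBucket, MongoClient, MongoOperationTimeoutError } from 'mongodb';

// Illustrative connection string; adjust for your deployment.
const client = new MongoClient('mongodb://localhost:27017');

async function uploadWithDeadline(): Promise<void> {
  await client.connect();
  const bucket = new GridFSBucket(client.db('test'), { bucketName: 'fs' });

  // timeoutMS (experimental) bounds the whole upload: the index checks in _construct,
  // each chunk insertOne, and the closing files-collection insertOne draw from one budget.
  const upload = bucket.openUploadStream('hello.txt', { timeoutMS: 10_000 });

  try {
    await pipeline(Readable.from([Buffer.from('hello gridfs')]), upload);
    console.log('stored file with _id', upload.id);
  } catch (error) {
    if (error instanceof MongoOperationTimeoutError) {
      console.warn('upload timed out:', error.message);
    } else {
      throw error;
    }
  } finally {
    await client.close();
  }
}

uploadWithDeadline().catch(console.error);
```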
package/src/index.ts
CHANGED
```diff
@@ -10,6 +10,7 @@ import { ListCollectionsCursor } from './cursor/list_collections_cursor';
 import { ListIndexesCursor } from './cursor/list_indexes_cursor';
 import type { RunCommandCursor } from './cursor/run_command_cursor';
 import { Db } from './db';
+import { ExplainableCursor } from './explain';
 import { GridFSBucket } from './gridfs';
 import { GridFSBucketReadStream } from './gridfs/download';
 import { GridFSBucketWriteStream } from './gridfs/upload';
@@ -36,7 +37,11 @@ export {
   Timestamp,
   UUID
 } from './bson';
-export {
+export {
+  type AnyBulkWriteOperation,
+  type BulkWriteOptions,
+  MongoBulkWriteError
+} from './bulk/common';
 export { ClientEncryption } from './client-side-encryption/client_encryption';
 export { ChangeStreamCursor } from './cursor/change_stream_cursor';
 export {
@@ -66,6 +71,7 @@ export {
   MongoNetworkTimeoutError,
   MongoNotConnectedError,
   MongoOIDCError,
+  MongoOperationTimeoutError,
   MongoParseError,
   MongoRuntimeError,
   MongoServerClosedError,
@@ -90,6 +96,7 @@ export {
   ClientSession,
   Collection,
   Db,
+  ExplainableCursor,
   FindCursor,
   GridFSBucket,
   GridFSBucketReadStream,
@@ -108,7 +115,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt
 export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi';
 export { AuthMechanism } from './cmap/auth/providers';
 export { Compressor } from './cmap/wire_protocol/compression';
-export { CURSOR_FLAGS } from './cursor/abstract_cursor';
+export { CURSOR_FLAGS, CursorTimeoutMode } from './cursor/abstract_cursor';
 export { MongoErrorLabel } from './error';
 export { ExplainVerbosity } from './explain';
 export { ServerApiVersion } from './mongo_client';
@@ -358,6 +365,7 @@ export type {
   CursorStreamOptions
 } from './cursor/abstract_cursor';
 export type {
+  CursorTimeoutContext,
   InitialCursorResponse,
   InternalAbstractCursorOptions
 } from './cursor/abstract_cursor';
@@ -566,7 +574,13 @@ export type {
   RTTSampler,
   ServerMonitoringMode
 } from './sdam/monitor';
-export type {
+export type {
+  Server,
+  ServerCommandOptions,
+  ServerEvents,
+  ServerOptions,
+  ServerPrivate
+} from './sdam/server';
 export type {
   ServerDescription,
   ServerDescriptionOptions,
@@ -597,7 +611,15 @@ export type {
   WithTransactionCallback
 } from './sessions';
 export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort';
-export type {
+export type {
+  CSOTTimeoutContext,
+  CSOTTimeoutContextOptions,
+  LegacyTimeoutContext,
+  LegacyTimeoutContextOptions,
+  Timeout,
+  TimeoutContext,
+  TimeoutContextOptions
+} from './timeout';
 export type { Transaction, TransactionOptions, TxnState } from './transactions';
 export type {
   BufferPool,
```
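
With MongoOperationTimeoutError and CursorTimeoutMode now re-exported from the package root, application code can opt into the same cursor-level budgeting the upload stream uses internally. A sketch under the assumption that listIndexes accepts the timeoutMS/timeoutMode cursor options shown in the upload.ts hunks above; the URI and namespace are illustrative:

```ts
import { CursorTimeoutMode, MongoClient, MongoOperationTimeoutError } from 'mongodb';

// Illustrative connection string and namespace.
const client = new MongoClient('mongodb://localhost:27017');

async function listIndexesWithBudget(): Promise<void> {
  const coll = client.db('test').collection('items');
  try {
    // LIFETIME applies one budget to the initial command plus every getMore,
    // rather than resetting the clock on each batch (ITERATION).
    const indexes = await coll
      .listIndexes({ timeoutMS: 2_000, timeoutMode: CursorTimeoutMode.LIFETIME })
      .toArray();
    console.log(indexes.map(index => index.name));
  } catch (error) {
    if (error instanceof MongoOperationTimeoutError) {
      console.warn('listIndexes exceeded its time budget');
    } else {
      throw error;
    }
  } finally {
    await client.close();
  }
}

listIndexesWithBudget().catch(console.error);
```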
package/src/mongo_client.ts
CHANGED
```diff
@@ -130,7 +130,10 @@ export type SupportedNodeConnectionOptions = SupportedTLSConnectionOptions &
 export interface MongoClientOptions extends BSONSerializeOptions, SupportedNodeConnectionOptions {
   /** Specifies the name of the replica set, if the mongod is a member of a replica set. */
   replicaSet?: string;
-  /**
+  /**
+   * @experimental
+   * Specifies the time an operation will run until it throws a timeout error
+   */
   timeoutMS?: number;
   /** Enables or disables TLS/SSL for the connection. */
   tls?: boolean;
@@ -482,6 +485,10 @@ export class MongoClient extends TypedEventEmitter<MongoClientEvents> implements
     return this.s.bsonOptions;
   }
 
+  get timeoutMS(): number | undefined {
+    return this.s.options.timeoutMS;
+  }
+
   /**
    * Executes a client bulk write operation, available on server 8.0+.
    * @param models - The client bulk write models.
@@ -508,6 +515,13 @@
   /**
    * Connect to MongoDB using a url
    *
+   * @remarks
+   * Calling `connect` is optional since the first operation you perform will call `connect` if it's needed.
+   * `timeoutMS` will bound the time any operation can take before throwing a timeout error.
+   * However, when the operation being run is automatically connecting your `MongoClient` the `timeoutMS` will not apply to the time taken to connect the MongoClient.
+   * This means the time to setup the `MongoClient` does not count against `timeoutMS`.
+   * If you are using `timeoutMS` we recommend connecting your client explicitly in advance of any operation to avoid this inconsistent execution time.
+   *
    * @see docs.mongodb.org/manual/reference/connection-string/
    */
   async connect(): Promise<this> {
@@ -688,7 +702,7 @@ export class MongoClient extends TypedEventEmitter<MongoClientEvents> implements
 
     // Default to db from connection string if not provided
     if (!dbName) {
-      dbName = this.options.dbName;
+      dbName = this.s.options.dbName;
     }
 
     // Copy the options and add out internal override of the not shared flag
@@ -705,6 +719,13 @@
    * Connect to MongoDB using a url
    *
    * @remarks
+   * Calling `connect` is optional since the first operation you perform will call `connect` if it's needed.
+   * `timeoutMS` will bound the time any operation can take before throwing a timeout error.
+   * However, when the operation being run is automatically connecting your `MongoClient` the `timeoutMS` will not apply to the time taken to connect the MongoClient.
+   * This means the time to setup the `MongoClient` does not count against `timeoutMS`.
+   * If you are using `timeoutMS` we recommend connecting your client explicitly in advance of any operation to avoid this inconsistent execution time.
+   *
+   * @remarks
    * The programmatically provided options take precedence over the URI options.
    *
    * @see https://www.mongodb.com/docs/manual/reference/connection-string/
@@ -789,6 +810,58 @@
    * - The first is to provide the schema that may be defined for all the data within the current cluster
    * - The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument
    *
+   * @remarks
+   * When `timeoutMS` is configured for a change stream, it will have different behaviour depending
+   * on whether the change stream is in iterator mode or emitter mode. In both cases, a change
+   * stream will time out if it does not receive a change event within `timeoutMS` of the last change
+   * event.
+   *
+   * Note that if a change stream is consistently timing out when watching a collection, database or
+   * client that is being changed, then this may be due to the server timing out before it can finish
+   * processing the existing oplog. To address this, restart the change stream with a higher
+   * `timeoutMS`.
+   *
+   * If the change stream times out the initial aggregate operation to establish the change stream on
+   * the server, then the client will close the change stream. If the getMore calls to the server
+   * time out, then the change stream will be left open, but will throw a MongoOperationTimeoutError
+   * when in iterator mode and emit an error event that returns a MongoOperationTimeoutError in
+   * emitter mode.
+   *
+   * To determine whether or not the change stream is still open following a timeout, check the
+   * {@link ChangeStream.closed} getter.
+   *
+   * @example
+   * In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream.
+   * The next call can just be retried after this succeeds.
+   * ```ts
+   * const changeStream = collection.watch([], { timeoutMS: 100 });
+   * try {
+   *     await changeStream.next();
+   * } catch (e) {
+   *     if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
+   *       await changeStream.next();
+   *     }
+   *     throw e;
+   * }
+   * ```
+   *
+   * @example
+   * In emitter mode, if the change stream goes `timeoutMS` without emitting a change event, it will
+   * emit an error event that returns a MongoOperationTimeoutError, but will not close the change
+   * stream unless the resume attempt fails. There is no need to re-establish change listeners as
+   * this will automatically continue emitting change events once the resume attempt completes.
+   *
+   * ```ts
+   * const changeStream = collection.watch([], { timeoutMS: 100 });
+   * changeStream.on('change', console.log);
+   * changeStream.on('error', e => {
+   *     if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
+   *       // do nothing
+   *     } else {
+   *       changeStream.close();
+   *     }
+   * });
+   * ```
   * @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
   * @param options - Optional settings for the command
   * @typeParam TSchema - Type of the data being detected by the change stream
@@ -952,6 +1025,5 @@ export interface MongoOptions
    * TODO: NODE-5671 - remove internal flag
    */
   mongodbLogPath?: 'stderr' | 'stdout' | MongoDBLogWritable;
-  /** @internal TODO(NODE-5688): make this public */
   timeoutMS?: number;
 }
```