mongodb 6.9.0-dev.20241010.sha.6ecf198f → 6.9.0-dev.20241011.sha.8def42de
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- package/lib/beta.d.ts +39 -1
- package/lib/cmap/wire_protocol/responses.js +3 -0
- package/lib/cmap/wire_protocol/responses.js.map +1 -1
- package/lib/cursor/client_bulk_write_cursor.js +1 -5
- package/lib/cursor/client_bulk_write_cursor.js.map +1 -1
- package/lib/error.js +24 -3
- package/lib/error.js.map +1 -1
- package/lib/index.js +4 -3
- package/lib/index.js.map +1 -1
- package/lib/mongo_client.js +3 -0
- package/lib/mongo_client.js.map +1 -1
- package/lib/operations/client_bulk_write/client_bulk_write.js +30 -10
- package/lib/operations/client_bulk_write/client_bulk_write.js.map +1 -1
- package/lib/operations/client_bulk_write/command_builder.js +62 -4
- package/lib/operations/client_bulk_write/command_builder.js.map +1 -1
- package/lib/operations/client_bulk_write/executor.js +43 -7
- package/lib/operations/client_bulk_write/executor.js.map +1 -1
- package/lib/operations/client_bulk_write/results_merger.js +116 -33
- package/lib/operations/client_bulk_write/results_merger.js.map +1 -1
- package/lib/operations/execute_operation.js +7 -0
- package/lib/operations/execute_operation.js.map +1 -1
- package/lib/operations/operation.js +5 -1
- package/lib/operations/operation.js.map +1 -1
- package/mongodb.d.ts +39 -1
- package/package.json +1 -1
- package/src/cmap/wire_protocol/responses.ts +4 -0
- package/src/cursor/client_bulk_write_cursor.ts +2 -8
- package/src/error.ts +44 -2
- package/src/index.ts +2 -0
- package/src/mongo_client.ts +5 -0
- package/src/operations/client_bulk_write/client_bulk_write.ts +36 -10
- package/src/operations/client_bulk_write/command_builder.ts +84 -5
- package/src/operations/client_bulk_write/common.ts +6 -0
- package/src/operations/client_bulk_write/executor.ts +48 -7
- package/src/operations/client_bulk_write/results_merger.ts +120 -40
- package/src/operations/execute_operation.ts +8 -0
- package/src/operations/operation.ts +6 -1
--- a/package/src/operations/client_bulk_write/client_bulk_write.ts
+++ b/package/src/operations/client_bulk_write/client_bulk_write.ts
@@ -27,6 +27,14 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
     this.ns = new MongoDBNamespace('admin', '$cmd');
   }
 
+  override resetBatch(): boolean {
+    return this.commandBuilder.resetBatch();
+  }
+
+  override get canRetryWrite(): boolean {
+    return this.commandBuilder.isBatchRetryable;
+  }
+
   /**
    * Execute the command. Superclass will handle write concern, etc.
    * @param server - The server.
@@ -41,14 +49,20 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
 
     if (server.description.type === ServerType.LoadBalancer) {
       if (session) {
-
-
-
-
-
+        let connection;
+        if (!session.pinnedConnection) {
+          // Checkout a connection to build the command.
+          connection = await server.pool.checkOut();
+          // Pin the connection to the session so it get used to execute the command and we do not
+          // perform a double check-in/check-out.
+          session.pin(connection);
+        } else {
+          connection = session.pinnedConnection;
+        }
         command = this.commandBuilder.buildBatch(
           connection.hello?.maxMessageSizeBytes,
-          connection.hello?.maxWriteBatchSize
+          connection.hello?.maxWriteBatchSize,
+          connection.hello?.maxBsonObjectSize
         );
       } else {
         throw new MongoClientBulkWriteExecutionError(
@@ -59,16 +73,26 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
       // At this point we have a server and the auto connect code has already
       // run in executeOperation, so the server description will be populated.
       // We can use that to build the command.
-      if (
+      if (
+        !server.description.maxWriteBatchSize ||
+        !server.description.maxMessageSizeBytes ||
+        !server.description.maxBsonObjectSize
+      ) {
         throw new MongoClientBulkWriteExecutionError(
-          'In order to execute a client bulk write, both maxWriteBatchSize and
+          'In order to execute a client bulk write, both maxWriteBatchSize, maxMessageSizeBytes and maxBsonObjectSize must be provided by the servers hello response.'
         );
       }
       command = this.commandBuilder.buildBatch(
         server.description.maxMessageSizeBytes,
-        server.description.maxWriteBatchSize
+        server.description.maxWriteBatchSize,
+        server.description.maxBsonObjectSize
      );
     }
+
+    // Check after the batch is built if we cannot retry it and override the option.
+    if (!this.canRetryWrite) {
+      this.options.willRetryWrite = false;
+    }
     return await super.executeCommand(server, session, command, ClientBulkWriteCursorResponse);
   }
 }
@@ -77,5 +101,7 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
 defineAspects(ClientBulkWriteOperation, [
   Aspect.WRITE_OPERATION,
   Aspect.SKIP_COLLATION,
-  Aspect.CURSOR_CREATING
+  Aspect.CURSOR_CREATING,
+  Aspect.RETRYABLE,
+  Aspect.COMMAND_BATCHING
 ]);
--- a/package/src/operations/client_bulk_write/command_builder.ts
+++ b/package/src/operations/client_bulk_write/command_builder.ts
@@ -1,8 +1,9 @@
 import { BSON, type Document } from '../../bson';
 import { DocumentSequence } from '../../cmap/commands';
+import { MongoAPIError, MongoInvalidArgumentError } from '../../error';
 import { type PkFactory } from '../../mongo_client';
 import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
-import { DEFAULT_PK_FACTORY } from '../../utils';
+import { DEFAULT_PK_FACTORY, hasAtomicOperators } from '../../utils';
 import { type CollationOptions } from '../command';
 import { type Hint } from '../operation';
 import type {
@@ -38,8 +39,14 @@ export class ClientBulkWriteCommandBuilder {
   models: AnyClientBulkWriteModel[];
   options: ClientBulkWriteOptions;
   pkFactory: PkFactory;
+  /** The current index in the models array that is being processed. */
   currentModelIndex: number;
+  /** The model index that the builder was on when it finished the previous batch. Used for resets when retrying. */
+  previousModelIndex: number;
+  /** The last array of operations that were created. Used by the results merger for indexing results. */
   lastOperations: Document[];
+  /** Returns true if the current batch being created has no multi-updates. */
+  isBatchRetryable: boolean;
 
   /**
    * Create the command builder.
@@ -54,7 +61,9 @@ export class ClientBulkWriteCommandBuilder {
     this.options = options;
     this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
     this.currentModelIndex = 0;
+    this.previousModelIndex = 0;
     this.lastOperations = [];
+    this.isBatchRetryable = true;
   }
 
   /**
@@ -76,27 +85,57 @@ export class ClientBulkWriteCommandBuilder {
     return this.currentModelIndex < this.models.length;
   }
 
+  /**
+   * When we need to retry a command we need to set the current
+   * model index back to its previous value.
+   */
+  resetBatch(): boolean {
+    this.currentModelIndex = this.previousModelIndex;
+    return true;
+  }
+
   /**
    * Build a single batch of a client bulk write command.
    * @param maxMessageSizeBytes - The max message size in bytes.
   * @param maxWriteBatchSize - The max write batch size.
    * @returns The client bulk write command.
    */
-  buildBatch(
+  buildBatch(
+    maxMessageSizeBytes: number,
+    maxWriteBatchSize: number,
+    maxBsonObjectSize: number
+  ): ClientBulkWriteCommand {
+    // We start by assuming the batch has no multi-updates, so it is retryable
+    // until we find them.
+    this.isBatchRetryable = true;
     let commandLength = 0;
     let currentNamespaceIndex = 0;
     const command: ClientBulkWriteCommand = this.baseCommand();
     const namespaces = new Map<string, number>();
+    // In the case of retries we need to mark where we started this batch.
+    this.previousModelIndex = this.currentModelIndex;
 
     while (this.currentModelIndex < this.models.length) {
       const model = this.models[this.currentModelIndex];
       const ns = model.namespace;
       const nsIndex = namespaces.get(ns);
 
+      // Multi updates are not retryable.
+      if (model.name === 'deleteMany' || model.name === 'updateMany') {
+        this.isBatchRetryable = false;
+      }
+
       if (nsIndex != null) {
         // Build the operation and serialize it to get the bytes buffer.
         const operation = buildOperation(model, nsIndex, this.pkFactory);
-
+        let operationBuffer;
+        try {
+          operationBuffer = BSON.serialize(operation);
+        } catch (cause) {
+          throw new MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
+        }
+
+        validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
 
         // Check if the operation buffer can fit in the command. If it can,
         // then add the operation to the document sequence and increment the
@@ -119,9 +158,18 @@ export class ClientBulkWriteCommandBuilder {
         // construct our nsInfo and ops documents and buffers.
         namespaces.set(ns, currentNamespaceIndex);
         const nsInfo = { ns: ns };
-        const nsInfoBuffer = BSON.serialize(nsInfo);
         const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
-
+        let nsInfoBuffer;
+        let operationBuffer;
+        try {
+          nsInfoBuffer = BSON.serialize(nsInfo);
+          operationBuffer = BSON.serialize(operation);
+        } catch (cause) {
+          throw new MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
+        }
+
+        validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
+        validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
 
         // Check if the operation and nsInfo buffers can fit in the command. If they
         // can, then add the operation and nsInfo to their respective document
@@ -179,6 +227,14 @@ export class ClientBulkWriteCommandBuilder {
   }
 }
 
+function validateBufferSize(name: string, buffer: Uint8Array, maxBsonObjectSize: number) {
+  if (buffer.length > maxBsonObjectSize) {
+    throw new MongoInvalidArgumentError(
+      `Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`
+    );
+  }
+}
+
 /** @internal */
 interface ClientInsertOperation {
   insert: number;
@@ -293,6 +349,18 @@ export const buildUpdateManyOperation = (
   return createUpdateOperation(model, index, true);
 };
 
+/**
+ * Validate the update document.
+ * @param update - The update document.
+ */
+function validateUpdate(update: Document) {
+  if (!hasAtomicOperators(update)) {
+    throw new MongoAPIError(
+      'Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.'
+    );
+  }
+}
+
 /**
  * Creates a delete operation based on the parameters.
 */
@@ -301,6 +369,11 @@ function createUpdateOperation(
   index: number,
   multi: boolean
 ): ClientUpdateOperation {
+  // Update documents provided in UpdateOne and UpdateMany write models are
+  // required only to contain atomic modifiers (i.e. keys that start with "$").
+  // Drivers MUST throw an error if an update document is empty or if the
+  // document's first key does not start with "$".
+  validateUpdate(model.update);
   const document: ClientUpdateOperation = {
     update: index,
     multi: multi,
@@ -343,6 +416,12 @@ export const buildReplaceOneOperation = (
   model: ClientReplaceOneModel,
   index: number
 ): ClientReplaceOneOperation => {
+  if (hasAtomicOperators(model.replacement)) {
+    throw new MongoAPIError(
+      'Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.'
+    );
+  }
+
   const document: ClientReplaceOneOperation = {
     update: index,
     multi: false,
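
The two `hasAtomicOperators` checks above are the consumer-visible part of the builder changes: update documents must contain only atomic modifiers, while replacement documents must contain none. A minimal sketch of what now throws, assuming the `MongoClient.bulkWrite` entry point this feature targets (the method name and model shapes are inferred from the types in this diff, not confirmed by it):

```ts
import { MongoClient } from 'mongodb';

// Sketch against the assumed client.bulkWrite surface; not itself part of this diff.
const client = new MongoClient('mongodb://localhost:27017');

// Fine: the update document only uses atomic modifiers.
await client.bulkWrite([
  { namespace: 'db.coll', name: 'updateOne', filter: { _id: 1 }, update: { $set: { x: 1 } } }
]);

// Throws MongoAPIError: update documents must start with a "$" operator.
await client.bulkWrite([
  { namespace: 'db.coll', name: 'updateOne', filter: { _id: 1 }, update: { x: 1 } }
]);

// Throws MongoAPIError: replacement documents must NOT contain atomic modifiers.
await client.bulkWrite([
  { namespace: 'db.coll', name: 'replaceOne', filter: { _id: 1 }, replacement: { $set: { x: 1 } } }
]);
```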
--- a/package/src/operations/client_bulk_write/common.ts
+++ b/package/src/operations/client_bulk_write/common.ts
@@ -181,6 +181,12 @@ export interface ClientBulkWriteResult {
   deleteResults?: Map<number, ClientDeleteResult>;
 }
 
+/** @public */
+export interface ClientBulkWriteError {
+  code: number;
+  message: string;
+}
+
 /** @public */
 export interface ClientInsertOneResult {
   /**
--- a/package/src/operations/client_bulk_write/executor.ts
+++ b/package/src/operations/client_bulk_write/executor.ts
@@ -1,4 +1,9 @@
 import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
+import {
+  MongoClientBulkWriteError,
+  MongoClientBulkWriteExecutionError,
+  MongoServerError
+} from '../../error';
 import { type MongoClient } from '../../mongo_client';
 import { WriteConcern } from '../../write_concern';
 import { executeOperation } from '../execute_operation';
@@ -31,9 +36,18 @@ export class ClientBulkWriteExecutor {
     operations: AnyClientBulkWriteModel[],
     options?: ClientBulkWriteOptions
   ) {
+    if (operations.length === 0) {
+      throw new MongoClientBulkWriteExecutionError('No client bulk write models were provided.');
+    }
+
     this.client = client;
     this.operations = operations;
-    this.options = {
+    this.options = {
+      ordered: true,
+      bypassDocumentValidation: false,
+      verboseResults: false,
+      ...options
+    };
 
     // If no write concern was provided, we inherit one from the client.
     if (!this.options.writeConcern) {
@@ -65,15 +79,42 @@ export class ClientBulkWriteExecutor {
     } else {
       const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
       // For each command will will create and exhaust a cursor for the results.
-      let currentBatchOffset = 0;
       while (commandBuilder.hasNextBatch()) {
         const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options);
-
-
-
-
-
+        try {
+          await resultsMerger.merge(cursor);
+        } catch (error) {
+          // Write concern errors are recorded in the writeConcernErrors field on MongoClientBulkWriteError.
+          // When a write concern error is encountered, it should not terminate execution of the bulk write
+          // for either ordered or unordered bulk writes. However, drivers MUST throw an exception at the end
+          // of execution if any write concern errors were observed.
+          if (error instanceof MongoServerError && !(error instanceof MongoClientBulkWriteError)) {
+            // Server side errors need to be wrapped inside a MongoClientBulkWriteError, where the root
+            // cause is the error property and a partial result is to be included.
+            const bulkWriteError = new MongoClientBulkWriteError({
+              message: 'Mongo client bulk write encountered an error during execution'
+            });
+            bulkWriteError.cause = error;
+            bulkWriteError.partialResult = resultsMerger.result;
+            throw bulkWriteError;
+          } else {
+            // Client side errors are just thrown.
+            throw error;
+          }
+        }
       }
+
+      // If we have write concern errors or unordered write errors at the end we throw.
+      if (resultsMerger.writeConcernErrors.length > 0 || resultsMerger.writeErrors.size > 0) {
+        const error = new MongoClientBulkWriteError({
+          message: 'Mongo client bulk write encountered errors during execution.'
+        });
+        error.writeConcernErrors = resultsMerger.writeConcernErrors;
+        error.writeErrors = resultsMerger.writeErrors;
+        error.partialResult = resultsMerger.result;
+        throw error;
+      }
+
       return resultsMerger.result;
     }
   }
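
Together these executor changes mean callers observe a single `MongoClientBulkWriteError` that aggregates individual write errors, write concern errors, and a partial result. A hedged sketch of the calling side, under the same assumed `client.bulkWrite` surface as above:

```ts
import { MongoClient, MongoClientBulkWriteError } from 'mongodb';

// Sketch against the assumed client.bulkWrite surface; not itself part of this diff.
const client = new MongoClient('mongodb://localhost:27017');

try {
  await client.bulkWrite(
    [
      { namespace: 'db.coll', name: 'insertOne', document: { _id: 1 } },
      { namespace: 'db.coll', name: 'insertOne', document: { _id: 1 } } // duplicate key
    ],
    { ordered: false }
  );
} catch (error) {
  if (error instanceof MongoClientBulkWriteError) {
    // writeErrors is a Map keyed by the index of the failed model.
    for (const [index, writeError] of error.writeErrors) {
      console.log(`model ${index} failed: ${writeError.code} ${writeError.message}`);
    }
    console.log(error.writeConcernErrors); // accumulated rather than fatal mid-run
    console.log(error.partialResult); // counts for the writes that did apply
  }
}
```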
--- a/package/src/operations/client_bulk_write/results_merger.ts
+++ b/package/src/operations/client_bulk_write/results_merger.ts
@@ -1,6 +1,9 @@
+import { MongoWriteConcernError } from '../..';
 import { type Document } from '../../bson';
-import { type
+import { type ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
+import { MongoClientBulkWriteError } from '../../error';
 import {
+  type ClientBulkWriteError,
   type ClientBulkWriteOptions,
   type ClientBulkWriteResult,
   type ClientDeleteResult,
@@ -15,6 +18,9 @@ import {
 export class ClientBulkWriteResultsMerger {
   result: ClientBulkWriteResult;
   options: ClientBulkWriteOptions;
+  currentBatchOffset: number;
+  writeConcernErrors: Document[];
+  writeErrors: Map<number, ClientBulkWriteError>;
 
   /**
    * Instantiate the merger.
@@ -22,6 +28,9 @@ export class ClientBulkWriteResultsMerger {
    */
   constructor(options: ClientBulkWriteOptions) {
     this.options = options;
+    this.currentBatchOffset = 0;
+    this.writeConcernErrors = [];
+    this.writeErrors = new Map();
     this.result = {
       insertedCount: 0,
       upsertedCount: 0,
@@ -47,55 +56,126 @@ export class ClientBulkWriteResultsMerger {
    * @param documents - The documents in the cursor.
    * @returns The current result.
    */
-  merge(
-
-
-
-    documents: Document[]
-  ): ClientBulkWriteResult {
-    // Update the counts from the cursor response.
-    this.result.insertedCount += response.insertedCount;
-    this.result.upsertedCount += response.upsertedCount;
-    this.result.matchedCount += response.matchedCount;
-    this.result.modifiedCount += response.modifiedCount;
-    this.result.deletedCount += response.deletedCount;
-
-    if (this.options.verboseResults) {
-      // Iterate all the documents in the cursor and update the result.
-      for (const document of documents) {
+  async merge(cursor: ClientBulkWriteCursor): Promise<ClientBulkWriteResult> {
+    let writeConcernErrorResult;
+    try {
+      for await (const document of cursor) {
         // Only add to maps if ok: 1
         if (document.ok === 1) {
-
-
-
-
-
-
+          if (this.options.verboseResults) {
+            this.processDocument(cursor, document);
+          }
+        } else {
+          // If an individual write error is encountered during an ordered bulk write, drivers MUST
+          // record the error in writeErrors and immediately throw the exception. Otherwise, drivers
+          // MUST continue to iterate the results cursor and execute any further bulkWrite batches.
+          if (this.options.ordered) {
+            const error = new MongoClientBulkWriteError({
+              message: 'Mongo client ordered bulk write encountered a write error.'
+            });
+            error.writeErrors.set(document.idx + this.currentBatchOffset, {
+              code: document.code,
+              message: document.errmsg
+            });
+            error.partialResult = this.result;
+            throw error;
+          } else {
+            this.writeErrors.set(document.idx + this.currentBatchOffset, {
+              code: document.code,
+              message: document.errmsg
             });
           }
-
-
-
-
-
-
-
-
-
+        }
+      }
+    } catch (error) {
+      if (error instanceof MongoWriteConcernError) {
+        const result = error.result;
+        writeConcernErrorResult = {
+          insertedCount: result.nInserted,
+          upsertedCount: result.nUpserted,
+          matchedCount: result.nMatched,
+          modifiedCount: result.nModified,
+          deletedCount: result.nDeleted,
+          writeConcernError: result.writeConcernError
+        };
+        if (this.options.verboseResults && result.cursor.firstBatch) {
+          for (const document of result.cursor.firstBatch) {
+            if (document.ok === 1) {
+              this.processDocument(cursor, document);
             }
-          this.result.updateResults?.set(document.idx + currentBatchOffset, result);
-        }
-        // Handle delete results.
-        if ('delete' in operation) {
-          this.result.deleteResults?.set(document.idx + currentBatchOffset, {
-            deletedCount: document.n
-          });
           }
         }
+      } else {
+        throw error;
+      }
+    } finally {
+      // Update the counts from the cursor response.
+      if (cursor.response) {
+        const response = cursor.response;
+        this.incrementCounts(response);
       }
+
+      // Increment the batch offset.
+      this.currentBatchOffset += cursor.operations.length;
+    }
+
+    // If we have write concern errors ensure they are added.
+    if (writeConcernErrorResult) {
+      const writeConcernError = writeConcernErrorResult.writeConcernError as Document;
+      this.incrementCounts(writeConcernErrorResult);
+      this.writeConcernErrors.push({
+        code: writeConcernError.code,
+        message: writeConcernError.errmsg
+      });
     }
 
     return this.result;
   }
+
+  /**
+   * Process an individual document in the results.
+   * @param cursor - The cursor.
+   * @param document - The document to process.
+   */
+  private processDocument(cursor: ClientBulkWriteCursor, document: Document) {
+    // Get the corresponding operation from the command.
+    const operation = cursor.operations[document.idx];
+    // Handle insert results.
+    if ('insert' in operation) {
+      this.result.insertResults?.set(document.idx + this.currentBatchOffset, {
+        insertedId: operation.document._id
+      });
+    }
+    // Handle update results.
+    if ('update' in operation) {
+      const result: ClientUpdateResult = {
+        matchedCount: document.n,
+        modifiedCount: document.nModified ?? 0,
+        // Check if the bulk did actually upsert.
+        didUpsert: document.upserted != null
+      };
+      if (document.upserted) {
+        result.upsertedId = document.upserted._id;
+      }
+      this.result.updateResults?.set(document.idx + this.currentBatchOffset, result);
+    }
+    // Handle delete results.
+    if ('delete' in operation) {
+      this.result.deleteResults?.set(document.idx + this.currentBatchOffset, {
+        deletedCount: document.n
+      });
+    }
+  }
+
+  /**
+   * Increment the result counts.
+   * @param document - The document with the results.
+   */
+  private incrementCounts(document: Document) {
+    this.result.insertedCount += document.insertedCount;
+    this.result.upsertedCount += document.upsertedCount;
+    this.result.matchedCount += document.matchedCount;
+    this.result.modifiedCount += document.modifiedCount;
+    this.result.deletedCount += document.deletedCount;
+  }
 }
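
The merger fills the per-operation result maps only when `verboseResults` is set, and offsets each document's `idx` by `currentBatchOffset`, so map keys always index into the caller's original models array even when the write was split into several batches. A short sketch of that consumer view, under the same assumed surface (the exact result shapes here are inferred, not confirmed):

```ts
import { MongoClient } from 'mongodb';

// Sketch against the assumed client.bulkWrite surface; not itself part of this diff.
const client = new MongoClient('mongodb://localhost:27017');

const result = await client.bulkWrite(
  [
    { namespace: 'db.coll', name: 'insertOne', document: { _id: 1 } },
    { namespace: 'db.coll', name: 'deleteOne', filter: { _id: 2 } }
  ],
  { verboseResults: true }
);

// Keys are positions in the original models array, stable across internal batch splits.
console.log(result.insertResults?.get(0)); // e.g. { insertedId: 1 }
console.log(result.deleteResults?.get(1)); // e.g. { deletedCount: 1 }
```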
--- a/package/src/operations/execute_operation.ts
+++ b/package/src/operations/execute_operation.ts
@@ -230,6 +230,10 @@ async function tryOperation<
       });
     }
 
+    if (operation.hasAspect(Aspect.COMMAND_BATCHING) && !operation.canRetryWrite) {
+      throw previousOperationError;
+    }
+
     if (hasWriteAspect && !isRetryableWriteError(previousOperationError))
       throw previousOperationError;
 
@@ -260,6 +264,10 @@ async function tryOperation<
     }
 
     try {
+      // If tries > 0 and we are command batching we need to reset the batch.
+      if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) {
+        operation.resetBatch();
+      }
      return await operation.execute(server, session);
     } catch (operationError) {
       if (!(operationError instanceof MongoError)) throw operationError;
--- a/package/src/operations/operation.ts
+++ b/package/src/operations/operation.ts
@@ -11,7 +11,8 @@ export const Aspect = {
   EXPLAINABLE: Symbol('EXPLAINABLE'),
   SKIP_COLLATION: Symbol('SKIP_COLLATION'),
   CURSOR_CREATING: Symbol('CURSOR_CREATING'),
-  MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER')
+  MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER'),
+  COMMAND_BATCHING: Symbol('COMMAND_BATCHING')
 } as const;
 
 /** @public */
@@ -98,6 +99,10 @@ export abstract class AbstractOperation<TResult = any> {
     this[kSession] = undefined;
   }
 
+  resetBatch(): boolean {
+    return true;
+  }
+
   get canRetryRead(): boolean {
     return this.hasAspect(Aspect.RETRYABLE) && this.hasAspect(Aspect.READ_OPERATION);
   }