mongodb 6.9.0-dev.20241010.sha.6ecf198f → 6.9.0-dev.20241012.sha.a473de95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/beta.d.ts +52 -3
- package/lib/cmap/wire_protocol/responses.js +3 -0
- package/lib/cmap/wire_protocol/responses.js.map +1 -1
- package/lib/cursor/client_bulk_write_cursor.js +1 -5
- package/lib/cursor/client_bulk_write_cursor.js.map +1 -1
- package/lib/error.js +24 -3
- package/lib/error.js.map +1 -1
- package/lib/index.js +4 -3
- package/lib/index.js.map +1 -1
- package/lib/mongo_client.js +3 -0
- package/lib/mongo_client.js.map +1 -1
- package/lib/operations/aggregate.js.map +1 -1
- package/lib/operations/client_bulk_write/client_bulk_write.js +30 -10
- package/lib/operations/client_bulk_write/client_bulk_write.js.map +1 -1
- package/lib/operations/client_bulk_write/command_builder.js +62 -4
- package/lib/operations/client_bulk_write/command_builder.js.map +1 -1
- package/lib/operations/client_bulk_write/executor.js +43 -7
- package/lib/operations/client_bulk_write/executor.js.map +1 -1
- package/lib/operations/client_bulk_write/results_merger.js +116 -33
- package/lib/operations/client_bulk_write/results_merger.js.map +1 -1
- package/lib/operations/execute_operation.js +7 -0
- package/lib/operations/execute_operation.js.map +1 -1
- package/lib/operations/find.js.map +1 -1
- package/lib/operations/operation.js +5 -1
- package/lib/operations/operation.js.map +1 -1
- package/mongodb.d.ts +52 -3
- package/package.json +1 -1
- package/src/cmap/wire_protocol/responses.ts +4 -0
- package/src/cursor/client_bulk_write_cursor.ts +2 -8
- package/src/error.ts +44 -2
- package/src/index.ts +2 -0
- package/src/mongo_client.ts +5 -0
- package/src/operations/aggregate.ts +9 -1
- package/src/operations/client_bulk_write/client_bulk_write.ts +36 -10
- package/src/operations/client_bulk_write/command_builder.ts +84 -5
- package/src/operations/client_bulk_write/common.ts +6 -0
- package/src/operations/client_bulk_write/executor.ts +48 -7
- package/src/operations/client_bulk_write/results_merger.ts +120 -40
- package/src/operations/execute_operation.ts +8 -0
- package/src/operations/find.ts +8 -1
- package/src/operations/operation.ts +6 -1
package/mongodb.d.ts
CHANGED

@@ -368,7 +368,7 @@ export declare class Admin {
 /* Excluded from this release type: AggregateOperation */

 /** @public */
-export declare interface AggregateOptions extends CommandOperationOptions {
+export declare interface AggregateOptions extends Omit<CommandOperationOptions, 'explain'> {
     /** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires mongodb 2.6 >). */
     allowDiskUse?: boolean;
     /** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
@@ -388,6 +388,12 @@ export declare interface AggregateOptions extends CommandOperationOptions {
     /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
     let?: Document;
     out?: string;
+    /**
+     * Specifies the verbosity mode for the explain output.
+     * @deprecated This API is deprecated in favor of `collection.aggregate().explain()`
+     * or `db.aggregate().explain()`.
+     */
+    explain?: ExplainOptions['explain'];
 }

 /**
@@ -1487,6 +1493,12 @@ export declare interface ChangeStreamUpdateDocument<TSchema extends Document = D
     fullDocumentBeforeChange?: TSchema;
 }

+/** @public */
+export declare interface ClientBulkWriteError {
+    code: number;
+    message: string;
+}
+
 /** @public */
 export declare interface ClientBulkWriteOptions extends CommandOperationOptions {
     /**
@@ -4192,7 +4204,7 @@ export declare class FindOperators {
  * @public
  * @typeParam TSchema - Unused schema definition, deprecated usage, only specify `FindOptions` with no generic
  */
-export declare interface FindOptions<TSchema extends Document = Document> extends Omit<CommandOperationOptions, 'writeConcern'> {
+export declare interface FindOptions<TSchema extends Document = Document> extends Omit<CommandOperationOptions, 'writeConcern' | 'explain'> {
     /** Sets the limit of documents returned in the query. */
     limit?: number;
     /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */
@@ -4240,6 +4252,11 @@ export declare interface FindOptions<TSchema extends Document = Document> extend
     * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored.
     */
    oplogReplay?: boolean;
+    /**
+     * Specifies the verbosity mode for the explain output.
+     * @deprecated This API is deprecated in favor of `collection.find().explain()`.
+     */
+    explain?: ExplainOptions['explain'];
 }

 /** @public */
@@ -5319,6 +5336,36 @@ export declare class MongoClientBulkWriteCursorError extends MongoRuntimeError {
     get name(): string;
 }

+/**
+ * An error indicating that an error occurred when executing the bulk write.
+ *
+ * @public
+ * @category Error
+ */
+export declare class MongoClientBulkWriteError extends MongoServerError {
+    /**
+     * Write concern errors that occurred while executing the bulk write. This list may have
+     * multiple items if more than one server command was required to execute the bulk write.
+     */
+    writeConcernErrors: Document[];
+    /**
+     * Errors that occurred during the execution of individual write operations. This map will
+     * contain at most one entry if the bulk write was ordered.
+     */
+    writeErrors: Map<number, ClientBulkWriteError>;
+    /**
+     * The results of any successful operations that were performed before the error was
+     * encountered.
+     */
+    partialResult?: ClientBulkWriteResult;
+    /**
+     * Initialize the client bulk write error.
+     * @param message - The error message.
+     */
+    constructor(message: ErrorDescription);
+    get name(): string;
+}
+
 /**
  * An error indicating that an error occurred on the client when executing a client bulk write.
  *
@@ -5988,7 +6035,9 @@ export declare class MongoInvalidArgumentError extends MongoAPIError {
     *
     * @public
     **/
-    constructor(message: string);
+    constructor(message: string, options?: {
+        cause?: Error;
+    });
     get name(): string;
 }

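The deprecation notices above point callers at the cursor API instead of the `explain` option. A minimal migration sketch, assuming a local deployment and a `test.docs` collection; the filter and verbosity values are illustrative:

```ts
import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017');
const collection = client.db('test').collection('docs');

// Deprecated: passing the verbosity through the options object.
const deprecatedCursor = collection.find({ status: 'active' }, { explain: 'queryPlanner' });

// Preferred: call explain() on the cursor returned by find() or aggregate().
const findPlan = await collection.find({ status: 'active' }).explain('queryPlanner');
const aggPlan = await collection
  .aggregate([{ $match: { status: 'active' } }])
  .explain('queryPlanner');

console.log(findPlan, aggPlan, deprecatedCursor);
```
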
package/package.json
CHANGED

-  "version": "6.9.0-dev.20241010.sha.6ecf198f",
+  "version": "6.9.0-dev.20241012.sha.a473de95",

package/src/cmap/wire_protocol/responses.ts
CHANGED

@@ -354,4 +354,8 @@ export class ClientBulkWriteCursorResponse extends CursorResponse {
   get deletedCount() {
     return this.get('nDeleted', BSONType.int, true);
   }
+
+  get writeConcernError() {
+    return this.get('writeConcernError', BSONType.object, false);
+  }
 }

package/src/cursor/client_bulk_write_cursor.ts
CHANGED

@@ -1,7 +1,6 @@
 import { type Document } from 'bson';

 import { type ClientBulkWriteCursorResponse } from '../cmap/wire_protocol/responses';
-import { MongoClientBulkWriteCursorError } from '../error';
 import type { MongoClient } from '../mongo_client';
 import { ClientBulkWriteOperation } from '../operations/client_bulk_write/client_bulk_write';
 import { type ClientBulkWriteCommandBuilder } from '../operations/client_bulk_write/command_builder';
@@ -48,16 +47,11 @@ export class ClientBulkWriteCursor extends AbstractCursor {
    * We need a way to get the top level cursor response fields for
    * generating the bulk write result, so we expose this here.
    */
-  get response(): ClientBulkWriteCursorResponse {
+  get response(): ClientBulkWriteCursorResponse | null {
     if (this.cursorResponse) return this.cursorResponse;
-    throw new MongoClientBulkWriteCursorError(
-      'No client bulk write cursor response returned from the server.'
-    );
+    return null;
   }

-  /**
-   * Get the last set of operations the cursor executed.
-   */
   get operations(): Document[] {
     return this.commandBuilder.lastOperations;
   }

package/src/error.ts
CHANGED

@@ -1,4 +1,8 @@
 import type { Document } from './bson';
+import {
+  type ClientBulkWriteError,
+  type ClientBulkWriteResult
+} from './operations/client_bulk_write/common';
 import type { ServerType } from './sdam/common';
 import type { TopologyVersion } from './sdam/server_description';
 import type { TopologyDescription } from './sdam/topology_description';
@@ -616,6 +620,44 @@ export class MongoGCPError extends MongoOIDCError {
   }
 }

+/**
+ * An error indicating that an error occurred when executing the bulk write.
+ *
+ * @public
+ * @category Error
+ */
+export class MongoClientBulkWriteError extends MongoServerError {
+  /**
+   * Write concern errors that occurred while executing the bulk write. This list may have
+   * multiple items if more than one server command was required to execute the bulk write.
+   */
+  writeConcernErrors: Document[];
+  /**
+   * Errors that occurred during the execution of individual write operations. This map will
+   * contain at most one entry if the bulk write was ordered.
+   */
+  writeErrors: Map<number, ClientBulkWriteError>;
+  /**
+   * The results of any successful operations that were performed before the error was
+   * encountered.
+   */
+  partialResult?: ClientBulkWriteResult;
+
+  /**
+   * Initialize the client bulk write error.
+   * @param message - The error message.
+   */
+  constructor(message: ErrorDescription) {
+    super(message);
+    this.writeConcernErrors = [];
+    this.writeErrors = new Map();
+  }
+
+  override get name(): string {
+    return 'MongoClientBulkWriteError';
+  }
+}
+
 /**
  * An error indicating that an error occurred when processing bulk write results.
  *
@@ -1047,8 +1089,8 @@ export class MongoInvalidArgumentError extends MongoAPIError {
    *
    * @public
    **/
-  constructor(message: string) {
-    super(message);
+  constructor(message: string, options?: { cause?: Error }) {
+    super(message, options);
   }

   override get name(): string {

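A hedged handling sketch for the new error type, using the write-model shape referenced elsewhere in this diff (`namespace`, `name`, `document`); the connection string, namespace, and documents are placeholders:

```ts
import { MongoClient, MongoClientBulkWriteError, type AnyClientBulkWriteModel } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017');
const models: AnyClientBulkWriteModel[] = [
  { namespace: 'test.coll', name: 'insertOne', document: { _id: 1 } },
  // Second insert of the same _id: an illustrative way to trigger a write error.
  { namespace: 'test.coll', name: 'insertOne', document: { _id: 1 } }
];

try {
  await client.bulkWrite(models, { ordered: false });
} catch (error) {
  if (error instanceof MongoClientBulkWriteError) {
    // Individual write failures, keyed by the index of the model that produced them.
    for (const [index, writeError] of error.writeErrors) {
      console.log(index, writeError.code, writeError.message);
    }
    // Write concern errors accumulated across the server commands that were run.
    console.log(error.writeConcernErrors.length);
    // Results of the operations that succeeded before the failure.
    console.log(error.partialResult?.insertedCount);
  }
  throw error;
}
```
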
package/src/index.ts
CHANGED

@@ -46,6 +46,7 @@ export {
   MongoBatchReExecutionError,
   MongoChangeStreamError,
   MongoClientBulkWriteCursorError,
+  MongoClientBulkWriteError,
   MongoClientBulkWriteExecutionError,
   MongoCompatibilityError,
   MongoCursorExhaustedError,
@@ -477,6 +478,7 @@ export type {
 } from './operations/aggregate';
 export type {
   AnyClientBulkWriteModel,
+  ClientBulkWriteError,
   ClientBulkWriteOptions,
   ClientBulkWriteResult,
   ClientDeleteManyModel,

package/src/mongo_client.ts
CHANGED

@@ -493,6 +493,11 @@ export class MongoClient extends TypedEventEmitter<MongoClientEvents> implements
     models: AnyClientBulkWriteModel[],
     options?: ClientBulkWriteOptions
   ): Promise<ClientBulkWriteResult | { ok: 1 }> {
+    if (this.autoEncrypter) {
+      throw new MongoInvalidArgumentError(
+        'MongoClient bulkWrite does not currently support automatic encryption.'
+      );
+    }
     return await new ClientBulkWriteExecutor(this, models, options).execute();
   }

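A minimal usage sketch for `MongoClient.bulkWrite` with the new auto-encryption guard in mind; the connection string, namespace, and documents are placeholders:

```ts
import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017');
await client.connect();

// Works on a client constructed without autoEncryption.
const result = await client.bulkWrite([
  { namespace: 'test.coll', name: 'insertOne', document: { _id: 1 } },
  { namespace: 'test.coll', name: 'deleteOne', filter: { _id: 2 } }
]);
if ('insertedCount' in result) {
  console.log(result.insertedCount, result.deletedCount);
}

// A client constructed with autoEncryption options now throws
// MongoInvalidArgumentError from bulkWrite() before any command is sent.
await client.close();
```
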
package/src/operations/aggregate.ts
CHANGED

@@ -1,6 +1,7 @@
 import type { Document } from '../bson';
 import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
 import { MongoInvalidArgumentError } from '../error';
+import { type ExplainOptions } from '../explain';
 import type { Server } from '../sdam/server';
 import type { ClientSession } from '../sessions';
 import { maxWireVersion, type MongoDBNamespace } from '../utils';
@@ -14,7 +15,7 @@ export const DB_AGGREGATE_COLLECTION = 1 as const;
 const MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT = 8;

 /** @public */
-export interface AggregateOptions extends CommandOperationOptions {
+export interface AggregateOptions extends Omit<CommandOperationOptions, 'explain'> {
   /** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires mongodb 2.6 >). */
   allowDiskUse?: boolean;
   /** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
@@ -35,6 +36,13 @@ export interface AggregateOptions extends CommandOperationOptions {
   let?: Document;

   out?: string;
+
+  /**
+   * Specifies the verbosity mode for the explain output.
+   * @deprecated This API is deprecated in favor of `collection.aggregate().explain()`
+   * or `db.aggregate().explain()`.
+   */
+  explain?: ExplainOptions['explain'];
 }

 /** @internal */

package/src/operations/client_bulk_write/client_bulk_write.ts
CHANGED

@@ -27,6 +27,14 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
     this.ns = new MongoDBNamespace('admin', '$cmd');
   }

+  override resetBatch(): boolean {
+    return this.commandBuilder.resetBatch();
+  }
+
+  override get canRetryWrite(): boolean {
+    return this.commandBuilder.isBatchRetryable;
+  }
+
   /**
    * Execute the command. Superclass will handle write concern, etc.
    * @param server - The server.
@@ -41,14 +49,20 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu

     if (server.description.type === ServerType.LoadBalancer) {
       if (session) {
-        // Checkout a connection to build the command.
-        const connection = await server.pool.checkOut();
-        // Pin the connection to the session so it get used to execute the command and we do not
-        // perform a double check-in/check-out.
-        session.pin(connection);
+        let connection;
+        if (!session.pinnedConnection) {
+          // Checkout a connection to build the command.
+          connection = await server.pool.checkOut();
+          // Pin the connection to the session so it get used to execute the command and we do not
+          // perform a double check-in/check-out.
+          session.pin(connection);
+        } else {
+          connection = session.pinnedConnection;
+        }
         command = this.commandBuilder.buildBatch(
           connection.hello?.maxMessageSizeBytes,
-          connection.hello?.maxWriteBatchSize
+          connection.hello?.maxWriteBatchSize,
+          connection.hello?.maxBsonObjectSize
         );
       } else {
         throw new MongoClientBulkWriteExecutionError(
@@ -59,16 +73,26 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
       // At this point we have a server and the auto connect code has already
       // run in executeOperation, so the server description will be populated.
       // We can use that to build the command.
-      if (!server.description.maxWriteBatchSize || !server.description.maxMessageSizeBytes) {
+      if (
+        !server.description.maxWriteBatchSize ||
+        !server.description.maxMessageSizeBytes ||
+        !server.description.maxBsonObjectSize
+      ) {
         throw new MongoClientBulkWriteExecutionError(
-          'In order to execute a client bulk write, both maxWriteBatchSize and maxMessageSizeBytes must be provided by the servers hello response.'
+          'In order to execute a client bulk write, both maxWriteBatchSize, maxMessageSizeBytes and maxBsonObjectSize must be provided by the servers hello response.'
         );
       }
       command = this.commandBuilder.buildBatch(
         server.description.maxMessageSizeBytes,
-        server.description.maxWriteBatchSize
+        server.description.maxWriteBatchSize,
+        server.description.maxBsonObjectSize
       );
     }
+
+    // Check after the batch is built if we cannot retry it and override the option.
+    if (!this.canRetryWrite) {
+      this.options.willRetryWrite = false;
+    }
     return await super.executeCommand(server, session, command, ClientBulkWriteCursorResponse);
   }
 }
@@ -77,5 +101,7 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
 defineAspects(ClientBulkWriteOperation, [
   Aspect.WRITE_OPERATION,
   Aspect.SKIP_COLLATION,
-  Aspect.CURSOR_CREATING
+  Aspect.CURSOR_CREATING,
+  Aspect.RETRYABLE,
+  Aspect.COMMAND_BATCHING
 ]);

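The operation now carries `Aspect.RETRYABLE` and `Aspect.COMMAND_BATCHING`, and a batch containing multi-document writes turns the retry off via `willRetryWrite`. The sketch below restates that rule as a standalone function; it is illustrative only, and `ClientBulkWriteModelName` and `batchIsRetryable` are not driver exports:

```ts
// Illustrative only: restates the isBatchRetryable rule from the diff above.
type ClientBulkWriteModelName =
  | 'insertOne'
  | 'updateOne'
  | 'updateMany'
  | 'replaceOne'
  | 'deleteOne'
  | 'deleteMany';

function batchIsRetryable(names: ClientBulkWriteModelName[]): boolean {
  // A batch stays retryable only while it contains no multi-document writes.
  return !names.some(name => name === 'updateMany' || name === 'deleteMany');
}

console.log(batchIsRetryable(['insertOne', 'updateOne'])); // true
console.log(batchIsRetryable(['insertOne', 'updateMany'])); // false: willRetryWrite is disabled for this batch
```
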
package/src/operations/client_bulk_write/command_builder.ts
CHANGED

@@ -1,8 +1,9 @@
 import { BSON, type Document } from '../../bson';
 import { DocumentSequence } from '../../cmap/commands';
+import { MongoAPIError, MongoInvalidArgumentError } from '../../error';
 import { type PkFactory } from '../../mongo_client';
 import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
-import { DEFAULT_PK_FACTORY } from '../../utils';
+import { DEFAULT_PK_FACTORY, hasAtomicOperators } from '../../utils';
 import { type CollationOptions } from '../command';
 import { type Hint } from '../operation';
 import type {
@@ -38,8 +39,14 @@ export class ClientBulkWriteCommandBuilder {
   models: AnyClientBulkWriteModel[];
   options: ClientBulkWriteOptions;
   pkFactory: PkFactory;
+  /** The current index in the models array that is being processed. */
   currentModelIndex: number;
+  /** The model index that the builder was on when it finished the previous batch. Used for resets when retrying. */
+  previousModelIndex: number;
+  /** The last array of operations that were created. Used by the results merger for indexing results. */
   lastOperations: Document[];
+  /** Returns true if the current batch being created has no multi-updates. */
+  isBatchRetryable: boolean;

   /**
    * Create the command builder.
@@ -54,7 +61,9 @@ export class ClientBulkWriteCommandBuilder {
     this.options = options;
     this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
     this.currentModelIndex = 0;
+    this.previousModelIndex = 0;
     this.lastOperations = [];
+    this.isBatchRetryable = true;
   }

   /**
@@ -76,27 +85,57 @@ export class ClientBulkWriteCommandBuilder {
     return this.currentModelIndex < this.models.length;
   }

+  /**
+   * When we need to retry a command we need to set the current
+   * model index back to its previous value.
+   */
+  resetBatch(): boolean {
+    this.currentModelIndex = this.previousModelIndex;
+    return true;
+  }
+
   /**
    * Build a single batch of a client bulk write command.
    * @param maxMessageSizeBytes - The max message size in bytes.
    * @param maxWriteBatchSize - The max write batch size.
    * @returns The client bulk write command.
    */
-  buildBatch(maxMessageSizeBytes: number, maxWriteBatchSize: number): ClientBulkWriteCommand {
+  buildBatch(
+    maxMessageSizeBytes: number,
+    maxWriteBatchSize: number,
+    maxBsonObjectSize: number
+  ): ClientBulkWriteCommand {
+    // We start by assuming the batch has no multi-updates, so it is retryable
+    // until we find them.
+    this.isBatchRetryable = true;
     let commandLength = 0;
     let currentNamespaceIndex = 0;
     const command: ClientBulkWriteCommand = this.baseCommand();
     const namespaces = new Map<string, number>();
+    // In the case of retries we need to mark where we started this batch.
+    this.previousModelIndex = this.currentModelIndex;

     while (this.currentModelIndex < this.models.length) {
       const model = this.models[this.currentModelIndex];
       const ns = model.namespace;
       const nsIndex = namespaces.get(ns);

+      // Multi updates are not retryable.
+      if (model.name === 'deleteMany' || model.name === 'updateMany') {
+        this.isBatchRetryable = false;
+      }
+
       if (nsIndex != null) {
         // Build the operation and serialize it to get the bytes buffer.
         const operation = buildOperation(model, nsIndex, this.pkFactory);
-        const operationBuffer = BSON.serialize(operation);
+        let operationBuffer;
+        try {
+          operationBuffer = BSON.serialize(operation);
+        } catch (cause) {
+          throw new MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
+        }
+
+        validateBufferSize('ops', operationBuffer, maxBsonObjectSize);

         // Check if the operation buffer can fit in the command. If it can,
         // then add the operation to the document sequence and increment the
@@ -119,9 +158,18 @@ export class ClientBulkWriteCommandBuilder {
         // construct our nsInfo and ops documents and buffers.
         namespaces.set(ns, currentNamespaceIndex);
         const nsInfo = { ns: ns };
-        const nsInfoBuffer = BSON.serialize(nsInfo);
         const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
-        const operationBuffer = BSON.serialize(operation);
+        let nsInfoBuffer;
+        let operationBuffer;
+        try {
+          nsInfoBuffer = BSON.serialize(nsInfo);
+          operationBuffer = BSON.serialize(operation);
+        } catch (cause) {
+          throw new MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
+        }
+
+        validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
+        validateBufferSize('ops', operationBuffer, maxBsonObjectSize);

         // Check if the operation and nsInfo buffers can fit in the command. If they
         // can, then add the operation and nsInfo to their respective document
@@ -179,6 +227,14 @@ export class ClientBulkWriteCommandBuilder {
   }
 }

+function validateBufferSize(name: string, buffer: Uint8Array, maxBsonObjectSize: number) {
+  if (buffer.length > maxBsonObjectSize) {
+    throw new MongoInvalidArgumentError(
+      `Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`
+    );
+  }
+}
+
 /** @internal */
 interface ClientInsertOperation {
   insert: number;
@@ -293,6 +349,18 @@ export const buildUpdateManyOperation = (
   return createUpdateOperation(model, index, true);
 };

+/**
+ * Validate the update document.
+ * @param update - The update document.
+ */
+function validateUpdate(update: Document) {
+  if (!hasAtomicOperators(update)) {
+    throw new MongoAPIError(
+      'Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.'
+    );
+  }
+}
+
 /**
  * Creates a delete operation based on the parameters.
  */
@@ -301,6 +369,11 @@ function createUpdateOperation(
   index: number,
   multi: boolean
 ): ClientUpdateOperation {
+  // Update documents provided in UpdateOne and UpdateMany write models are
+  // required only to contain atomic modifiers (i.e. keys that start with "$").
+  // Drivers MUST throw an error if an update document is empty or if the
+  // document's first key does not start with "$".
+  validateUpdate(model.update);
   const document: ClientUpdateOperation = {
     update: index,
     multi: multi,
@@ -343,6 +416,12 @@ export const buildReplaceOneOperation = (
   model: ClientReplaceOneModel,
   index: number
 ): ClientReplaceOneOperation => {
+  if (hasAtomicOperators(model.replacement)) {
+    throw new MongoAPIError(
+      'Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.'
+    );
+  }
+
   const document: ClientReplaceOneOperation = {
     update: index,
     multi: false,

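The builder now validates write models on the client: update documents must contain only atomic ($-prefixed) operators, and replacement documents must contain none. A hedged illustration of model shapes that pass or fail the new checks; the namespace and field values are placeholders:

```ts
import type { AnyClientBulkWriteModel } from 'mongodb';

// Passes validation: the update document contains only atomic ($-prefixed) operators.
const updateModel: AnyClientBulkWriteModel = {
  namespace: 'test.coll',
  name: 'updateOne',
  filter: { _id: 1 },
  update: { $set: { status: 'active' } }
};

// Passes validation: the replacement document contains no atomic operators.
const replaceModel: AnyClientBulkWriteModel = {
  namespace: 'test.coll',
  name: 'replaceOne',
  filter: { _id: 1 },
  replacement: { status: 'active' }
};

// Throws MongoAPIError when the batch is built: an update without atomic operators.
const invalidUpdate: AnyClientBulkWriteModel = {
  namespace: 'test.coll',
  name: 'updateOne',
  filter: { _id: 1 },
  update: { status: 'active' }
};

console.log(updateModel, replaceModel, invalidUpdate);
```
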
package/src/operations/client_bulk_write/common.ts
CHANGED

@@ -181,6 +181,12 @@ export interface ClientBulkWriteResult {
   deleteResults?: Map<number, ClientDeleteResult>;
 }

+/** @public */
+export interface ClientBulkWriteError {
+  code: number;
+  message: string;
+}
+
 /** @public */
 export interface ClientInsertOneResult {
   /**

package/src/operations/client_bulk_write/executor.ts
CHANGED

@@ -1,4 +1,9 @@
 import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
+import {
+  MongoClientBulkWriteError,
+  MongoClientBulkWriteExecutionError,
+  MongoServerError
+} from '../../error';
 import { type MongoClient } from '../../mongo_client';
 import { WriteConcern } from '../../write_concern';
 import { executeOperation } from '../execute_operation';
@@ -31,9 +36,18 @@ export class ClientBulkWriteExecutor {
     operations: AnyClientBulkWriteModel[],
     options?: ClientBulkWriteOptions
   ) {
+    if (operations.length === 0) {
+      throw new MongoClientBulkWriteExecutionError('No client bulk write models were provided.');
+    }
+
     this.client = client;
     this.operations = operations;
-    this.options = {
+    this.options = {
+      ordered: true,
+      bypassDocumentValidation: false,
+      verboseResults: false,
+      ...options
+    };

     // If no write concern was provided, we inherit one from the client.
     if (!this.options.writeConcern) {
@@ -65,15 +79,42 @@ export class ClientBulkWriteExecutor {
     } else {
       const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
       // For each command will will create and exhaust a cursor for the results.
-      let currentBatchOffset = 0;
       while (commandBuilder.hasNextBatch()) {
         const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options);
-
-
-
-
-
+        try {
+          await resultsMerger.merge(cursor);
+        } catch (error) {
+          // Write concern errors are recorded in the writeConcernErrors field on MongoClientBulkWriteError.
+          // When a write concern error is encountered, it should not terminate execution of the bulk write
+          // for either ordered or unordered bulk writes. However, drivers MUST throw an exception at the end
+          // of execution if any write concern errors were observed.
+          if (error instanceof MongoServerError && !(error instanceof MongoClientBulkWriteError)) {
+            // Server side errors need to be wrapped inside a MongoClientBulkWriteError, where the root
+            // cause is the error property and a partial result is to be included.
+            const bulkWriteError = new MongoClientBulkWriteError({
+              message: 'Mongo client bulk write encountered an error during execution'
+            });
+            bulkWriteError.cause = error;
+            bulkWriteError.partialResult = resultsMerger.result;
+            throw bulkWriteError;
+          } else {
+            // Client side errors are just thrown.
+            throw error;
+          }
+        }
       }
+
+      // If we have write concern errors or unordered write errors at the end we throw.
+      if (resultsMerger.writeConcernErrors.length > 0 || resultsMerger.writeErrors.size > 0) {
+        const error = new MongoClientBulkWriteError({
+          message: 'Mongo client bulk write encountered errors during execution.'
+        });
+        error.writeConcernErrors = resultsMerger.writeConcernErrors;
+        error.writeErrors = resultsMerger.writeErrors;
+        error.partialResult = resultsMerger.result;
+        throw error;
+      }
+
       return resultsMerger.result;
     }
   }