mongodb 6.9.0-dev.20241001.sha.85f7dcf9 → 6.9.0-dev.20241003.sha.91f30357
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/beta.d.ts +50 -22
- package/lib/cmap/commands.js +47 -17
- package/lib/cmap/commands.js.map +1 -1
- package/lib/cursor/client_bulk_write_cursor.js +11 -5
- package/lib/cursor/client_bulk_write_cursor.js.map +1 -1
- package/lib/error.js +30 -4
- package/lib/error.js.map +1 -1
- package/lib/index.js +5 -4
- package/lib/index.js.map +1 -1
- package/lib/operations/client_bulk_write/client_bulk_write.js +32 -4
- package/lib/operations/client_bulk_write/client_bulk_write.js.map +1 -1
- package/lib/operations/client_bulk_write/command_builder.js +78 -15
- package/lib/operations/client_bulk_write/command_builder.js.map +1 -1
- package/lib/operations/client_bulk_write/executor.js +20 -26
- package/lib/operations/client_bulk_write/executor.js.map +1 -1
- package/lib/operations/client_bulk_write/results_merger.js +9 -4
- package/lib/operations/client_bulk_write/results_merger.js.map +1 -1
- package/lib/sdam/server.js +2 -1
- package/lib/sdam/server.js.map +1 -1
- package/lib/sdam/server_description.js +3 -0
- package/lib/sdam/server_description.js.map +1 -1
- package/lib/sdam/topology_description.js.map +1 -1
- package/mongodb.d.ts +50 -22
- package/package.json +1 -1
- package/src/cmap/commands.ts +53 -17
- package/src/cursor/client_bulk_write_cursor.ts +21 -8
- package/src/error.ts +29 -2
- package/src/index.ts +2 -1
- package/src/operations/client_bulk_write/client_bulk_write.ts +43 -7
- package/src/operations/client_bulk_write/command_builder.ts +89 -17
- package/src/operations/client_bulk_write/executor.ts +20 -39
- package/src/operations/client_bulk_write/results_merger.ts +9 -3
- package/src/sdam/server.ts +2 -1
- package/src/sdam/server_description.ts +9 -0
- package/src/sdam/topology_description.ts +0 -1
package/src/cmap/commands.ts
CHANGED
```diff
@@ -429,10 +429,60 @@ export interface OpMsgOptions
 
 /** @internal */
 export class DocumentSequence {
+  field: string;
   documents: Document[];
+  serializedDocumentsLength: number;
+  private chunks: Uint8Array[];
+  private header: Buffer;
 
-  constructor(documents: Document[]) {
-    this.documents = documents;
+  /**
+   * Create a new document sequence for the provided field.
+   * @param field - The field it will replace.
+   */
+  constructor(field: string, documents?: Document[]) {
+    this.field = field;
+    this.documents = [];
+    this.chunks = [];
+    this.serializedDocumentsLength = 0;
+    // Document sequences starts with type 1 at the first byte.
+    // Field strings must always be UTF-8.
+    const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
+    buffer[0] = 1;
+    // Third part is the field name at offset 5 with trailing null byte.
+    encodeUTF8Into(buffer, `${this.field}\0`, 5);
+    this.chunks.push(buffer);
+    this.header = buffer;
+    if (documents) {
+      for (const doc of documents) {
+        this.push(doc, BSON.serialize(doc));
+      }
+    }
+  }
+
+  /**
+   * Push a document to the document sequence. Will serialize the document
+   * as well and return the current serialized length of all documents.
+   * @param document - The document to add.
+   * @param buffer - The serialized document in raw BSON.
+   * @returns The new total document sequence length.
+   */
+  push(document: Document, buffer: Uint8Array): number {
+    this.serializedDocumentsLength += buffer.length;
+    // Push the document.
+    this.documents.push(document);
+    // Push the document raw bson.
+    this.chunks.push(buffer);
+    // Write the new length.
+    this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
+    return this.serializedDocumentsLength + this.header.length;
+  }
+
+  /**
+   * Get the fully serialized bytes for the document sequence section.
+   * @returns The section bytes.
+   */
+  toBin(): Uint8Array {
+    return Buffer.concat(this.chunks);
   }
 }
 
```
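
The constructor now pre-builds the section header and `push` rewrites the length in place, so `toBin` is a plain concat. For intuition, here is a standalone sketch of the same OP_MSG kind-1 section layout built in one pass; the helper name and sample document are mine, not driver code:

```ts
import { serialize, type Document } from 'bson';

// OP_MSG kind-1 (document sequence) section layout, built in one shot:
//   [1 byte kind = 1][int32 LE section length][field name + '\0'][BSON docs...]
// The int32 covers itself, the cstring and the documents -- the same value
// push() keeps rewriting at offset 1 in the incremental version above.
function documentSequenceSection(field: string, documents: Document[]): Buffer {
  const docs = documents.map(doc => Buffer.from(serialize(doc)));
  const docsLength = docs.reduce((sum, doc) => sum + doc.length, 0);
  const header = Buffer.allocUnsafe(1 + 4 + field.length + 1);
  header[0] = 1; // kind byte: document sequence
  header.writeInt32LE(4 + field.length + 1 + docsLength, 1);
  header.write(`${field}\0`, 5, 'utf8');
  return Buffer.concat([header, ...docs]);
}

const section = documentSequenceSection('ops', [{ insert: 0, document: { _id: 1 } }]);
console.log(section[0], section.readInt32LE(1)); // 1 and the section length
```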
```diff
@@ -543,21 +593,7 @@ export class OpMsgRequest
     const chunks = [];
     for (const [key, value] of Object.entries(document)) {
       if (value instanceof DocumentSequence) {
-        // Document sequences starts with type 1 at the first byte.
-        const buffer = Buffer.allocUnsafe(1 + 4 + key.length + 1);
-        buffer[0] = 1;
-        // Third part is the field name at offset 5 with trailing null byte.
-        encodeUTF8Into(buffer, `${key}\0`, 5);
-        chunks.push(buffer);
-        // Fourth part are the documents' bytes.
-        let docsLength = 0;
-        for (const doc of value.documents) {
-          const docBson = this.serializeBson(doc);
-          docsLength += docBson.length;
-          chunks.push(docBson);
-        }
-        // Second part of the sequence is the length at offset 1;
-        buffer.writeInt32LE(4 + key.length + 1 + docsLength, 1);
+        chunks.push(value.toBin());
         // Why are we removing the field from the command? This is because it needs to be
         // removed in the OP_MSG request first section, and DocumentSequence is not a
         // BSON type and is specific to the MongoDB wire protocol so there's nothing
```
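
With the bytes owned by `DocumentSequence`, `OpMsgRequest` only concatenates the pre-built section. Reading a header back makes the offsets concrete; this is a hypothetical inverse of `toBin()`, not a driver API:

```ts
// Hypothetical reader for the fixed-size part of a kind-1 section:
// the kind byte, the int32 section length and the cstring field name.
function readSectionHeader(section: Buffer): { kind: number; length: number; field: string } {
  const kind = section[0];
  const length = section.readInt32LE(1); // excludes the kind byte itself
  const fieldEnd = section.indexOf(0, 5); // cstring terminator
  return { kind, length, field: section.toString('utf8', 5, fieldEnd) };
}

// On the sketch above: readSectionHeader(section) -> { kind: 1, length: ..., field: 'ops' }
```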

package/src/cursor/client_bulk_write_cursor.ts
CHANGED

```diff
@@ -1,8 +1,10 @@
-import type { Document } from '../bson';
+import { type Document } from 'bson';
+
 import { type ClientBulkWriteCursorResponse } from '../cmap/wire_protocol/responses';
-import { MongoBulkWriteCursorError } from '../error';
+import { MongoClientBulkWriteCursorError } from '../error';
 import type { MongoClient } from '../mongo_client';
 import { ClientBulkWriteOperation } from '../operations/client_bulk_write/client_bulk_write';
+import { type ClientBulkWriteCommandBuilder } from '../operations/client_bulk_write/command_builder';
 import { type ClientBulkWriteOptions } from '../operations/client_bulk_write/common';
 import { executeOperation } from '../operations/execute_operation';
 import type { ClientSession } from '../sessions';
@@ -24,17 +26,21 @@ export interface ClientBulkWriteCursorOptions
  * @internal
  */
 export class ClientBulkWriteCursor extends AbstractCursor {
-  command: Document;
+  commandBuilder: ClientBulkWriteCommandBuilder;
   /** @internal */
   private cursorResponse?: ClientBulkWriteCursorResponse;
   /** @internal */
   private clientBulkWriteOptions: ClientBulkWriteOptions;
 
   /** @internal */
-  constructor(client: MongoClient, command: Document, options: ClientBulkWriteOptions = {}) {
+  constructor(
+    client: MongoClient,
+    commandBuilder: ClientBulkWriteCommandBuilder,
+    options: ClientBulkWriteOptions = {}
+  ) {
     super(client, new MongoDBNamespace('admin', '$cmd'), options);
 
-    this.command = command;
+    this.commandBuilder = commandBuilder;
     this.clientBulkWriteOptions = options;
   }
 
@@ -44,22 +50,29 @@ export class ClientBulkWriteCursor extends AbstractCursor {
    */
   get response(): ClientBulkWriteCursorResponse {
     if (this.cursorResponse) return this.cursorResponse;
-    throw new MongoBulkWriteCursorError(
+    throw new MongoClientBulkWriteCursorError(
       'No client bulk write cursor response returned from the server.'
     );
   }
 
+  /**
+   * Get the last set of operations the cursor executed.
+   */
+  get operations(): Document[] {
+    return this.commandBuilder.lastOperations;
+  }
+
   clone(): ClientBulkWriteCursor {
     const clonedOptions = mergeOptions({}, this.clientBulkWriteOptions);
     delete clonedOptions.session;
-    return new ClientBulkWriteCursor(this.client, this.command, {
+    return new ClientBulkWriteCursor(this.client, this.commandBuilder, {
       ...clonedOptions
     });
   }
 
   /** @internal */
   async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
-    const clientBulkWriteOperation = new ClientBulkWriteOperation(this.command, {
+    const clientBulkWriteOperation = new ClientBulkWriteOperation(this.commandBuilder, {
       ...this.clientBulkWriteOptions,
       ...this.cursorOptions,
       session
```
package/src/error.ts
CHANGED
```diff
@@ -622,7 +622,7 @@ export class MongoGCPError extends MongoOIDCError {
  * @public
  * @category Error
  */
-export class MongoBulkWriteCursorError extends MongoRuntimeError {
+export class MongoClientBulkWriteCursorError extends MongoRuntimeError {
   /**
    * **Do not use this constructor!**
    *
@@ -639,7 +639,34 @@ export class MongoBulkWriteCursorError extends MongoRuntimeError {
   }
 
   override get name(): string {
-    return 'MongoBulkWriteCursorError';
+    return 'MongoClientBulkWriteCursorError';
+  }
+}
+
+/**
+ * An error indicating that an error occurred on the client when executing a client bulk write.
+ *
+ * @public
+ * @category Error
+ */
+export class MongoClientBulkWriteExecutionError extends MongoRuntimeError {
+  /**
+   * **Do not use this constructor!**
+   *
+   * Meant for internal use only.
+   *
+   * @remarks
+   * This class is only meant to be constructed within the driver. This constructor is
+   * not subject to semantic versioning compatibility guarantees and may change at any time.
+   *
+   * @public
+   **/
+  constructor(message: string) {
+    super(message);
+  }
+
+  override get name(): string {
+    return 'MongoClientBulkWriteExecutionError';
   }
 }
 
```
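
Both new classes extend `MongoRuntimeError`, so broad catch handlers keep working while callers can branch on the specific failure. A hypothetical consumer-side sketch; it assumes the in-progress `client.bulkWrite` entry point and model shape from this beta API:

```ts
import {
  MongoClient,
  MongoClientBulkWriteCursorError,
  MongoClientBulkWriteExecutionError,
  MongoRuntimeError
} from 'mongodb';

async function run(client: MongoClient): Promise<void> {
  try {
    await client.bulkWrite([
      { namespace: 'test.coll', name: 'insertOne' as const, document: { _id: 1 } }
    ]);
  } catch (error) {
    if (error instanceof MongoClientBulkWriteExecutionError) {
      // A batch could not be built or executed (e.g. hello limits missing).
    } else if (error instanceof MongoClientBulkWriteCursorError) {
      // No cursor response came back from the server.
    } else if (error instanceof MongoRuntimeError) {
      // Any other client-side failure.
    }
    throw error;
  }
}
```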
package/src/index.ts
CHANGED
```diff
@@ -44,8 +44,9 @@ export {
   MongoAWSError,
   MongoAzureError,
   MongoBatchReExecutionError,
-  MongoBulkWriteCursorError,
   MongoChangeStreamError,
+  MongoClientBulkWriteCursorError,
+  MongoClientBulkWriteExecutionError,
   MongoCompatibilityError,
   MongoCursorExhaustedError,
   MongoCursorInUseError,
```

package/src/operations/client_bulk_write/client_bulk_write.ts
CHANGED

```diff
@@ -1,11 +1,11 @@
-import { type Document } from 'bson';
-
+import { MongoClientBulkWriteExecutionError, ServerType } from '../../beta';
 import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses';
 import type { Server } from '../../sdam/server';
 import type { ClientSession } from '../../sessions';
 import { MongoDBNamespace } from '../../utils';
 import { CommandOperation } from '../command';
 import { Aspect, defineAspects } from '../operation';
+import { type ClientBulkWriteCommandBuilder } from './command_builder';
 import { type ClientBulkWriteOptions } from './common';
 
 /**
@@ -13,16 +13,16 @@ import { type ClientBulkWriteOptions } from './common';
  * @internal
  */
 export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCursorResponse> {
-  command: Document;
+  commandBuilder: ClientBulkWriteCommandBuilder;
   override options: ClientBulkWriteOptions;
 
   override get commandName() {
     return 'bulkWrite' as const;
   }
 
-  constructor(command: Document, options: ClientBulkWriteOptions) {
+  constructor(commandBuilder: ClientBulkWriteCommandBuilder, options: ClientBulkWriteOptions) {
     super(undefined, options);
-    this.command = command;
+    this.commandBuilder = commandBuilder;
     this.options = options;
     this.ns = new MongoDBNamespace('admin', '$cmd');
   }
@@ -37,9 +37,45 @@ export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCu
     server: Server,
     session: ClientSession | undefined
   ): Promise<ClientBulkWriteCursorResponse> {
-    return await super.executeCommand(server, session, this.command, ClientBulkWriteCursorResponse);
+    let command;
+
+    if (server.description.type === ServerType.LoadBalancer) {
+      if (session) {
+        // Checkout a connection to build the command.
+        const connection = await server.pool.checkOut();
+        // Pin the connection to the session so it get used to execute the command and we do not
+        // perform a double check-in/check-out.
+        session.pin(connection);
+        command = this.commandBuilder.buildBatch(
+          connection.hello?.maxMessageSizeBytes,
+          connection.hello?.maxWriteBatchSize
+        );
+      } else {
+        throw new MongoClientBulkWriteExecutionError(
+          'Session provided to the client bulk write operation must be present.'
+        );
+      }
+    } else {
+      // At this point we have a server and the auto connect code has already
+      // run in executeOperation, so the server description will be populated.
+      // We can use that to build the command.
+      if (!server.description.maxWriteBatchSize || !server.description.maxMessageSizeBytes) {
+        throw new MongoClientBulkWriteExecutionError(
+          'In order to execute a client bulk write, both maxWriteBatchSize and maxMessageSizeBytes must be provided by the servers hello response.'
+        );
+      }
+      command = this.commandBuilder.buildBatch(
+        server.description.maxMessageSizeBytes,
+        server.description.maxWriteBatchSize
+      );
+    }
+    return await super.executeCommand(server, session, command, ClientBulkWriteCursorResponse);
   }
 }
 
 // Skipping the collation as it goes on the individual ops.
-defineAspects(ClientBulkWriteOperation, [Aspect.WRITE_OPERATION, Aspect.SKIP_COLLATION]);
+defineAspects(ClientBulkWriteOperation, [
+  Aspect.WRITE_OPERATION,
+  Aspect.SKIP_COLLATION,
+  Aspect.CURSOR_CREATING
+]);
```
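
`execute` now resolves the batch limits from one of two places, the pinned connection's hello response on load balancers or the server description otherwise, and fails fast if either limit is missing. A standalone sketch of that guard (function and types are mine):

```ts
interface BulkWriteLimits {
  maxMessageSizeBytes: number | null;
  maxWriteBatchSize: number | null;
}

// Fail fast unless both hello-derived limits are known, mirroring the check
// in execute() above; callers then hand the limits to buildBatch().
function requireLimits(source: BulkWriteLimits): { maxMessageSizeBytes: number; maxWriteBatchSize: number } {
  const { maxMessageSizeBytes, maxWriteBatchSize } = source;
  if (!maxMessageSizeBytes || !maxWriteBatchSize) {
    throw new Error('client bulk write requires maxWriteBatchSize and maxMessageSizeBytes from hello');
  }
  return { maxMessageSizeBytes, maxWriteBatchSize };
}

// e.g. requireLimits({ maxMessageSizeBytes: 48_000_000, maxWriteBatchSize: 100_000 })
```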

package/src/operations/client_bulk_write/command_builder.ts
CHANGED

```diff
@@ -1,4 +1,4 @@
-import { type Document } from '../../bson';
+import { BSON, type Document } from '../../bson';
 import { DocumentSequence } from '../../cmap/commands';
 import { type PkFactory } from '../../mongo_client';
 import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
@@ -28,11 +28,18 @@ export interface ClientBulkWriteCommand {
   comment?: any;
 }
 
+/**
+ * The bytes overhead for the extra fields added post command generation.
+ */
+const MESSAGE_OVERHEAD_BYTES = 1000;
+
 /** @internal */
 export class ClientBulkWriteCommandBuilder {
   models: AnyClientBulkWriteModel[];
   options: ClientBulkWriteOptions;
   pkFactory: PkFactory;
+  currentModelIndex: number;
+  lastOperations: Document[];
 
   /**
    * Create the command builder.
@@ -46,6 +53,8 @@ export class ClientBulkWriteCommandBuilder {
     this.models = models;
     this.options = options;
     this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
+    this.currentModelIndex = 0;
+    this.lastOperations = [];
   }
 
   /**
@@ -60,34 +69,96 @@ export class ClientBulkWriteCommandBuilder {
   }
 
   /**
-   * Build the bulk write commands from the models.
+   * Determines if there is another batch to process.
+   * @returns True if not all batches have been built.
+   */
+  hasNextBatch(): boolean {
+    return this.currentModelIndex < this.models.length;
+  }
+
+  /**
+   * Build a single batch of a client bulk write command.
+   * @param maxMessageSizeBytes - The max message size in bytes.
+   * @param maxWriteBatchSize - The max write batch size.
+   * @returns The client bulk write command.
    */
-  buildCommands(): ClientBulkWriteCommand[] {
-    // Iterate the models to build the ops and nsInfo fields.
-    const operations = [];
+  buildBatch(maxMessageSizeBytes: number, maxWriteBatchSize: number): ClientBulkWriteCommand {
+    let commandLength = 0;
     let currentNamespaceIndex = 0;
+    const command: ClientBulkWriteCommand = this.baseCommand();
     const namespaces = new Map<string, number>();
-    for (const model of this.models) {
+
+    while (this.currentModelIndex < this.models.length) {
+      const model = this.models[this.currentModelIndex];
       const ns = model.namespace;
-      const index = namespaces.get(ns);
-      if (index != null) {
-        operations.push(buildOperation(model, index, this.pkFactory));
+      const nsIndex = namespaces.get(ns);
+
+      if (nsIndex != null) {
+        // Build the operation and serialize it to get the bytes buffer.
+        const operation = buildOperation(model, nsIndex, this.pkFactory);
+        const operationBuffer = BSON.serialize(operation);
+
+        // Check if the operation buffer can fit in the command. If it can,
+        // then add the operation to the document sequence and increment the
+        // current length as long as the ops don't exceed the maxWriteBatchSize.
+        if (
+          commandLength + operationBuffer.length < maxMessageSizeBytes &&
+          command.ops.documents.length < maxWriteBatchSize
+        ) {
+          // Pushing to the ops document sequence returns the total byte length of the document sequence.
+          commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
+          // Increment the builder's current model index.
+          this.currentModelIndex++;
+        } else {
+          // The operation cannot fit in the current command and will need to
+          // go in the next batch. Exit the loop.
+          break;
+        }
       } else {
+        // The namespace is not already in the nsInfo so we will set it in the map, and
+        // construct our nsInfo and ops documents and buffers.
         namespaces.set(ns, currentNamespaceIndex);
-        operations.push(buildOperation(model, currentNamespaceIndex, this.pkFactory));
-        currentNamespaceIndex++;
+        const nsInfo = { ns: ns };
+        const nsInfoBuffer = BSON.serialize(nsInfo);
+        const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
+        const operationBuffer = BSON.serialize(operation);
+
+        // Check if the operation and nsInfo buffers can fit in the command. If they
+        // can, then add the operation and nsInfo to their respective document
+        // sequences and increment the current length as long as the ops don't exceed
+        // the maxWriteBatchSize.
+        if (
+          commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
+          command.ops.documents.length < maxWriteBatchSize
+        ) {
+          // Pushing to the ops document sequence returns the total byte length of the document sequence.
+          commandLength =
+            MESSAGE_OVERHEAD_BYTES +
+            command.nsInfo.push(nsInfo, nsInfoBuffer) +
+            command.ops.push(operation, operationBuffer);
+          // We've added a new namespace, increment the namespace index.
+          currentNamespaceIndex++;
+          // Increment the builder's current model index.
+          this.currentModelIndex++;
+        } else {
+          // The operation cannot fit in the current command and will need to
+          // go in the next batch. Exit the loop.
+          break;
+        }
       }
     }
+    // Set the last operations and return the command.
+    this.lastOperations = command.ops.documents;
+    return command;
+  }
 
-    const nsInfo = Array.from(namespaces.keys(), ns => ({ ns }));
-
-    // The base command.
+  private baseCommand(): ClientBulkWriteCommand {
     const command: ClientBulkWriteCommand = {
       bulkWrite: 1,
       errorsOnly: this.errorsOnly,
       ordered: this.options.ordered ?? true,
-      ops: new DocumentSequence(operations),
-      nsInfo: new DocumentSequence(nsInfo)
+      ops: new DocumentSequence('ops'),
+      nsInfo: new DocumentSequence('nsInfo')
     };
     // Add bypassDocumentValidation if it was present in the options.
     if (this.options.bypassDocumentValidation != null) {
@@ -103,7 +174,8 @@ export class ClientBulkWriteCommandBuilder {
     if (this.options.comment !== undefined) {
       command.comment = this.options.comment;
     }
-    return [command];
+
+    return command;
   }
 }
 
```
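
`buildBatch` is a greedy packer: it appends serialized operations until the next one would exceed either the byte budget or the operation-count budget, then leaves the remaining models for the next call. The same idea reduced to operation sizes only, as a standalone sketch:

```ts
// Greedy batch splitting over operation byte sizes. Mirrors the two budgets
// buildBatch() enforces: message bytes (with fixed overhead) and op count.
function splitIntoBatches(
  opSizes: number[],
  maxMessageSizeBytes: number,
  maxWriteBatchSize: number,
  overheadBytes = 1000 // stands in for MESSAGE_OVERHEAD_BYTES
): number[][] {
  const batches: number[][] = [];
  let index = 0;
  while (index < opSizes.length) {
    const batch: number[] = [];
    let bytes = overheadBytes;
    while (
      index < opSizes.length &&
      bytes + opSizes[index] < maxMessageSizeBytes &&
      batch.length < maxWriteBatchSize
    ) {
      bytes += opSizes[index];
      batch.push(index++);
    }
    if (batch.length === 0) {
      throw new Error('a single operation exceeds the message size budget');
    }
    batches.push(batch);
  }
  return batches;
}

// Two ops fit under the 1900-byte budget, the third spills into batch two:
console.log(splitIntoBatches([400, 400, 400], 1900, 100)); // [[0, 1], [2]]
```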

package/src/operations/client_bulk_write/executor.ts
CHANGED

```diff
@@ -1,11 +1,9 @@
-import { type Document } from 'bson';
-
 import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
 import { type MongoClient } from '../../mongo_client';
 import { WriteConcern } from '../../write_concern';
 import { executeOperation } from '../execute_operation';
 import { ClientBulkWriteOperation } from './client_bulk_write';
-import { type ClientBulkWriteCommand, ClientBulkWriteCommandBuilder } from './command_builder';
+import { ClientBulkWriteCommandBuilder } from './command_builder';
 import {
   type AnyClientBulkWriteModel,
   type ClientBulkWriteOptions,
@@ -57,43 +55,26 @@ export class ClientBulkWriteExecutor {
       this.options,
       pkFactory
     );
-    const commands = commandBuilder.buildCommands();
+    // Unacknowledged writes need to execute all batches and return { ok: 1}
     if (this.options.writeConcern?.w === 0) {
-      return await executeUnacknowledged(this.client, this.options, commands);
+      while (commandBuilder.hasNextBatch()) {
+        const operation = new ClientBulkWriteOperation(commandBuilder, this.options);
+        await executeOperation(this.client, operation);
+      }
+      return { ok: 1 };
+    } else {
+      const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
+      // For each command will will create and exhaust a cursor for the results.
+      let currentBatchOffset = 0;
+      while (commandBuilder.hasNextBatch()) {
+        const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options);
+        const docs = await cursor.toArray();
+        const operations = cursor.operations;
+        resultsMerger.merge(currentBatchOffset, operations, cursor.response, docs);
+        // Set the new batch index so we can back back to the index in the original models.
+        currentBatchOffset += operations.length;
+      }
+      return resultsMerger.result;
     }
-    return await executeAcknowledged(this.client, this.options, commands);
-  }
-}
-
-/**
- * Execute an acknowledged bulk write.
- */
-async function executeAcknowledged(
-  client: MongoClient,
-  options: ClientBulkWriteOptions,
-  commands: ClientBulkWriteCommand[]
-): Promise<ClientBulkWriteResult> {
-  const resultsMerger = new ClientBulkWriteResultsMerger(options);
-  // For each command will will create and exhaust a cursor for the results.
-  for (const command of commands) {
-    const cursor = new ClientBulkWriteCursor(client, command, options);
-    const docs = await cursor.toArray();
-    resultsMerger.merge(command.ops.documents, cursor.response, docs);
-  }
-  return resultsMerger.result;
-}
-
-/**
- * Execute an unacknowledged bulk write.
- */
-async function executeUnacknowledged(
-  client: MongoClient,
-  options: ClientBulkWriteOptions,
-  commands: Document[]
-): Promise<{ ok: 1 }> {
-  for (const command of commands) {
-    const operation = new ClientBulkWriteOperation(command, options);
-    await executeOperation(client, operation);
   }
-  return { ok: 1 };
 }
```
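
The two standalone helpers are gone; the executor now loops `hasNextBatch`, and the acknowledged path threads a running offset through the merger so batch-relative indexes map back to the caller's models. A reduced sketch of that loop with stub types:

```ts
interface BatchSource<T> {
  hasNextBatch(): boolean;
  runNextBatch(): Promise<T[]>; // stands in for cursor.toArray() per batch
}

// Drain every batch, tracking how far into the original models we are so
// batch-relative indexes can be mapped back (see the results merger below).
async function drainAcknowledged<T>(source: BatchSource<T>): Promise<number> {
  let currentBatchOffset = 0;
  while (source.hasNextBatch()) {
    const operations = await source.runNextBatch();
    // resultsMerger.merge(currentBatchOffset, operations, response, docs) goes here
    currentBatchOffset += operations.length;
  }
  return currentBatchOffset; // total operations executed
}
```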

package/src/operations/client_bulk_write/results_merger.ts
CHANGED

```diff
@@ -42,11 +42,13 @@ export class ClientBulkWriteResultsMerger {
 
   /**
    * Merge the results in the cursor to the existing result.
+   * @param currentBatchOffset - The offset index to the original models.
    * @param response - The cursor response.
    * @param documents - The documents in the cursor.
    * @returns The current result.
    */
   merge(
+    currentBatchOffset: number,
     operations: Document[],
     response: ClientBulkWriteCursorResponse,
     documents: Document[]
@@ -67,7 +69,9 @@ export class ClientBulkWriteResultsMerger {
       const operation = operations[document.idx];
       // Handle insert results.
       if ('insert' in operation) {
-        this.result.insertResults?.set(document.idx, { insertedId: operation.document._id });
+        this.result.insertResults?.set(document.idx + currentBatchOffset, {
+          insertedId: operation.document._id
+        });
       }
       // Handle update results.
       if ('update' in operation) {
@@ -80,11 +84,13 @@ export class ClientBulkWriteResultsMerger {
         if (document.upserted) {
           result.upsertedId = document.upserted._id;
         }
-        this.result.updateResults?.set(document.idx, result);
+        this.result.updateResults?.set(document.idx + currentBatchOffset, result);
       }
       // Handle delete results.
       if ('delete' in operation) {
-        this.result.deleteResults?.set(document.idx, { deletedCount: document.n });
+        this.result.deleteResults?.set(document.idx + currentBatchOffset, {
+          deletedCount: document.n
+        });
       }
     }
   }
```
package/src/sdam/server_description.ts
CHANGED

```diff
@@ -69,6 +69,12 @@ export class ServerDescription {
   setVersion: number | null;
   electionId: ObjectId | null;
   logicalSessionTimeoutMinutes: number | null;
+  /** The max message size in bytes for the server. */
+  maxMessageSizeBytes: number | null;
+  /** The max number of writes in a bulk write command. */
+  maxWriteBatchSize: number | null;
+  /** The max bson object size. */
+  maxBsonObjectSize: number | null;
 
   // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level
   $clusterTime?: ClusterTime;
@@ -111,6 +117,9 @@ export class ServerDescription {
     this.setVersion = hello?.setVersion ?? null;
     this.electionId = hello?.electionId ?? null;
     this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
+    this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null;
+    this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null;
+    this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null;
     this.primary = hello?.primary ?? null;
     this.me = hello?.me?.toLowerCase() ?? null;
     this.$clusterTime = hello?.$clusterTime ?? null;
```