mongodb 6.9.0-dev.20241010.sha.6ecf198f → 6.9.0-dev.20241012.sha.a473de95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/beta.d.ts +52 -3
- package/lib/cmap/wire_protocol/responses.js +3 -0
- package/lib/cmap/wire_protocol/responses.js.map +1 -1
- package/lib/cursor/client_bulk_write_cursor.js +1 -5
- package/lib/cursor/client_bulk_write_cursor.js.map +1 -1
- package/lib/error.js +24 -3
- package/lib/error.js.map +1 -1
- package/lib/index.js +4 -3
- package/lib/index.js.map +1 -1
- package/lib/mongo_client.js +3 -0
- package/lib/mongo_client.js.map +1 -1
- package/lib/operations/aggregate.js.map +1 -1
- package/lib/operations/client_bulk_write/client_bulk_write.js +30 -10
- package/lib/operations/client_bulk_write/client_bulk_write.js.map +1 -1
- package/lib/operations/client_bulk_write/command_builder.js +62 -4
- package/lib/operations/client_bulk_write/command_builder.js.map +1 -1
- package/lib/operations/client_bulk_write/executor.js +43 -7
- package/lib/operations/client_bulk_write/executor.js.map +1 -1
- package/lib/operations/client_bulk_write/results_merger.js +116 -33
- package/lib/operations/client_bulk_write/results_merger.js.map +1 -1
- package/lib/operations/execute_operation.js +7 -0
- package/lib/operations/execute_operation.js.map +1 -1
- package/lib/operations/find.js.map +1 -1
- package/lib/operations/operation.js +5 -1
- package/lib/operations/operation.js.map +1 -1
- package/mongodb.d.ts +52 -3
- package/package.json +1 -1
- package/src/cmap/wire_protocol/responses.ts +4 -0
- package/src/cursor/client_bulk_write_cursor.ts +2 -8
- package/src/error.ts +44 -2
- package/src/index.ts +2 -0
- package/src/mongo_client.ts +5 -0
- package/src/operations/aggregate.ts +9 -1
- package/src/operations/client_bulk_write/client_bulk_write.ts +36 -10
- package/src/operations/client_bulk_write/command_builder.ts +84 -5
- package/src/operations/client_bulk_write/common.ts +6 -0
- package/src/operations/client_bulk_write/executor.ts +48 -7
- package/src/operations/client_bulk_write/results_merger.ts +120 -40
- package/src/operations/execute_operation.ts +8 -0
- package/src/operations/find.ts +8 -1
- package/src/operations/operation.ts +6 -1

package/src/operations/client_bulk_write/results_merger.ts
CHANGED

@@ -1,6 +1,9 @@
+import { MongoWriteConcernError } from '../..';
 import { type Document } from '../../bson';
-import { type
+import { type ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
+import { MongoClientBulkWriteError } from '../../error';
 import {
+  type ClientBulkWriteError,
   type ClientBulkWriteOptions,
   type ClientBulkWriteResult,
   type ClientDeleteResult,
@@ -15,6 +18,9 @@ import {
 export class ClientBulkWriteResultsMerger {
   result: ClientBulkWriteResult;
   options: ClientBulkWriteOptions;
+  currentBatchOffset: number;
+  writeConcernErrors: Document[];
+  writeErrors: Map<number, ClientBulkWriteError>;

   /**
    * Instantiate the merger.
@@ -22,6 +28,9 @@ export class ClientBulkWriteResultsMerger {
    */
   constructor(options: ClientBulkWriteOptions) {
     this.options = options;
+    this.currentBatchOffset = 0;
+    this.writeConcernErrors = [];
+    this.writeErrors = new Map();
     this.result = {
       insertedCount: 0,
       upsertedCount: 0,
@@ -47,55 +56,126 @@ export class ClientBulkWriteResultsMerger {
    * @param documents - The documents in the cursor.
    * @returns The current result.
    */
-  merge(
-
-
-
-    documents: Document[]
-  ): ClientBulkWriteResult {
-    // Update the counts from the cursor response.
-    this.result.insertedCount += response.insertedCount;
-    this.result.upsertedCount += response.upsertedCount;
-    this.result.matchedCount += response.matchedCount;
-    this.result.modifiedCount += response.modifiedCount;
-    this.result.deletedCount += response.deletedCount;
-
-    if (this.options.verboseResults) {
-      // Iterate all the documents in the cursor and update the result.
-      for (const document of documents) {
+  async merge(cursor: ClientBulkWriteCursor): Promise<ClientBulkWriteResult> {
+    let writeConcernErrorResult;
+    try {
+      for await (const document of cursor) {
         // Only add to maps if ok: 1
         if (document.ok === 1) {
-
-
-
-
-
-
+          if (this.options.verboseResults) {
+            this.processDocument(cursor, document);
+          }
+        } else {
+          // If an individual write error is encountered during an ordered bulk write, drivers MUST
+          // record the error in writeErrors and immediately throw the exception. Otherwise, drivers
+          // MUST continue to iterate the results cursor and execute any further bulkWrite batches.
+          if (this.options.ordered) {
+            const error = new MongoClientBulkWriteError({
+              message: 'Mongo client ordered bulk write encountered a write error.'
+            });
+            error.writeErrors.set(document.idx + this.currentBatchOffset, {
+              code: document.code,
+              message: document.errmsg
+            });
+            error.partialResult = this.result;
+            throw error;
+          } else {
+            this.writeErrors.set(document.idx + this.currentBatchOffset, {
+              code: document.code,
+              message: document.errmsg
             });
           }
-
-
-
-
-
-
-
-
-
-
+        }
+      }
+    } catch (error) {
+      if (error instanceof MongoWriteConcernError) {
+        const result = error.result;
+        writeConcernErrorResult = {
+          insertedCount: result.nInserted,
+          upsertedCount: result.nUpserted,
+          matchedCount: result.nMatched,
+          modifiedCount: result.nModified,
+          deletedCount: result.nDeleted,
+          writeConcernError: result.writeConcernError
+        };
+        if (this.options.verboseResults && result.cursor.firstBatch) {
+          for (const document of result.cursor.firstBatch) {
+            if (document.ok === 1) {
+              this.processDocument(cursor, document);
             }
-          this.result.updateResults?.set(document.idx + currentBatchOffset, result);
-        }
-        // Handle delete results.
-        if ('delete' in operation) {
-          this.result.deleteResults?.set(document.idx + currentBatchOffset, {
-            deletedCount: document.n
-          });
           }
         }
+      } else {
+        throw error;
+      }
+    } finally {
+      // Update the counts from the cursor response.
+      if (cursor.response) {
+        const response = cursor.response;
+        this.incrementCounts(response);
       }
+
+      // Increment the batch offset.
+      this.currentBatchOffset += cursor.operations.length;
+    }
+
+    // If we have write concern errors ensure they are added.
+    if (writeConcernErrorResult) {
+      const writeConcernError = writeConcernErrorResult.writeConcernError as Document;
+      this.incrementCounts(writeConcernErrorResult);
+      this.writeConcernErrors.push({
+        code: writeConcernError.code,
+        message: writeConcernError.errmsg
+      });
     }

     return this.result;
   }
+
+  /**
+   * Process an individual document in the results.
+   * @param cursor - The cursor.
+   * @param document - The document to process.
+   */
+  private processDocument(cursor: ClientBulkWriteCursor, document: Document) {
+    // Get the corresponding operation from the command.
+    const operation = cursor.operations[document.idx];
+    // Handle insert results.
+    if ('insert' in operation) {
+      this.result.insertResults?.set(document.idx + this.currentBatchOffset, {
+        insertedId: operation.document._id
+      });
+    }
+    // Handle update results.
+    if ('update' in operation) {
+      const result: ClientUpdateResult = {
+        matchedCount: document.n,
+        modifiedCount: document.nModified ?? 0,
+        // Check if the bulk did actually upsert.
+        didUpsert: document.upserted != null
+      };
+      if (document.upserted) {
+        result.upsertedId = document.upserted._id;
+      }
+      this.result.updateResults?.set(document.idx + this.currentBatchOffset, result);
+    }
+    // Handle delete results.
+    if ('delete' in operation) {
+      this.result.deleteResults?.set(document.idx + this.currentBatchOffset, {
+        deletedCount: document.n
+      });
+    }
+  }
+
+  /**
+   * Increment the result counts.
+   * @param document - The document with the results.
+   */
+  private incrementCounts(document: Document) {
+    this.result.insertedCount += document.insertedCount;
+    this.result.upsertedCount += document.upsertedCount;
+    this.result.matchedCount += document.matchedCount;
+    this.result.modifiedCount += document.modifiedCount;
+    this.result.deletedCount += document.deletedCount;
+  }
 }
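
The net effect of the results_merger changes: merge() now drives the ClientBulkWriteCursor itself, keeps a running currentBatchOffset so result and error maps are keyed by each operation's index across all batches, records unordered write errors in the new writeErrors map, throws MongoClientBulkWriteError immediately for ordered writes, and folds a MongoWriteConcernError into the merged counts plus writeConcernErrors. The sketch below is a hypothetical caller-side view of how that surfaces through MongoClient.bulkWrite; the URI, namespace, and documents are placeholders and are not part of this diff.

import { MongoClient, MongoClientBulkWriteError } from 'mongodb';

async function run(uri: string) {
  const client = new MongoClient(uri);
  try {
    const result = await client.bulkWrite(
      [
        { namespace: 'app.users', name: 'insertOne', document: { _id: 1, name: 'first' } },
        // Duplicate _id: produces an individual write error in the results cursor.
        { namespace: 'app.users', name: 'insertOne', document: { _id: 1, name: 'dup' } }
      ],
      { ordered: true, verboseResults: true }
    );
    // Only reached when every write succeeds; verboseResults populates the per-index maps.
    console.log(result.insertedCount, result.insertResults);
  } catch (error) {
    if (error instanceof MongoClientBulkWriteError) {
      // writeErrors is keyed by document.idx + currentBatchOffset, i.e. the operation's
      // position across all batches; partialResult holds the counts merged so far.
      console.log(error.writeErrors, error.partialResult);
    } else {
      throw error;
    }
  } finally {
    await client.close();
  }
}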

package/src/operations/execute_operation.ts
CHANGED

@@ -230,6 +230,10 @@ async function tryOperation<
        });
      }

+      if (operation.hasAspect(Aspect.COMMAND_BATCHING) && !operation.canRetryWrite) {
+        throw previousOperationError;
+      }
+
      if (hasWriteAspect && !isRetryableWriteError(previousOperationError))
        throw previousOperationError;

@@ -260,6 +264,10 @@ async function tryOperation<
    }

    try {
+      // If tries > 0 and we are command batching we need to reset the batch.
+      if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) {
+        operation.resetBatch();
+      }
      return await operation.execute(server, session);
    } catch (operationError) {
      if (!(operationError instanceof MongoError)) throw operationError;
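
The execute_operation changes add two retry-loop checks for operations carrying the new COMMAND_BATCHING aspect: a batched operation that cannot retry writes rethrows the previous error instead of retrying, and a batched operation that is retried has resetBatch() called before execute() runs again. Below is a minimal, self-contained sketch of that control flow under assumed names; it is not the driver's actual tryOperation, only the shape of the two new checks.

const COMMAND_BATCHING = Symbol('COMMAND_BATCHING');

interface BatchingOperation {
  canRetryWrite: boolean;
  hasAspect(aspect: symbol): boolean;
  resetBatch(): boolean;
  execute(): Promise<unknown>;
}

async function tryOperationSketch(operation: BatchingOperation, maxTries = 2): Promise<unknown> {
  let previousOperationError: Error | undefined;
  for (let tries = 0; tries < maxTries; tries++) {
    if (previousOperationError) {
      // Batched commands that cannot retry writes surface the previous error immediately.
      if (operation.hasAspect(COMMAND_BATCHING) && !operation.canRetryWrite) {
        throw previousOperationError;
      }
    }
    try {
      // On a retry, rebuild the batch so the command is re-sent from a clean state.
      if (tries > 0 && operation.hasAspect(COMMAND_BATCHING)) {
        operation.resetBatch();
      }
      return await operation.execute();
    } catch (operationError) {
      previousOperationError = operationError as Error;
    }
  }
  throw previousOperationError;
}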

package/src/operations/find.ts
CHANGED

@@ -1,6 +1,7 @@
 import type { Document } from '../bson';
 import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
 import { MongoInvalidArgumentError } from '../error';
+import { type ExplainOptions } from '../explain';
 import { ReadConcern } from '../read_concern';
 import type { Server } from '../sdam/server';
 import type { ClientSession } from '../sessions';
@@ -15,7 +16,7 @@ import { Aspect, defineAspects, type Hint } from './operation';
  */
 // eslint-disable-next-line @typescript-eslint/no-unused-vars
 export interface FindOptions<TSchema extends Document = Document>
-  extends Omit<CommandOperationOptions, 'writeConcern'> {
+  extends Omit<CommandOperationOptions, 'writeConcern' | 'explain'> {
   /** Sets the limit of documents returned in the query. */
   limit?: number;
   /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */
@@ -63,6 +64,12 @@ export interface FindOptions<TSchema extends Document = Document>
    * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored.
    */
   oplogReplay?: boolean;
+
+  /**
+   * Specifies the verbosity mode for the explain output.
+   * @deprecated This API is deprecated in favor of `collection.find().explain()`.
+   */
+  explain?: ExplainOptions['explain'];
 }

 /** @internal */
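
The find.ts change stops inheriting explain from CommandOperationOptions and re-declares it directly on FindOptions as a deprecated property typed from ExplainOptions, pointing callers at the cursor's explain() method instead. A short usage sketch of both forms follows; the connection string, database, and collection names are placeholders.

import { MongoClient } from 'mongodb';

async function explainFind(uri: string) {
  const client = new MongoClient(uri);
  try {
    const collection = client.db('app').collection('users');

    // Deprecated form: explain passed through FindOptions (the property re-declared above).
    const viaOptions = await collection
      .find({ age: { $gt: 21 } }, { explain: 'queryPlanner' })
      .toArray();

    // Preferred form: call explain() on the find cursor.
    const viaCursor = await collection.find({ age: { $gt: 21 } }).explain('queryPlanner');

    return { viaOptions, viaCursor };
  } finally {
    await client.close();
  }
}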

package/src/operations/operation.ts
CHANGED

@@ -11,7 +11,8 @@ export const Aspect = {
   EXPLAINABLE: Symbol('EXPLAINABLE'),
   SKIP_COLLATION: Symbol('SKIP_COLLATION'),
   CURSOR_CREATING: Symbol('CURSOR_CREATING'),
-  MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER')
+  MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER'),
+  COMMAND_BATCHING: Symbol('COMMAND_BATCHING')
 } as const;

 /** @public */
@@ -98,6 +99,10 @@ export abstract class AbstractOperation<TResult = any> {
     this[kSession] = undefined;
   }

+  resetBatch(): boolean {
+    return true;
+  }
+
   get canRetryRead(): boolean {
     return this.hasAspect(Aspect.RETRYABLE) && this.hasAspect(Aspect.READ_OPERATION);
   }
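
operation.ts adds the COMMAND_BATCHING aspect and a resetBatch() hook on AbstractOperation that defaults to doing nothing beyond returning true; operations that batch commands are expected to override it so a retry starts from a clean batch. The sketch below is hypothetical (it is not the driver's actual ClientBulkWriteOperation) and only illustrates the override pattern.

type Doc = Record<string, unknown>;

const COMMAND_BATCHING = Symbol('COMMAND_BATCHING');

class OperationSketch {
  aspects = new Set<symbol>();
  hasAspect(aspect: symbol): boolean {
    return this.aspects.has(aspect);
  }
  // Mirrors the base-class default added above: nothing to reset.
  resetBatch(): boolean {
    return true;
  }
}

class BatchedWriteOperationSketch extends OperationSketch {
  override aspects = new Set<symbol>([COMMAND_BATCHING]);
  private nextBatchIndex = 0;

  constructor(private readonly batches: Doc[][]) {
    super();
  }

  override resetBatch(): boolean {
    // A retried execute() should start again from the first batch.
    this.nextBatchIndex = 0;
    return true;
  }

  async execute(): Promise<number> {
    let documentsSent = 0;
    for (; this.nextBatchIndex < this.batches.length; this.nextBatchIndex++) {
      // Stand-in for sending one batch to the server.
      documentsSent += this.batches[this.nextBatchIndex].length;
    }
    return documentsSent;
  }
}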