ueberdb2 4.2.61 → 4.2.63

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +388 -386
  2. package/package.json +3 -3
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
1
- 'use strict';var require$$0$6=require('util'),require$$1$7=require('console'),require$$0$m=require('process'),require$$0$9=require('dns'),require$$0$7=require('net'),require$$0$8=require('events'),require$$0$a=require('crypto'),require$$0$b=require('stream'),require$$1$2=require('tls'),os$3=require('os'),require$$1$3=require('path'),require$$0$c=require('fs'),https$6=require('https'),require$$0$d=require('zlib'),require$$0$e=require('url'),require$$0$f=require('vm'),http$5=require('http'),require$$0$g=require('assert'),require$$1$4=require('tty'),node_path=require('node:path'),child_process=require('child_process'),require$$0$h=require('buffer'),require$$1$5=require('querystring'),require$$13=require('stream/web'),require$$0$j=require('node:stream'),require$$1$6=require('node:util'),require$$0$i=require('node:events'),require$$0$k=require('worker_threads'),require$$2$2=require('perf_hooks'),require$$5$1=require('util/types'),require$$4$2=require('async_hooks'),require$$1$8=require('string_decoder'),require$$0$l=require('diagnostics_channel'),require$$1$9=require('timers'),require$$0$n=require('fs/promises'),require$$5$2=require('constants'),require$$0$o=require('dgram'),require$$2$3=require('punycode');function _interopNamespaceDefault(e){var n=Object.create(null);if(e){Object.keys(e).forEach(function(k){if(k!=='default'){var d=Object.getOwnPropertyDescriptor(e,k);Object.defineProperty(n,k,d.get?d:{enumerable:true,get:function(){return e[k]}});}})}n.default=e;return Object.freeze(n)}var os__namespace=/*#__PURE__*/_interopNamespaceDefault(os$3);var https__namespace=/*#__PURE__*/_interopNamespaceDefault(https$6);var require$$0__namespace=/*#__PURE__*/_interopNamespaceDefault(require$$0$d);var http__namespace=/*#__PURE__*/_interopNamespaceDefault(http$5);var child_process__namespace=/*#__PURE__*/_interopNamespaceDefault(child_process);// @ts-nocheck
1
+ 'use strict';var require$$0$6=require('util'),require$$1$7=require('console'),require$$0$m=require('process'),require$$0$9=require('dns'),require$$0$7=require('net'),require$$0$8=require('events'),require$$0$a=require('crypto'),require$$0$b=require('stream'),require$$1$2=require('tls'),os$3=require('os'),require$$1$3=require('path'),require$$0$c=require('fs'),https$6=require('https'),require$$0$d=require('zlib'),require$$0$e=require('url'),require$$0$f=require('vm'),http$5=require('http'),require$$0$g=require('assert'),require$$1$4=require('tty'),node_path=require('node:path'),child_process=require('child_process'),require$$0$h=require('buffer'),require$$1$5=require('querystring'),require$$13=require('stream/web'),require$$0$j=require('node:stream'),require$$1$6=require('node:util'),require$$0$i=require('node:events'),require$$0$k=require('worker_threads'),require$$2$2=require('perf_hooks'),require$$5$2=require('util/types'),require$$4$1=require('async_hooks'),require$$1$8=require('string_decoder'),require$$0$l=require('diagnostics_channel'),require$$1$9=require('timers'),require$$0$n=require('fs/promises'),require$$5$3=require('constants'),require$$0$o=require('dgram'),require$$2$3=require('punycode');function _interopNamespaceDefault(e){var n=Object.create(null);if(e){Object.keys(e).forEach(function(k){if(k!=='default'){var d=Object.getOwnPropertyDescriptor(e,k);Object.defineProperty(n,k,d.get?d:{enumerable:true,get:function(){return e[k]}});}})}n.default=e;return Object.freeze(n)}var os__namespace=/*#__PURE__*/_interopNamespaceDefault(os$3);var https__namespace=/*#__PURE__*/_interopNamespaceDefault(https$6);var require$$0__namespace=/*#__PURE__*/_interopNamespaceDefault(require$$0$d);var http__namespace=/*#__PURE__*/_interopNamespaceDefault(http$5);var child_process__namespace=/*#__PURE__*/_interopNamespaceDefault(child_process);// @ts-nocheck
2
2
  /**
3
3
  * 2011 Peter 'Pita' Martischka
4
4
  *
@@ -60604,7 +60604,7 @@ class Couch_db extends AbstractDatabase {
60604
60604
  this.agent = null;
60605
60605
  }
60606
60606
  }var dirty$1 = {};var dirty = {};Object.defineProperty(dirty, "__esModule", { value: true });
60607
- const fs_1 = require$$0$c;
60607
+ const fs_1$1 = require$$0$c;
60608
60608
  const events_1$5 = require$$0$8;
60609
60609
  class Dirty extends events_1$5.EventEmitter {
60610
60610
  path;
@@ -60707,7 +60707,7 @@ class Dirty extends events_1$5.EventEmitter {
60707
60707
  });
60708
60708
  return;
60709
60709
  }
60710
- this._readStream = (0, fs_1.createReadStream)(this.path, {
60710
+ this._readStream = (0, fs_1$1.createReadStream)(this.path, {
60711
60711
  encoding: 'utf-8',
60712
60712
  flags: 'r',
60713
60713
  });
@@ -60760,7 +60760,7 @@ class Dirty extends events_1$5.EventEmitter {
60760
60760
  this._readStream = null;
60761
60761
  this.emit('read_close');
60762
60762
  });
60763
- this._writeStream = (0, fs_1.createWriteStream)(this.path, {
60763
+ this._writeStream = (0, fs_1$1.createWriteStream)(this.path, {
60764
60764
  encoding: 'utf-8',
60765
60765
  flags: 'a',
60766
60766
  });
@@ -66388,7 +66388,7 @@ const debug_1$5 = tslib_1$4.__importDefault(srcExports);
66388
66388
  const os_1$1 = tslib_1$4.__importDefault(os$3);
66389
66389
  const zlib_1$1 = tslib_1$4.__importDefault(require$$0$d);
66390
66390
  const buffer_1$2 = tslib_1$4.__importDefault(require$$0$h);
66391
- const util_1$3 = require$$0$6;
66391
+ const util_1$2 = require$$0$6;
66392
66392
  const ms_1 = tslib_1$4.__importDefault(requireMs());
66393
66393
  const errors_1$9 = errors$e;
66394
66394
  const Diagnostic_1 = tslib_1$4.__importDefault(Diagnostic$2);
@@ -66396,8 +66396,8 @@ const Serializer_1 = tslib_1$4.__importDefault(Serializer$4);
66396
66396
  const symbols_1 = requireSymbols$5();
66397
66397
  const { version: clientVersion$1 } = require$$10; // eslint-disable-line
66398
66398
  const debug$f = (0, debug_1$5.default)('elasticsearch');
66399
- const gzip = (0, util_1$3.promisify)(zlib_1$1.default.gzip);
66400
- const unzip = (0, util_1$3.promisify)(zlib_1$1.default.unzip);
66399
+ const gzip = (0, util_1$2.promisify)(zlib_1$1.default.gzip);
66400
+ const unzip = (0, util_1$2.promisify)(zlib_1$1.default.unzip);
66401
66401
  const { createGzip } = zlib_1$1.default;
66402
66402
  const userAgent = `elastic-transport-js/${clientVersion$1} (${os_1$1.default.platform()} ${os_1$1.default.release()}-${os_1$1.default.arch()}; Node.js ${process.version})`; // eslint-disable-line
66403
66403
  let Transport$2=class Transport {
@@ -70479,7 +70479,7 @@ function requireUtil$7 () {
70479
70479
  const { performance } = require$$2$2;
70480
70480
  const { isBlobLike, toUSVString, ReadableStreamFrom } = requireUtil$8();
70481
70481
  const assert = require$$0$g;
70482
- const { isUint8Array } = require$$5$1;
70482
+ const { isUint8Array } = require$$5$2;
70483
70483
 
70484
70484
  // https://nodejs.org/api/crypto.html#determining-if-crypto-support-is-unavailable
70485
70485
  /** @type {import('crypto')|undefined} */
@@ -73491,7 +73491,7 @@ function requireBody () {
73491
73491
  const { kBodyUsed } = requireSymbols$4();
73492
73492
  const assert = require$$0$g;
73493
73493
  const { isErrored } = requireUtil$8();
73494
- const { isUint8Array, isArrayBuffer } = require$$5$1;
73494
+ const { isUint8Array, isArrayBuffer } = require$$5$2;
73495
73495
  const { File: UndiciFile } = requireFile();
73496
73496
  const { parseMIMEType, serializeAMimeType } = requireDataURL();
73497
73497
 
@@ -79152,7 +79152,7 @@ function requireApiRequest () {
79152
79152
  } = requireErrors$2();
79153
79153
  const util = requireUtil$8();
79154
79154
  const { getResolveErrorBodyCallback } = requireUtil$6();
79155
- const { AsyncResource } = require$$4$2;
79155
+ const { AsyncResource } = require$$4$1;
79156
79156
  const { addSignal, removeSignal } = requireAbortSignal();
79157
79157
 
79158
79158
  class RequestHandler extends AsyncResource {
@@ -79339,7 +79339,7 @@ function requireApiStream () {
79339
79339
  } = requireErrors$2();
79340
79340
  const util = requireUtil$8();
79341
79341
  const { getResolveErrorBodyCallback } = requireUtil$6();
79342
- const { AsyncResource } = require$$4$2;
79342
+ const { AsyncResource } = require$$4$1;
79343
79343
  const { addSignal, removeSignal } = requireAbortSignal();
79344
79344
 
79345
79345
  class StreamHandler extends AsyncResource {
@@ -79568,7 +79568,7 @@ function requireApiPipeline () {
79568
79568
  RequestAbortedError
79569
79569
  } = requireErrors$2();
79570
79570
  const util = requireUtil$8();
79571
- const { AsyncResource } = require$$4$2;
79571
+ const { AsyncResource } = require$$4$1;
79572
79572
  const { addSignal, removeSignal } = requireAbortSignal();
79573
79573
  const assert = require$$0$g;
79574
79574
 
@@ -79813,7 +79813,7 @@ function requireApiUpgrade () {
79813
79813
  hasRequiredApiUpgrade = 1;
79814
79814
 
79815
79815
  const { InvalidArgumentError, RequestAbortedError, SocketError } = requireErrors$2();
79816
- const { AsyncResource } = require$$4$2;
79816
+ const { AsyncResource } = require$$4$1;
79817
79817
  const util = requireUtil$8();
79818
79818
  const { addSignal, removeSignal } = requireAbortSignal();
79819
79819
  const assert = require$$0$g;
@@ -79923,7 +79923,7 @@ function requireApiConnect () {
79923
79923
  if (hasRequiredApiConnect) return apiConnect;
79924
79924
  hasRequiredApiConnect = 1;
79925
79925
 
79926
- const { AsyncResource } = require$$4$2;
79926
+ const { AsyncResource } = require$$4$1;
79927
79927
  const { InvalidArgumentError, RequestAbortedError, SocketError } = requireErrors$2();
79928
79928
  const util = requireUtil$8();
79929
79929
  const { addSignal, removeSignal } = requireAbortSignal();
@@ -112431,9 +112431,14 @@ bson$1.setInternalBufferSize = setInternalBufferSize;(function (exports) {
112431
112431
  if (message.errorLabels) {
112432
112432
  this[kErrorLabels] = new Set(message.errorLabels);
112433
112433
  }
112434
+ this.errorResponse = message;
112434
112435
  for (const name in message) {
112435
- if (name !== 'errorLabels' && name !== 'errmsg' && name !== 'message')
112436
+ if (name !== 'errorLabels' &&
112437
+ name !== 'errmsg' &&
112438
+ name !== 'message' &&
112439
+ name !== 'errorResponse') {
112436
112440
  this[name] = message[name];
112441
+ }
112437
112442
  }
112438
112443
  }
112439
112444
  get name() {
@@ -114258,8 +114263,8 @@ class WriteConcern {
114258
114263
  }
114259
114264
  write_concern.WriteConcern = WriteConcern;(function (exports) {
114260
114265
  Object.defineProperty(exports, "__esModule", { value: true });
114261
- exports.COSMOS_DB_CHECK = exports.DOCUMENT_DB_CHECK = exports.TimeoutController = exports.request = exports.matchesParentDomain = exports.parseUnsignedInteger = exports.parseInteger = exports.compareObjectId = exports.commandSupportsReadConcern = exports.shuffle = exports.supportsRetryableWrites = exports.enumToString = exports.emitWarningOnce = exports.emitWarning = exports.MONGODB_WARNING_CODE = exports.DEFAULT_PK_FACTORY = exports.HostAddress = exports.BufferPool = exports.List = exports.deepCopy = exports.isRecord = exports.setDifference = exports.isHello = exports.isSuperset = exports.resolveOptions = exports.hasAtomicOperators = exports.calculateDurationInMs = exports.now = exports.makeStateMachine = exports.errorStrictEqual = exports.arrayStrictEqual = exports.eachAsync = exports.maxWireVersion = exports.uuidV4 = exports.makeCounter = exports.MongoDBCollectionNamespace = exports.MongoDBNamespace = exports.ns = exports.getTopology = exports.decorateWithExplain = exports.decorateWithReadConcern = exports.decorateWithCollation = exports.isPromiseLike = exports.applyRetryableWrites = exports.filterOptions = exports.mergeOptions = exports.isObject = exports.normalizeHintField = exports.hostMatchesWildcards = exports.ByteUtils = void 0;
114262
- exports.randomBytes = exports.promiseWithResolvers = exports.isHostMatch = exports.COSMOS_DB_MSG = exports.DOCUMENT_DB_MSG = void 0;
114266
+ exports.DOCUMENT_DB_MSG = exports.COSMOS_DB_CHECK = exports.DOCUMENT_DB_CHECK = exports.TimeoutController = exports.request = exports.matchesParentDomain = exports.parseUnsignedInteger = exports.parseInteger = exports.compareObjectId = exports.commandSupportsReadConcern = exports.shuffle = exports.supportsRetryableWrites = exports.enumToString = exports.emitWarningOnce = exports.emitWarning = exports.MONGODB_WARNING_CODE = exports.DEFAULT_PK_FACTORY = exports.HostAddress = exports.BufferPool = exports.List = exports.deepCopy = exports.isRecord = exports.setDifference = exports.isHello = exports.isSuperset = exports.resolveOptions = exports.hasAtomicOperators = exports.calculateDurationInMs = exports.now = exports.makeStateMachine = exports.errorStrictEqual = exports.arrayStrictEqual = exports.maxWireVersion = exports.uuidV4 = exports.makeCounter = exports.MongoDBCollectionNamespace = exports.MongoDBNamespace = exports.ns = exports.getTopology = exports.decorateWithExplain = exports.decorateWithReadConcern = exports.decorateWithCollation = exports.isPromiseLike = exports.applyRetryableWrites = exports.filterOptions = exports.mergeOptions = exports.isObject = exports.normalizeHintField = exports.hostMatchesWildcards = exports.ByteUtils = void 0;
114267
+ exports.once = exports.randomBytes = exports.promiseWithResolvers = exports.isHostMatch = exports.COSMOS_DB_MSG = void 0;
114263
114268
  const crypto = require$$0$a;
114264
114269
  const http = http$5;
114265
114270
  const timers_1 = require$$1$9;
@@ -114572,38 +114577,6 @@ write_concern.WriteConcern = WriteConcern;(function (exports) {
114572
114577
  return 0;
114573
114578
  }
114574
114579
  exports.maxWireVersion = maxWireVersion;
114575
- /**
114576
- * Applies the function `eachFn` to each item in `arr`, in parallel.
114577
- * @internal
114578
- *
114579
- * @param arr - An array of items to asynchronously iterate over
114580
- * @param eachFn - A function to call on each item of the array. The callback signature is `(item, callback)`, where the callback indicates iteration is complete.
114581
- * @param callback - The callback called after every item has been iterated
114582
- */
114583
- function eachAsync(arr, eachFn, callback) {
114584
- arr = arr || [];
114585
- let idx = 0;
114586
- let awaiting = 0;
114587
- for (idx = 0; idx < arr.length; ++idx) {
114588
- awaiting++;
114589
- eachFn(arr[idx], eachCallback);
114590
- }
114591
- if (awaiting === 0) {
114592
- callback();
114593
- return;
114594
- }
114595
- function eachCallback(err) {
114596
- awaiting--;
114597
- if (err) {
114598
- callback(err);
114599
- return;
114600
- }
114601
- if (idx === arr.length && awaiting <= 0) {
114602
- callback();
114603
- }
114604
- }
114605
- }
114606
- exports.eachAsync = eachAsync;
114607
114580
  /** @internal */
114608
114581
  function arrayStrictEqual(arr, arr2) {
114609
114582
  if (!Array.isArray(arr) || !Array.isArray(arr2)) {
@@ -115299,6 +115272,30 @@ write_concern.WriteConcern = WriteConcern;(function (exports) {
115299
115272
  }
115300
115273
  exports.promiseWithResolvers = promiseWithResolvers;
115301
115274
  exports.randomBytes = (0, util_1.promisify)(crypto.randomBytes);
115275
+ /**
115276
+ * Replicates the events.once helper.
115277
+ *
115278
+ * Removes unused signal logic and It **only** supports 0 or 1 argument events.
115279
+ *
115280
+ * @param ee - An event emitter that may emit `ev`
115281
+ * @param name - An event name to wait for
115282
+ */
115283
+ async function once(ee, name) {
115284
+ const { promise, resolve, reject } = promiseWithResolvers();
115285
+ const onEvent = (data) => resolve(data);
115286
+ const onError = (error) => reject(error);
115287
+ ee.once(name, onEvent).once('error', onError);
115288
+ try {
115289
+ const res = await promise;
115290
+ ee.off('error', onError);
115291
+ return res;
115292
+ }
115293
+ catch (error) {
115294
+ ee.off(name, onEvent);
115295
+ throw error;
115296
+ }
115297
+ }
115298
+ exports.once = once;
115302
115299
 
115303
115300
  } (utils$c));var operation = {};(function (exports) {
115304
115301
  Object.defineProperty(exports, "__esModule", { value: true });
@@ -115372,7 +115369,7 @@ write_concern.WriteConcern = WriteConcern;(function (exports) {
115372
115369
  } (operation));Object.defineProperty(execute_operation, "__esModule", { value: true });
115373
115370
  execute_operation.executeOperation = void 0;
115374
115371
  const error_1$H = error$5;
115375
- const read_preference_1$3 = read_preference;
115372
+ const read_preference_1$4 = read_preference;
115376
115373
  const server_selection_1$1 = server_selection;
115377
115374
  const utils_1$x = utils$c;
115378
115375
  const operation_1$n = operation;
@@ -115438,9 +115435,9 @@ async function executeOperation(client, operation) {
115438
115435
  else if (session.client !== client) {
115439
115436
  throw new error_1$H.MongoInvalidArgumentError('ClientSession must be from the same MongoClient');
115440
115437
  }
115441
- const readPreference = operation.readPreference ?? read_preference_1$3.ReadPreference.primary;
115438
+ const readPreference = operation.readPreference ?? read_preference_1$4.ReadPreference.primary;
115442
115439
  const inTransaction = !!session?.inTransaction();
115443
- if (inTransaction && !readPreference.equals(read_preference_1$3.ReadPreference.primary)) {
115440
+ if (inTransaction && !readPreference.equals(read_preference_1$4.ReadPreference.primary)) {
115444
115441
  throw new error_1$H.MongoTransactionError(`Read preference in a transaction must be primary, not: ${readPreference.mode}`);
115445
115442
  }
115446
115443
  if (session?.isPinned && session.transaction.isCommitted && !operation.bypassPinningCheck) {
@@ -115932,7 +115929,42 @@ class Admin {
115932
115929
  return this.command({ replSetGetStatus: 1 }, options);
115933
115930
  }
115934
115931
  }
115935
- admin.Admin = Admin;var ordered = {};var common = {};var _delete = {};Object.defineProperty(_delete, "__esModule", { value: true });
115932
+ admin.Admin = Admin;var ordered = {};var common = {};var common_functions = {};Object.defineProperty(common_functions, "__esModule", { value: true });
115933
+ common_functions.maybeAddIdToDocuments = common_functions.indexInformation = void 0;
115934
+ async function indexInformation(db, name, options) {
115935
+ if (options == null) {
115936
+ options = {};
115937
+ }
115938
+ // If we specified full information
115939
+ const full = options.full == null ? false : options.full;
115940
+ // Get the list of indexes of the specified collection
115941
+ const indexes = await db.collection(name).listIndexes(options).toArray();
115942
+ if (full)
115943
+ return indexes;
115944
+ const info = {};
115945
+ for (const index of indexes) {
115946
+ info[index.name] = Object.entries(index.key);
115947
+ }
115948
+ return info;
115949
+ }
115950
+ common_functions.indexInformation = indexInformation;
115951
+ function maybeAddIdToDocuments(coll, docOrDocs, options) {
115952
+ const forceServerObjectId = typeof options.forceServerObjectId === 'boolean'
115953
+ ? options.forceServerObjectId
115954
+ : coll.s.db.options?.forceServerObjectId;
115955
+ // no need to modify the docs if server sets the ObjectId
115956
+ if (forceServerObjectId === true) {
115957
+ return docOrDocs;
115958
+ }
115959
+ const transform = (doc) => {
115960
+ if (doc._id == null) {
115961
+ doc._id = coll.s.pkFactory.createPk();
115962
+ }
115963
+ return doc;
115964
+ };
115965
+ return Array.isArray(docOrDocs) ? docOrDocs.map(transform) : transform(docOrDocs);
115966
+ }
115967
+ common_functions.maybeAddIdToDocuments = maybeAddIdToDocuments;var _delete = {};Object.defineProperty(_delete, "__esModule", { value: true });
115936
115968
  _delete.makeDeleteStatement = _delete.DeleteManyOperation = _delete.DeleteOneOperation = _delete.DeleteOperation = void 0;
115937
115969
  const error_1$E = error$5;
115938
115970
  const command_1$b = command$1;
@@ -116076,41 +116108,7 @@ class BulkWriteOperation extends operation_1$h.AbstractOperation {
116076
116108
  }
116077
116109
  }
116078
116110
  bulk_write.BulkWriteOperation = BulkWriteOperation;
116079
- (0, operation_1$h.defineAspects)(BulkWriteOperation, [operation_1$h.Aspect.WRITE_OPERATION]);var common_functions = {};Object.defineProperty(common_functions, "__esModule", { value: true });
116080
- common_functions.prepareDocs = common_functions.indexInformation = void 0;
116081
- async function indexInformation(db, name, options) {
116082
- if (options == null) {
116083
- options = {};
116084
- }
116085
- // If we specified full information
116086
- const full = options.full == null ? false : options.full;
116087
- // Get the list of indexes of the specified collection
116088
- const indexes = await db.collection(name).listIndexes(options).toArray();
116089
- if (full)
116090
- return indexes;
116091
- const info = {};
116092
- for (const index of indexes) {
116093
- info[index.name] = Object.entries(index.key);
116094
- }
116095
- return info;
116096
- }
116097
- common_functions.indexInformation = indexInformation;
116098
- function prepareDocs(coll, docs, options) {
116099
- const forceServerObjectId = typeof options.forceServerObjectId === 'boolean'
116100
- ? options.forceServerObjectId
116101
- : coll.s.db.options?.forceServerObjectId;
116102
- // no need to modify the docs if server sets the ObjectId
116103
- if (forceServerObjectId === true) {
116104
- return docs;
116105
- }
116106
- return docs.map(doc => {
116107
- if (doc._id == null) {
116108
- doc._id = coll.s.pkFactory.createPk();
116109
- }
116110
- return doc;
116111
- });
116112
- }
116113
- common_functions.prepareDocs = prepareDocs;Object.defineProperty(insert$1, "__esModule", { value: true });
116111
+ (0, operation_1$h.defineAspects)(BulkWriteOperation, [operation_1$h.Aspect.WRITE_OPERATION]);Object.defineProperty(insert$1, "__esModule", { value: true });
116114
116112
  insert$1.InsertManyOperation = insert$1.InsertOneOperation = insert$1.InsertOperation = void 0;
116115
116113
  const error_1$D = error$5;
116116
116114
  const write_concern_1$3 = write_concern;
@@ -116151,7 +116149,7 @@ class InsertOperation extends command_1$a.CommandOperation {
116151
116149
  insert$1.InsertOperation = InsertOperation;
116152
116150
  class InsertOneOperation extends InsertOperation {
116153
116151
  constructor(collection, doc, options) {
116154
- super(collection.s.namespace, (0, common_functions_1$1.prepareDocs)(collection, [doc], options), options);
116152
+ super(collection.s.namespace, (0, common_functions_1$1.maybeAddIdToDocuments)(collection, [doc], options), options);
116155
116153
  }
116156
116154
  async execute(server, session) {
116157
116155
  const res = await super.execute(server, session);
@@ -116186,7 +116184,9 @@ class InsertManyOperation extends operation_1$g.AbstractOperation {
116186
116184
  const coll = this.collection;
116187
116185
  const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference };
116188
116186
  const writeConcern = write_concern_1$3.WriteConcern.fromOptions(options);
116189
- const bulkWriteOperation = new bulk_write_1.BulkWriteOperation(coll, (0, common_functions_1$1.prepareDocs)(coll, this.docs, options).map(document => ({ insertOne: { document } })), options);
116187
+ const bulkWriteOperation = new bulk_write_1.BulkWriteOperation(coll, this.docs.map(document => ({
116188
+ insertOne: { document }
116189
+ })), options);
116190
116190
  try {
116191
116191
  const res = await bulkWriteOperation.execute(server, session);
116192
116192
  return {
@@ -116388,6 +116388,7 @@ update$1.makeUpdateStatement = makeUpdateStatement;
116388
116388
  const util_1 = require$$0$6;
116389
116389
  const bson_1 = bson$2;
116390
116390
  const error_1 = error$5;
116391
+ const common_functions_1 = common_functions;
116391
116392
  const delete_1 = _delete;
116392
116393
  const execute_operation_1 = execute_operation;
116393
116394
  const insert_1 = insert$1;
@@ -116923,6 +116924,7 @@ update$1.makeUpdateStatement = makeUpdateStatement;
116923
116924
  * @internal
116924
116925
  */
116925
116926
  constructor(collection, options, isOrdered) {
116927
+ this.collection = collection;
116926
116928
  // determine whether bulkOperation is ordered or unordered
116927
116929
  this.isOrdered = isOrdered;
116928
116930
  const topology = (0, utils_1.getTopology)(collection);
@@ -117026,9 +117028,9 @@ update$1.makeUpdateStatement = makeUpdateStatement;
117026
117028
  * ```
117027
117029
  */
117028
117030
  insert(document) {
117029
- if (document._id == null && !shouldForceServerObjectId(this)) {
117030
- document._id = new bson_1.ObjectId();
117031
- }
117031
+ (0, common_functions_1.maybeAddIdToDocuments)(this.collection, document, {
117032
+ forceServerObjectId: this.shouldForceServerObjectId()
117033
+ });
117032
117034
  return this.addToOperationsList(exports.BatchType.INSERT, document);
117033
117035
  }
117034
117036
  /**
@@ -117082,18 +117084,13 @@ update$1.makeUpdateStatement = makeUpdateStatement;
117082
117084
  throw new error_1.MongoInvalidArgumentError('Operation must be an object with an operation key');
117083
117085
  }
117084
117086
  if ('insertOne' in op) {
117085
- const forceServerObjectId = shouldForceServerObjectId(this);
117086
- if (op.insertOne && op.insertOne.document == null) {
117087
- // NOTE: provided for legacy support, but this is a malformed operation
117088
- if (forceServerObjectId !== true && op.insertOne._id == null) {
117089
- op.insertOne._id = new bson_1.ObjectId();
117090
- }
117091
- return this.addToOperationsList(exports.BatchType.INSERT, op.insertOne);
117092
- }
117093
- if (forceServerObjectId !== true && op.insertOne.document._id == null) {
117094
- op.insertOne.document._id = new bson_1.ObjectId();
117095
- }
117096
- return this.addToOperationsList(exports.BatchType.INSERT, op.insertOne.document);
117087
+ const forceServerObjectId = this.shouldForceServerObjectId();
117088
+ const document = op.insertOne && op.insertOne.document == null
117089
+ ? // TODO(NODE-6003): remove support for omitting the `documents` subdocument in bulk inserts
117090
+ op.insertOne
117091
+ : op.insertOne.document;
117092
+ (0, common_functions_1.maybeAddIdToDocuments)(this.collection, document, { forceServerObjectId });
117093
+ return this.addToOperationsList(exports.BatchType.INSERT, document);
117097
117094
  }
117098
117095
  if ('replaceOne' in op || 'updateOne' in op || 'updateMany' in op) {
117099
117096
  if ('replaceOne' in op) {
@@ -117223,6 +117220,10 @@ update$1.makeUpdateStatement = makeUpdateStatement;
117223
117220
  }
117224
117221
  return false;
117225
117222
  }
117223
+ shouldForceServerObjectId() {
117224
+ return (this.s.options.forceServerObjectId === true ||
117225
+ this.s.collection.s.db.options?.forceServerObjectId === true);
117226
+ }
117226
117227
  }
117227
117228
  exports.BulkOperationBase = BulkOperationBase;
117228
117229
  Object.defineProperty(BulkOperationBase.prototype, 'length', {
@@ -117231,15 +117232,6 @@ update$1.makeUpdateStatement = makeUpdateStatement;
117231
117232
  return this.s.currentIndex;
117232
117233
  }
117233
117234
  });
117234
- function shouldForceServerObjectId(bulkOperation) {
117235
- if (typeof bulkOperation.s.options.forceServerObjectId === 'boolean') {
117236
- return bulkOperation.s.options.forceServerObjectId;
117237
- }
117238
- if (typeof bulkOperation.s.collection.s.db.options?.forceServerObjectId === 'boolean') {
117239
- return bulkOperation.s.collection.s.db.options?.forceServerObjectId;
117240
- }
117241
- return false;
117242
- }
117243
117235
  function isInsertBatch(batch) {
117244
117236
  return batch.batchType === exports.BatchType.INSERT;
117245
117237
  }
@@ -117261,9 +117253,9 @@ update$1.makeUpdateStatement = makeUpdateStatement;
117261
117253
  ordered.OrderedBulkOperation = void 0;
117262
117254
  const BSON$2 = bson$2;
117263
117255
  const error_1$B = error$5;
117264
- const common_1$6 = common;
117256
+ const common_1$7 = common;
117265
117257
  /** @public */
117266
- class OrderedBulkOperation extends common_1$6.BulkOperationBase {
117258
+ class OrderedBulkOperation extends common_1$7.BulkOperationBase {
117267
117259
  /** @internal */
117268
117260
  constructor(collection, options) {
117269
117261
  super(collection, options, true);
@@ -117282,7 +117274,7 @@ class OrderedBulkOperation extends common_1$6.BulkOperationBase {
117282
117274
  throw new error_1$B.MongoInvalidArgumentError(`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`);
117283
117275
  // Create a new batch object if we don't have a current one
117284
117276
  if (this.s.currentBatch == null) {
117285
- this.s.currentBatch = new common_1$6.Batch(batchType, this.s.currentIndex);
117277
+ this.s.currentBatch = new common_1$7.Batch(batchType, this.s.currentIndex);
117286
117278
  }
117287
117279
  const maxKeySize = this.s.maxKeySize;
117288
117280
  // Check if we need to create a new batch
@@ -117298,12 +117290,12 @@ class OrderedBulkOperation extends common_1$6.BulkOperationBase {
117298
117290
  // Save the batch to the execution stack
117299
117291
  this.s.batches.push(this.s.currentBatch);
117300
117292
  // Create a new batch
117301
- this.s.currentBatch = new common_1$6.Batch(batchType, this.s.currentIndex);
117293
+ this.s.currentBatch = new common_1$7.Batch(batchType, this.s.currentIndex);
117302
117294
  // Reset the current size trackers
117303
117295
  this.s.currentBatchSize = 0;
117304
117296
  this.s.currentBatchSizeBytes = 0;
117305
117297
  }
117306
- if (batchType === common_1$6.BatchType.INSERT) {
117298
+ if (batchType === common_1$7.BatchType.INSERT) {
117307
117299
  this.s.bulkResult.insertedIds.push({
117308
117300
  index: this.s.currentIndex,
117309
117301
  _id: document._id
@@ -117325,9 +117317,9 @@ ordered.OrderedBulkOperation = OrderedBulkOperation;var unordered = {};Object.de
117325
117317
  unordered.UnorderedBulkOperation = void 0;
117326
117318
  const BSON$1 = bson$2;
117327
117319
  const error_1$A = error$5;
117328
- const common_1$5 = common;
117320
+ const common_1$6 = common;
117329
117321
  /** @public */
117330
- class UnorderedBulkOperation extends common_1$5.BulkOperationBase {
117322
+ class UnorderedBulkOperation extends common_1$6.BulkOperationBase {
117331
117323
  /** @internal */
117332
117324
  constructor(collection, options) {
117333
117325
  super(collection, options, false);
@@ -117354,19 +117346,19 @@ class UnorderedBulkOperation extends common_1$5.BulkOperationBase {
117354
117346
  // Holds the current batch
117355
117347
  this.s.currentBatch = undefined;
117356
117348
  // Get the right type of batch
117357
- if (batchType === common_1$5.BatchType.INSERT) {
117349
+ if (batchType === common_1$6.BatchType.INSERT) {
117358
117350
  this.s.currentBatch = this.s.currentInsertBatch;
117359
117351
  }
117360
- else if (batchType === common_1$5.BatchType.UPDATE) {
117352
+ else if (batchType === common_1$6.BatchType.UPDATE) {
117361
117353
  this.s.currentBatch = this.s.currentUpdateBatch;
117362
117354
  }
117363
- else if (batchType === common_1$5.BatchType.DELETE) {
117355
+ else if (batchType === common_1$6.BatchType.DELETE) {
117364
117356
  this.s.currentBatch = this.s.currentRemoveBatch;
117365
117357
  }
117366
117358
  const maxKeySize = this.s.maxKeySize;
117367
117359
  // Create a new batch object if we don't have a current one
117368
117360
  if (this.s.currentBatch == null) {
117369
- this.s.currentBatch = new common_1$5.Batch(batchType, this.s.currentIndex);
117361
+ this.s.currentBatch = new common_1$6.Batch(batchType, this.s.currentIndex);
117370
117362
  }
117371
117363
  // Check if we need to create a new batch
117372
117364
  if (
@@ -117381,7 +117373,7 @@ class UnorderedBulkOperation extends common_1$5.BulkOperationBase {
117381
117373
  // Save the batch to the execution stack
117382
117374
  this.s.batches.push(this.s.currentBatch);
117383
117375
  // Create a new batch
117384
- this.s.currentBatch = new common_1$5.Batch(batchType, this.s.currentIndex);
117376
+ this.s.currentBatch = new common_1$6.Batch(batchType, this.s.currentIndex);
117385
117377
  }
117386
117378
  // We have an array of documents
117387
117379
  if (Array.isArray(document)) {
@@ -117391,17 +117383,17 @@ class UnorderedBulkOperation extends common_1$5.BulkOperationBase {
117391
117383
  this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
117392
117384
  this.s.currentIndex = this.s.currentIndex + 1;
117393
117385
  // Save back the current Batch to the right type
117394
- if (batchType === common_1$5.BatchType.INSERT) {
117386
+ if (batchType === common_1$6.BatchType.INSERT) {
117395
117387
  this.s.currentInsertBatch = this.s.currentBatch;
117396
117388
  this.s.bulkResult.insertedIds.push({
117397
117389
  index: this.s.bulkResult.insertedIds.length,
117398
117390
  _id: document._id
117399
117391
  });
117400
117392
  }
117401
- else if (batchType === common_1$5.BatchType.UPDATE) {
117393
+ else if (batchType === common_1$6.BatchType.UPDATE) {
117402
117394
  this.s.currentUpdateBatch = this.s.currentBatch;
117403
117395
  }
117404
- else if (batchType === common_1$5.BatchType.DELETE) {
117396
+ else if (batchType === common_1$6.BatchType.DELETE) {
117405
117397
  this.s.currentRemoveBatch = this.s.currentBatch;
117406
117398
  }
117407
117399
  // Update current batch size
@@ -117585,7 +117577,8 @@ unordered.UnorderedBulkOperation = UnorderedBulkOperation;var change_stream = {}
117585
117577
  function createStdioLogger(stream) {
117586
117578
  return {
117587
117579
  write: (0, util_1.promisify)((log, cb) => {
117588
- stream.write((0, util_1.inspect)(log, { compact: true, breakLength: Infinity }), 'utf-8', cb);
117580
+ const logLine = (0, util_1.inspect)(log, { compact: true, breakLength: Infinity });
117581
+ stream.write(`${logLine}\n`, 'utf-8', cb);
117589
117582
  return;
117590
117583
  })
117591
117584
  };
@@ -118256,19 +118249,19 @@ server_description.compareTopologyVersion = server_description.parseServerType =
118256
118249
  const bson_1$b = bson$2;
118257
118250
  const error_1$x = error$5;
118258
118251
  const utils_1$r = utils$c;
118259
- const common_1$4 = common$1;
118252
+ const common_1$5 = common$1;
118260
118253
  const WRITABLE_SERVER_TYPES = new Set([
118261
- common_1$4.ServerType.RSPrimary,
118262
- common_1$4.ServerType.Standalone,
118263
- common_1$4.ServerType.Mongos,
118264
- common_1$4.ServerType.LoadBalancer
118254
+ common_1$5.ServerType.RSPrimary,
118255
+ common_1$5.ServerType.Standalone,
118256
+ common_1$5.ServerType.Mongos,
118257
+ common_1$5.ServerType.LoadBalancer
118265
118258
  ]);
118266
118259
  const DATA_BEARING_SERVER_TYPES = new Set([
118267
- common_1$4.ServerType.RSPrimary,
118268
- common_1$4.ServerType.RSSecondary,
118269
- common_1$4.ServerType.Mongos,
118270
- common_1$4.ServerType.Standalone,
118271
- common_1$4.ServerType.LoadBalancer
118260
+ common_1$5.ServerType.RSPrimary,
118261
+ common_1$5.ServerType.RSSecondary,
118262
+ common_1$5.ServerType.Mongos,
118263
+ common_1$5.ServerType.Standalone,
118264
+ common_1$5.ServerType.LoadBalancer
118272
118265
  ]);
118273
118266
  /**
118274
118267
  * The client's view of a single server, based on the most recent hello outcome.
@@ -118321,7 +118314,7 @@ class ServerDescription {
118321
118314
  }
118322
118315
  /** Is this server available for reads*/
118323
118316
  get isReadable() {
118324
- return this.type === common_1$4.ServerType.RSSecondary || this.isWritable;
118317
+ return this.type === common_1$5.ServerType.RSSecondary || this.isWritable;
118325
118318
  }
118326
118319
  /** Is this server data bearing */
118327
118320
  get isDataBearing() {
@@ -118369,35 +118362,35 @@ server_description.ServerDescription = ServerDescription;
118369
118362
  // Parses a `hello` message and determines the server type
118370
118363
  function parseServerType(hello, options) {
118371
118364
  if (options?.loadBalanced) {
118372
- return common_1$4.ServerType.LoadBalancer;
118365
+ return common_1$5.ServerType.LoadBalancer;
118373
118366
  }
118374
118367
  if (!hello || !hello.ok) {
118375
- return common_1$4.ServerType.Unknown;
118368
+ return common_1$5.ServerType.Unknown;
118376
118369
  }
118377
118370
  if (hello.isreplicaset) {
118378
- return common_1$4.ServerType.RSGhost;
118371
+ return common_1$5.ServerType.RSGhost;
118379
118372
  }
118380
118373
  if (hello.msg && hello.msg === 'isdbgrid') {
118381
- return common_1$4.ServerType.Mongos;
118374
+ return common_1$5.ServerType.Mongos;
118382
118375
  }
118383
118376
  if (hello.setName) {
118384
118377
  if (hello.hidden) {
118385
- return common_1$4.ServerType.RSOther;
118378
+ return common_1$5.ServerType.RSOther;
118386
118379
  }
118387
118380
  else if (hello.isWritablePrimary) {
118388
- return common_1$4.ServerType.RSPrimary;
118381
+ return common_1$5.ServerType.RSPrimary;
118389
118382
  }
118390
118383
  else if (hello.secondary) {
118391
- return common_1$4.ServerType.RSSecondary;
118384
+ return common_1$5.ServerType.RSSecondary;
118392
118385
  }
118393
118386
  else if (hello.arbiterOnly) {
118394
- return common_1$4.ServerType.RSArbiter;
118387
+ return common_1$5.ServerType.RSArbiter;
118395
118388
  }
118396
118389
  else {
118397
- return common_1$4.ServerType.RSOther;
118390
+ return common_1$5.ServerType.RSOther;
118398
118391
  }
118399
118392
  }
118400
- return common_1$4.ServerType.Standalone;
118393
+ return common_1$5.ServerType.Standalone;
118401
118394
  }
118402
118395
  server_description.parseServerType = parseServerType;
118403
118396
  function tagsStrictEqual(tags, tags2) {
@@ -118442,19 +118435,19 @@ topology_description.TopologyDescription = void 0;
118442
118435
  const WIRE_CONSTANTS = constants$4;
118443
118436
  const error_1$w = error$5;
118444
118437
  const utils_1$q = utils$c;
118445
- const common_1$3 = common$1;
118438
+ const common_1$4 = common$1;
118446
118439
  const server_description_1$1 = server_description;
118447
118440
  // constants related to compatibility checks
118448
118441
  const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
118449
118442
  const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
118450
118443
  const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
118451
118444
  const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;
118452
- const MONGOS_OR_UNKNOWN = new Set([common_1$3.ServerType.Mongos, common_1$3.ServerType.Unknown]);
118453
- const MONGOS_OR_STANDALONE = new Set([common_1$3.ServerType.Mongos, common_1$3.ServerType.Standalone]);
118445
+ const MONGOS_OR_UNKNOWN = new Set([common_1$4.ServerType.Mongos, common_1$4.ServerType.Unknown]);
118446
+ const MONGOS_OR_STANDALONE = new Set([common_1$4.ServerType.Mongos, common_1$4.ServerType.Standalone]);
118454
118447
  const NON_PRIMARY_RS_MEMBERS = new Set([
118455
- common_1$3.ServerType.RSSecondary,
118456
- common_1$3.ServerType.RSArbiter,
118457
- common_1$3.ServerType.RSOther
118448
+ common_1$4.ServerType.RSSecondary,
118449
+ common_1$4.ServerType.RSArbiter,
118450
+ common_1$4.ServerType.RSOther
118458
118451
  ]);
118459
118452
  /**
118460
118453
  * Representation of a deployment of servers
@@ -118466,7 +118459,7 @@ class TopologyDescription {
118466
118459
  */
118467
118460
  constructor(topologyType, serverDescriptions = null, setName = null, maxSetVersion = null, maxElectionId = null, commonWireVersion = null, options = null) {
118468
118461
  options = options ?? {};
118469
- this.type = topologyType ?? common_1$3.TopologyType.Unknown;
118462
+ this.type = topologyType ?? common_1$4.TopologyType.Unknown;
118470
118463
  this.servers = serverDescriptions ?? new Map();
118471
118464
  this.stale = false;
118472
118465
  this.compatible = true;
@@ -118479,8 +118472,8 @@ class TopologyDescription {
118479
118472
  // determine server compatibility
118480
118473
  for (const serverDescription of this.servers.values()) {
118481
118474
  // Load balancer mode is always compatible.
118482
- if (serverDescription.type === common_1$3.ServerType.Unknown ||
118483
- serverDescription.type === common_1$3.ServerType.LoadBalancer) {
118475
+ if (serverDescription.type === common_1$4.ServerType.Unknown ||
118476
+ serverDescription.type === common_1$4.ServerType.LoadBalancer) {
118484
118477
  continue;
118485
118478
  }
118486
118479
  if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
@@ -118583,7 +118576,7 @@ class TopologyDescription {
118583
118576
  if (typeof serverDescription.setName === 'string' &&
118584
118577
  typeof setName === 'string' &&
118585
118578
  serverDescription.setName !== setName) {
118586
- if (topologyType === common_1$3.TopologyType.Single) {
118579
+ if (topologyType === common_1$4.TopologyType.Single) {
118587
118580
  // "Single" Topology with setName mismatch is direct connection usage, mark unknown do not remove
118588
118581
  serverDescription = new server_description_1$1.ServerDescription(address);
118589
118582
  }
@@ -118593,28 +118586,28 @@ class TopologyDescription {
118593
118586
  }
118594
118587
  // update the actual server description
118595
118588
  serverDescriptions.set(address, serverDescription);
118596
- if (topologyType === common_1$3.TopologyType.Single) {
118589
+ if (topologyType === common_1$4.TopologyType.Single) {
118597
118590
  // once we are defined as single, that never changes
118598
- return new TopologyDescription(common_1$3.TopologyType.Single, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
118591
+ return new TopologyDescription(common_1$4.TopologyType.Single, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
118599
118592
  }
118600
- if (topologyType === common_1$3.TopologyType.Unknown) {
118601
- if (serverType === common_1$3.ServerType.Standalone && this.servers.size !== 1) {
118593
+ if (topologyType === common_1$4.TopologyType.Unknown) {
118594
+ if (serverType === common_1$4.ServerType.Standalone && this.servers.size !== 1) {
118602
118595
  serverDescriptions.delete(address);
118603
118596
  }
118604
118597
  else {
118605
118598
  topologyType = topologyTypeForServerType(serverType);
118606
118599
  }
118607
118600
  }
118608
- if (topologyType === common_1$3.TopologyType.Sharded) {
118601
+ if (topologyType === common_1$4.TopologyType.Sharded) {
118609
118602
  if (!MONGOS_OR_UNKNOWN.has(serverType)) {
118610
118603
  serverDescriptions.delete(address);
118611
118604
  }
118612
118605
  }
118613
- if (topologyType === common_1$3.TopologyType.ReplicaSetNoPrimary) {
118606
+ if (topologyType === common_1$4.TopologyType.ReplicaSetNoPrimary) {
118614
118607
  if (MONGOS_OR_STANDALONE.has(serverType)) {
118615
118608
  serverDescriptions.delete(address);
118616
118609
  }
118617
- if (serverType === common_1$3.ServerType.RSPrimary) {
118610
+ if (serverType === common_1$4.ServerType.RSPrimary) {
118618
118611
  const result = updateRsFromPrimary(serverDescriptions, serverDescription, setName, maxSetVersion, maxElectionId);
118619
118612
  topologyType = result[0];
118620
118613
  setName = result[1];
@@ -118627,12 +118620,12 @@ class TopologyDescription {
118627
118620
  setName = result[1];
118628
118621
  }
118629
118622
  }
118630
- if (topologyType === common_1$3.TopologyType.ReplicaSetWithPrimary) {
118623
+ if (topologyType === common_1$4.TopologyType.ReplicaSetWithPrimary) {
118631
118624
  if (MONGOS_OR_STANDALONE.has(serverType)) {
118632
118625
  serverDescriptions.delete(address);
118633
118626
  topologyType = checkHasPrimary(serverDescriptions);
118634
118627
  }
118635
- else if (serverType === common_1$3.ServerType.RSPrimary) {
118628
+ else if (serverType === common_1$4.ServerType.RSPrimary) {
118636
118629
  const result = updateRsFromPrimary(serverDescriptions, serverDescription, setName, maxSetVersion, maxElectionId);
118637
118630
  topologyType = result[0];
118638
118631
  setName = result[1];
@@ -118659,7 +118652,7 @@ class TopologyDescription {
118659
118652
  * Determines if the topology description has any known servers
118660
118653
  */
118661
118654
  get hasKnownServers() {
118662
- return Array.from(this.servers.values()).some((sd) => sd.type !== common_1$3.ServerType.Unknown);
118655
+ return Array.from(this.servers.values()).some((sd) => sd.type !== common_1$4.ServerType.Unknown);
118663
118656
  }
118664
118657
  /**
118665
118658
  * Determines if this topology description has a data-bearing server available.
@@ -118678,17 +118671,17 @@ class TopologyDescription {
118678
118671
  topology_description.TopologyDescription = TopologyDescription;
118679
118672
  function topologyTypeForServerType(serverType) {
118680
118673
  switch (serverType) {
118681
- case common_1$3.ServerType.Standalone:
118682
- return common_1$3.TopologyType.Single;
118683
- case common_1$3.ServerType.Mongos:
118684
- return common_1$3.TopologyType.Sharded;
118685
- case common_1$3.ServerType.RSPrimary:
118686
- return common_1$3.TopologyType.ReplicaSetWithPrimary;
118687
- case common_1$3.ServerType.RSOther:
118688
- case common_1$3.ServerType.RSSecondary:
118689
- return common_1$3.TopologyType.ReplicaSetNoPrimary;
118674
+ case common_1$4.ServerType.Standalone:
118675
+ return common_1$4.TopologyType.Single;
118676
+ case common_1$4.ServerType.Mongos:
118677
+ return common_1$4.TopologyType.Sharded;
118678
+ case common_1$4.ServerType.RSPrimary:
118679
+ return common_1$4.TopologyType.ReplicaSetWithPrimary;
118680
+ case common_1$4.ServerType.RSOther:
118681
+ case common_1$4.ServerType.RSSecondary:
118682
+ return common_1$4.TopologyType.ReplicaSetNoPrimary;
118690
118683
  default:
118691
- return common_1$3.TopologyType.Unknown;
118684
+ return common_1$4.TopologyType.Unknown;
118692
118685
  }
118693
118686
  }
118694
118687
  function updateRsFromPrimary(serverDescriptions, serverDescription, setName = null, maxSetVersion = null, maxElectionId = null) {
@@ -118736,7 +118729,7 @@ function updateRsFromPrimary(serverDescriptions, serverDescription, setName = nu
118736
118729
  }
118737
118730
  // We've heard from the primary. Is it the same primary as before?
118738
118731
  for (const [address, server] of serverDescriptions) {
118739
- if (server.type === common_1$3.ServerType.RSPrimary && server.address !== serverDescription.address) {
118732
+ if (server.type === common_1$4.ServerType.RSPrimary && server.address !== serverDescription.address) {
118740
118733
  // Reset old primary's type to Unknown.
118741
118734
  serverDescriptions.set(address, new server_description_1$1.ServerDescription(server.address));
118742
118735
  // There can only be one primary
@@ -118771,7 +118764,7 @@ function updateRsWithPrimaryFromMember(serverDescriptions, serverDescription, se
118771
118764
  return checkHasPrimary(serverDescriptions);
118772
118765
  }
118773
118766
  function updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setName = null) {
118774
- const topologyType = common_1$3.TopologyType.ReplicaSetNoPrimary;
118767
+ const topologyType = common_1$4.TopologyType.ReplicaSetNoPrimary;
118775
118768
  setName = setName ?? serverDescription.setName;
118776
118769
  if (setName !== serverDescription.setName) {
118777
118770
  serverDescriptions.delete(serverDescription.address);
@@ -118789,28 +118782,24 @@ function updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setN
118789
118782
  }
118790
118783
  function checkHasPrimary(serverDescriptions) {
118791
118784
  for (const serverDescription of serverDescriptions.values()) {
118792
- if (serverDescription.type === common_1$3.ServerType.RSPrimary) {
118793
- return common_1$3.TopologyType.ReplicaSetWithPrimary;
118785
+ if (serverDescription.type === common_1$4.ServerType.RSPrimary) {
118786
+ return common_1$4.TopologyType.ReplicaSetWithPrimary;
118794
118787
  }
118795
118788
  }
118796
- return common_1$3.TopologyType.ReplicaSetNoPrimary;
118789
+ return common_1$4.TopologyType.ReplicaSetNoPrimary;
118797
118790
  }Object.defineProperty(shared$6, "__esModule", { value: true });
118798
118791
  shared$6.isSharded = shared$6.getReadPreference = void 0;
118799
118792
  const error_1$v = error$5;
118800
- const read_preference_1$2 = read_preference;
118801
- const common_1$2 = common$1;
118793
+ const read_preference_1$3 = read_preference;
118794
+ const common_1$3 = common$1;
118802
118795
  const topology_description_1 = topology_description;
118803
118796
  function getReadPreference(options) {
118804
- // Default to command version of the readPreference
118805
- let readPreference = options?.readPreference ?? read_preference_1$2.ReadPreference.primary;
118806
- // If we have an option readPreference override the command one
118807
- if (options?.readPreference) {
118808
- readPreference = options.readPreference;
118809
- }
118797
+ // Default to command version of the readPreference.
118798
+ let readPreference = options?.readPreference ?? read_preference_1$3.ReadPreference.primary;
118810
118799
  if (typeof readPreference === 'string') {
118811
- readPreference = read_preference_1$2.ReadPreference.fromString(readPreference);
118800
+ readPreference = read_preference_1$3.ReadPreference.fromString(readPreference);
118812
118801
  }
118813
- if (!(readPreference instanceof read_preference_1$2.ReadPreference)) {
118802
+ if (!(readPreference instanceof read_preference_1$3.ReadPreference)) {
118814
118803
  throw new error_1$v.MongoInvalidArgumentError('Option "readPreference" must be a ReadPreference instance');
118815
118804
  }
118816
118805
  return readPreference;
@@ -118820,14 +118809,14 @@ function isSharded(topologyOrServer) {
118820
118809
  if (topologyOrServer == null) {
118821
118810
  return false;
118822
118811
  }
118823
- if (topologyOrServer.description && topologyOrServer.description.type === common_1$2.ServerType.Mongos) {
118812
+ if (topologyOrServer.description && topologyOrServer.description.type === common_1$3.ServerType.Mongos) {
118824
118813
  return true;
118825
118814
  }
118826
118815
  // NOTE: This is incredibly inefficient, and should be removed once command construction
118827
- // happens based on `Server` not `Topology`.
118816
+ // happens based on `Server` not `Topology`.
118828
118817
  if (topologyOrServer.description && topologyOrServer.description instanceof topology_description_1.TopologyDescription) {
118829
118818
  const servers = Array.from(topologyOrServer.description.servers.values());
118830
- return servers.some((server) => server.type === common_1$2.ServerType.Mongos);
118819
+ return servers.some((server) => server.type === common_1$3.ServerType.Mongos);
118831
118820
  }
118832
118821
  return false;
118833
118822
  }
@@ -118972,7 +118961,7 @@ shared$6.isSharded = isSharded;var transactions = {};(function (exports) {
118972
118961
  } (transactions));var _a$7;
118973
118962
  Object.defineProperty(sessions, "__esModule", { value: true });
118974
118963
  sessions.updateSessionFromResponse = sessions.applySession = sessions.ServerSessionPool = sessions.ServerSession = sessions.maybeClearPinnedConnection = sessions.ClientSession = void 0;
118975
- const util_1$2 = require$$0$6;
118964
+ const util_1$1 = require$$0$6;
118976
118965
  const bson_1$a = bson$2;
118977
118966
  const metrics_1 = metrics;
118978
118967
  const shared_1$1 = shared$6;
@@ -118982,8 +118971,8 @@ const mongo_types_1$3 = mongo_types;
118982
118971
  const execute_operation_1$5 = execute_operation;
118983
118972
  const run_command_1$1 = run_command;
118984
118973
  const read_concern_1$1 = read_concern;
118985
- const read_preference_1$1 = read_preference;
118986
- const common_1$1 = common$1;
118974
+ const read_preference_1$2 = read_preference;
118975
+ const common_1$2 = common$1;
118987
118976
  const transactions_1 = transactions;
118988
118977
  const utils_1$p = utils$c;
118989
118978
  const write_concern_1$2 = write_concern;
@@ -119073,7 +119062,7 @@ class ClientSession extends mongo_types_1$3.TypedEventEmitter {
119073
119062
  return this[kSnapshotEnabled];
119074
119063
  }
119075
119064
  get loadBalanced() {
119076
- return this.client.topology?.description.type === common_1$1.TopologyType.LoadBalanced;
119065
+ return this.client.topology?.description.type === common_1$2.TopologyType.LoadBalanced;
119077
119066
  }
119078
119067
  /** @internal */
119079
119068
  get pinnedConnection() {
@@ -119164,7 +119153,7 @@ class ClientSession extends mongo_types_1$3.TypedEventEmitter {
119164
119153
  ) {
119165
119154
  throw new error_1$u.MongoInvalidArgumentError('input cluster time must have a valid "signature" property with BSON Binary hash and BSON Long keyId');
119166
119155
  }
119167
- (0, common_1$1._advanceClusterTime)(this, clusterTime);
119156
+ (0, common_1$2._advanceClusterTime)(this, clusterTime);
119168
119157
  }
119169
119158
  /**
119170
119159
  * Used to determine if this session equals another
@@ -119390,7 +119379,7 @@ function attemptTransaction(session, startTime, fn, options = {}) {
119390
119379
  return maybeRetryOrThrow(err);
119391
119380
  });
119392
119381
  }
119393
- const endTransactionAsync = (0, util_1$2.promisify)(endTransaction);
119382
+ const endTransactionAsync = (0, util_1$1.promisify)(endTransaction);
119394
119383
  function endTransaction(session, commandName, callback) {
119395
119384
  // handle any initial problematic cases
119396
119385
  const txnState = session.transaction.state;
@@ -119492,7 +119481,7 @@ function endTransaction(session, commandName, callback) {
119492
119481
  }
119493
119482
  (0, execute_operation_1$5.executeOperation)(session.client, new run_command_1$1.RunAdminCommandOperation(command, {
119494
119483
  session,
119495
- readPreference: read_preference_1$1.ReadPreference.primary,
119484
+ readPreference: read_preference_1$2.ReadPreference.primary,
119496
119485
  bypassPinningCheck: true
119497
119486
  })).then(() => commandHandler(), commandHandler);
119498
119487
  return;
@@ -119502,7 +119491,7 @@ function endTransaction(session, commandName, callback) {
119502
119491
  // send the command
119503
119492
  (0, execute_operation_1$5.executeOperation)(session.client, new run_command_1$1.RunAdminCommandOperation(command, {
119504
119493
  session,
119505
- readPreference: read_preference_1$1.ReadPreference.primary,
119494
+ readPreference: read_preference_1$2.ReadPreference.primary,
119506
119495
  bypassPinningCheck: true
119507
119496
  })).then(() => handleFirstCommandAttempt(), handleFirstCommandAttempt);
119508
119497
  }
@@ -119688,7 +119677,7 @@ function applySession(session, command, options) {
119688
119677
  sessions.applySession = applySession;
119689
119678
  function updateSessionFromResponse(session, document) {
119690
119679
  if (document.$clusterTime) {
119691
- (0, common_1$1._advanceClusterTime)(session, document.$clusterTime);
119680
+ (0, common_1$2._advanceClusterTime)(session, document.$clusterTime);
119692
119681
  }
119693
119682
  if (document.operationTime && session && session.supports.causalConsistency) {
119694
119683
  session.advanceOperationTime(document.operationTime);
@@ -121231,7 +121220,7 @@ class FindCursor extends abstract_cursor_1$3.AbstractCursor {
121231
121220
  find_cursor.FindCursor = FindCursor;var list_indexes_cursor = {};var indexes = {};Object.defineProperty(indexes, "__esModule", { value: true });
121232
121221
  indexes.IndexInformationOperation = indexes.IndexExistsOperation = indexes.ListIndexesOperation = indexes.DropIndexOperation = indexes.EnsureIndexOperation = indexes.CreateIndexOperation = indexes.CreateIndexesOperation = indexes.IndexesOperation = void 0;
121233
121222
  const error_1$q = error$5;
121234
- const read_preference_1 = read_preference;
121223
+ const read_preference_1$1 = read_preference;
121235
121224
  const utils_1$l = utils$c;
121236
121225
  const command_1$6 = command$1;
121237
121226
  const common_functions_1 = common_functions;
@@ -121370,7 +121359,7 @@ indexes.CreateIndexOperation = CreateIndexOperation;
121370
121359
  class EnsureIndexOperation extends CreateIndexOperation {
121371
121360
  constructor(db, collectionName, indexSpec, options) {
121372
121361
  super(db, collectionName, indexSpec, options);
121373
- this.readPreference = read_preference_1.ReadPreference.primary;
121362
+ this.readPreference = read_preference_1$1.ReadPreference.primary;
121374
121363
  this.db = db;
121375
121364
  this.collectionName = collectionName;
121376
121365
  }
@@ -176440,7 +176429,7 @@ function requireLib$2 () {
176440
176429
  } (lib$6));
176441
176430
  return lib$6;
176442
176431
  }var client_metadata = {};var name$6 = "mongodb";
176443
- var version$6 = "6.4.0";
176432
+ var version$6 = "6.5.0";
176444
176433
  var description$2 = "The official MongoDB driver for Node.js";
176445
176434
  var main$3 = "lib/index.js";
176446
176435
  var files$2 = [
@@ -176465,7 +176454,7 @@ var author$2 = {
176465
176454
  email: "dbx-node@mongodb.com"
176466
176455
  };
176467
176456
  var dependencies$3 = {
176468
- "@mongodb-js/saslprep": "^1.1.0",
176457
+ "@mongodb-js/saslprep": "^1.1.5",
176469
176458
  bson: "^6.4.0",
176470
176459
  "mongodb-connection-string-url": "^3.0.0"
176471
176460
  };
@@ -176612,7 +176601,7 @@ var tsd = {
176612
176601
  moduleResolution: "node"
176613
176602
  }
176614
176603
  };
176615
- var require$$4$1 = {
176604
+ var require$$5$1 = {
176616
176605
  name: name$6,
176617
176606
  version: version$6,
176618
176607
  description: description$2,
@@ -176633,13 +176622,14 @@ var require$$4$1 = {
176633
176622
  scripts: scripts$3,
176634
176623
  tsd: tsd
176635
176624
  };Object.defineProperty(client_metadata, "__esModule", { value: true });
176636
- client_metadata.getFAASEnv = client_metadata.makeClientMetadata = client_metadata.LimitedSizeDocument = void 0;
176625
+ client_metadata.getFAASEnv = client_metadata.addContainerMetadata = client_metadata.makeClientMetadata = client_metadata.LimitedSizeDocument = void 0;
176626
+ const fs_1 = require$$0$c;
176637
176627
  const os$1 = os$3;
176638
176628
  const process$4 = require$$0$m;
176639
176629
  const bson_1$8 = bson$2;
176640
176630
  const error_1$i = error$5;
176641
176631
  // eslint-disable-next-line @typescript-eslint/no-var-requires
176642
- const NODE_DRIVER_VERSION = require$$4$1.version;
176632
+ const NODE_DRIVER_VERSION = require$$5$1.version;
176643
176633
  /** @internal */
176644
176634
  class LimitedSizeDocument {
176645
176635
  constructor(maxSize) {
@@ -176733,6 +176723,48 @@ function makeClientMetadata(options) {
176733
176723
  return metadataDocument.toObject();
176734
176724
  }
176735
176725
  client_metadata.makeClientMetadata = makeClientMetadata;
176726
+ let dockerPromise;
176727
+ /** @internal */
176728
+ async function getContainerMetadata() {
176729
+ const containerMetadata = {};
176730
+ dockerPromise ??= fs_1.promises.access('/.dockerenv').then(() => true, () => false);
176731
+ const isDocker = await dockerPromise;
176732
+ const { KUBERNETES_SERVICE_HOST = '' } = process$4.env;
176733
+ const isKubernetes = KUBERNETES_SERVICE_HOST.length > 0 ? true : false;
176734
+ if (isDocker)
176735
+ containerMetadata.runtime = 'docker';
176736
+ if (isKubernetes)
176737
+ containerMetadata.orchestrator = 'kubernetes';
176738
+ return containerMetadata;
176739
+ }
176740
+ /**
176741
+ * @internal
176742
+ * Re-add each metadata value.
176743
+ * Attempt to add new env container metadata, but keep old data if it does not fit.
176744
+ */
176745
+ async function addContainerMetadata(originalMetadata) {
176746
+ const containerMetadata = await getContainerMetadata();
176747
+ if (Object.keys(containerMetadata).length === 0)
176748
+ return originalMetadata;
176749
+ const extendedMetadata = new LimitedSizeDocument(512);
176750
+ const extendedEnvMetadata = { ...originalMetadata?.env, container: containerMetadata };
176751
+ for (const [key, val] of Object.entries(originalMetadata)) {
176752
+ if (key !== 'env') {
176753
+ extendedMetadata.ifItFitsItSits(key, val);
176754
+ }
176755
+ else {
176756
+ if (!extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata)) {
176757
+ // add in old data if newer / extended metadata does not fit
176758
+ extendedMetadata.ifItFitsItSits('env', val);
176759
+ }
176760
+ }
176761
+ }
176762
+ if (!('env' in originalMetadata)) {
176763
+ extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata);
176764
+ }
176765
+ return extendedMetadata.toObject();
176766
+ }
176767
+ client_metadata.addContainerMetadata = addContainerMetadata;
176736
176768
  /**
176737
176769
  * Collects FaaS metadata.
176738
176770
  * - `name` MUST be the last key in the Map returned.
@@ -176811,7 +176843,6 @@ function requireCommands$4 () {
176811
176843
  commands$9.OpCompressedRequest = commands$9.OpMsgResponse = commands$9.OpMsgRequest = commands$9.OpQueryResponse = commands$9.OpQueryRequest = void 0;
176812
176844
  const BSON = bson$2;
176813
176845
  const error_1 = error$5;
176814
- const read_preference_1 = read_preference;
176815
176846
  const compression_1 = requireCompression();
176816
176847
  const constants_1 = constants$4;
176817
176848
  // Incrementing request id
@@ -177139,9 +177170,6 @@ function requireCommands$4 () {
177139
177170
  throw new error_1.MongoInvalidArgumentError('Query document must be specified for query');
177140
177171
  // Basic options
177141
177172
  this.command.$db = databaseName;
177142
- if (options.readPreference && options.readPreference.mode !== read_preference_1.ReadPreference.PRIMARY) {
177143
- this.command.$readPreference = options.readPreference.toJSON();
177144
- }
177145
177173
  // Ensure empty options
177146
177174
  this.options = options ?? {};
177147
177175
  // Additional options
@@ -179064,7 +179092,7 @@ function requireEncrypter () {
179064
179092
  } (command_monitoring_events));var stream_description = {};Object.defineProperty(stream_description, "__esModule", { value: true });
179065
179093
  stream_description.StreamDescription = void 0;
179066
179094
  const bson_1$6 = bson$2;
179067
- const common_1 = common$1;
179095
+ const common_1$1 = common$1;
179068
179096
  const server_description_1 = server_description;
179069
179097
  const RESPONSE_FIELDS = [
179070
179098
  'minWireVersion',
@@ -179079,7 +179107,7 @@ class StreamDescription {
179079
179107
  constructor(address, options) {
179080
179108
  this.hello = null;
179081
179109
  this.address = address;
179082
- this.type = common_1.ServerType.Unknown;
179110
+ this.type = common_1$1.ServerType.Unknown;
179083
179111
  this.minWireVersion = undefined;
179084
179112
  this.maxWireVersion = undefined;
179085
179113
  this.maxBsonObjectSize = 16777216;
@@ -179136,10 +179164,9 @@ const utils_1$g = utils$c;
179136
179164
  * https://nodejs.org/api/events.html#eventsonemitter-eventname-options
179137
179165
  *
179138
179166
  * Returns an AsyncIterator that iterates each 'data' event emitted from emitter.
179139
- * It will reject upon an error event or if the provided signal is aborted.
179167
+ * It will reject upon an error event.
179140
179168
  */
179141
- function onData(emitter, options) {
179142
- const signal = options.signal;
179169
+ function onData(emitter) {
179143
179170
  // Setup pending events and pending promise lists
179144
179171
  /**
179145
179172
  * When the caller has not yet called .next(), we store the
@@ -179198,17 +179225,7 @@ function onData(emitter, options) {
179198
179225
  // Adding event handlers
179199
179226
  emitter.on('data', eventHandler);
179200
179227
  emitter.on('error', errorHandler);
179201
- if (signal.aborted) {
179202
- // If the signal is aborted, set up the first .next() call to be a rejection
179203
- queueMicrotask(abortListener);
179204
- }
179205
- else {
179206
- signal.addEventListener('abort', abortListener, { once: true });
179207
- }
179208
179228
  return iterator;
179209
- function abortListener() {
179210
- errorHandler(signal.reason);
179211
- }
179212
179229
  function eventHandler(value) {
179213
179230
  const promise = unconsumedPromises.shift();
179214
179231
  if (promise != null)
@@ -179228,7 +179245,6 @@ function onData(emitter, options) {
179228
179245
  // Adding event handlers
179229
179246
  emitter.off('data', eventHandler);
179230
179247
  emitter.off('error', errorHandler);
179231
- signal.removeEventListener('abort', abortListener);
179232
179248
  finished = true;
179233
179249
  const doneResult = { value: undefined, done: finished };
179234
179250
  for (const promise of unconsumedPromises) {
@@ -179241,11 +179257,12 @@ on_data.onData = onData;Object.defineProperty(connection$3, "__esModule", { valu
179241
179257
  connection$3.CryptoConnection = connection$3.SizedMessageTransform = connection$3.Connection = connection$3.hasSessionSupport = void 0;
179242
179258
  const stream_1$2 = require$$0$b;
179243
179259
  const timers_1$2 = require$$1$9;
179244
- const util_1$1 = require$$0$6;
179245
179260
  const constants_1$3 = constants$3;
179246
179261
  const error_1$f = error$5;
179247
179262
  const mongo_logger_1 = mongo_logger;
179248
179263
  const mongo_types_1$2 = mongo_types;
179264
+ const read_preference_1 = read_preference;
179265
+ const common_1 = common$1;
179249
179266
  const sessions_1 = sessions;
179250
179267
  const utils_1$f = utils$c;
179251
179268
  const command_monitoring_events_1 = command_monitoring_events;
@@ -179279,7 +179296,12 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179279
179296
  this.lastHelloMS = -1;
179280
179297
  this.helloOk = false;
179281
179298
  this.delayedTimeoutId = null;
179299
+ /** Indicates that the connection (including underlying TCP socket) has been closed. */
179300
+ this.closed = false;
179282
179301
  this.clusterTime = null;
179302
+ this.error = null;
179303
+ this.dataEvents = null;
179304
+ this.socket = stream;
179283
179305
  this.id = options.id;
179284
179306
  this.address = streamIdentifier(stream, options);
179285
179307
  this.socketTimeoutMS = options.socketTimeoutMS ?? 0;
@@ -179290,31 +179312,12 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179290
179312
  this.description = new stream_description_1.StreamDescription(this.address, options);
179291
179313
  this.generation = options.generation;
179292
179314
  this.lastUseTime = (0, utils_1$f.now)();
179293
- this.socket = stream;
179294
- // TODO: Remove signal from connection layer
179295
- this.controller = new AbortController();
179296
- const { signal } = this.controller;
179297
- this.signal = signal;
179298
- const { promise: aborted, reject } = (0, utils_1$f.promiseWithResolvers)();
179299
- aborted.then(undefined, () => null); // Prevent unhandled rejection
179300
- this.signal.addEventListener('abort', function onAbort() {
179301
- reject(signal.reason);
179302
- }, { once: true });
179303
- this.aborted = aborted;
179304
179315
  this.messageStream = this.socket
179305
179316
  .on('error', this.onError.bind(this))
179306
179317
  .pipe(new SizedMessageTransform({ connection: this }))
179307
179318
  .on('error', this.onError.bind(this));
179308
179319
  this.socket.on('close', this.onClose.bind(this));
179309
179320
  this.socket.on('timeout', this.onTimeout.bind(this));
179310
- const socketWrite = (0, util_1$1.promisify)(this.socket.write.bind(this.socket));
179311
- this.socketWrite = async (buffer) => {
179312
- return Promise.race([socketWrite(buffer), this.aborted]);
179313
- };
179314
- }
179315
- /** Indicates that the connection (including underlying TCP socket) has been closed. */
179316
- get closed() {
179317
- return this.signal.aborted;
179318
179321
  }
179319
179322
  get hello() {
179320
179323
  return this.description.hello;
@@ -179365,15 +179368,10 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179365
179368
  this.cleanup(new error_1$f.MongoNetworkTimeoutError(message, { beforeHandshake }));
179366
179369
  }, 1).unref(); // No need for this timer to hold the event loop open
179367
179370
  }
179368
- destroy(options, callback) {
179371
+ destroy() {
179369
179372
  if (this.closed) {
179370
- if (typeof callback === 'function')
179371
- process.nextTick(callback);
179372
179373
  return;
179373
179374
  }
179374
- if (typeof callback === 'function') {
179375
- this.once('close', () => process.nextTick(() => callback()));
179376
- }
179377
179375
  // load balanced mode requires that these listeners remain on the connection
179378
179376
  // after cleanup on timeouts, errors or close so we remove them before calling
179379
179377
  // cleanup.
@@ -179395,7 +179393,9 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179395
179393
  return;
179396
179394
  }
179397
179395
  this.socket.destroy();
179398
- this.controller.abort(error);
179396
+ this.error = error;
179397
+ this.dataEvents?.throw(error).then(undefined, () => null); // squash unhandled rejection
179398
+ this.closed = true;
179399
179399
  this.emit(Connection.CLOSE);
179400
179400
  }
179401
179401
  prepareCommand(db, command, options) {
@@ -179428,14 +179428,34 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179428
179428
  if (clusterTime) {
179429
179429
  cmd.$clusterTime = clusterTime;
179430
179430
  }
179431
- if ((0, shared_1.isSharded)(this) &&
179432
- !this.supportsOpMsg &&
179433
- readPreference &&
179434
- readPreference.mode !== 'primary') {
179435
- cmd = {
179436
- $query: cmd,
179437
- $readPreference: readPreference.toJSON()
179438
- };
179431
+ // For standalone, drivers MUST NOT set $readPreference.
179432
+ if (this.description.type !== common_1.ServerType.Standalone) {
179433
+ if (!(0, shared_1.isSharded)(this) &&
179434
+ !this.description.loadBalanced &&
179435
+ this.supportsOpMsg &&
179436
+ options.directConnection === true &&
179437
+ readPreference?.mode === 'primary') {
179438
+ // For mongos and load balancers with 'primary' mode, drivers MUST NOT set $readPreference.
179439
+ // For all other types with a direct connection, if the read preference is 'primary'
179440
+ // (driver sets 'primary' as default if no read preference is configured),
179441
+ // the $readPreference MUST be set to 'primaryPreferred'
179442
+ // to ensure that any server type can handle the request.
179443
+ cmd.$readPreference = read_preference_1.ReadPreference.primaryPreferred.toJSON();
179444
+ }
179445
+ else if ((0, shared_1.isSharded)(this) && !this.supportsOpMsg && readPreference?.mode !== 'primary') {
179446
+ // When sending a read operation via OP_QUERY and the $readPreference modifier,
179447
+ // the query MUST be provided using the $query modifier.
179448
+ cmd = {
179449
+ $query: cmd,
179450
+ $readPreference: readPreference.toJSON()
179451
+ };
179452
+ }
179453
+ else if (readPreference?.mode !== 'primary') {
179454
+ // For mode 'primary', drivers MUST NOT set $readPreference.
179455
+ // For all other read preference modes (i.e. 'secondary', 'primaryPreferred', ...),
179456
+ // drivers MUST set $readPreference
179457
+ cmd.$readPreference = readPreference.toJSON();
179458
+ }
179439
179459
  }
179440
179460
  const commandOptions = {
179441
179461
  numberToSkip: 0,
@@ -179443,8 +179463,7 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179443
179463
  checkKeys: false,
179444
179464
  // This value is not overridable
179445
179465
  secondaryOk: readPreference.secondaryOk(),
179446
- ...options,
179447
- readPreference // ensure we pass in ReadPreference instance
179466
+ ...options
179448
179467
  };
179449
179468
  const message = this.supportsOpMsg
179450
179469
  ? new commands_1$3.OpMsgRequest(db, cmd, commandOptions)
@@ -179553,7 +179572,8 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179553
179572
  exhaustLoop().catch(replyListener);
179554
179573
  }
179555
179574
  throwIfAborted() {
179556
- this.signal.throwIfAborted();
179575
+ if (this.error)
179576
+ throw this.error;
179557
179577
  }
179558
179578
  /**
179559
179579
  * @internal
@@ -179569,7 +179589,9 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179569
179589
  zlibCompressionLevel: options.zlibCompressionLevel ?? 0
179570
179590
  });
179571
179591
  const buffer = Buffer.concat(await finalCommand.toBin());
179572
- return this.socketWrite(buffer);
179592
+ if (this.socket.write(buffer))
179593
+ return;
179594
+ return (0, utils_1$f.once)(this.socket, 'drain');
179573
179595
  }
179574
179596
  /**
179575
179597
  * @internal
@@ -179581,13 +179603,20 @@ let Connection$2=class Connection extends mongo_types_1$2.TypedEventEmitter {
179581
179603
  * Note that `for-await` loops call `return` automatically when the loop is exited.
179582
179604
  */
179583
179605
  async *readMany() {
179584
- for await (const message of (0, on_data_1.onData)(this.messageStream, { signal: this.signal })) {
179585
- const response = await (0, compression_1.decompressResponse)(message);
179586
- yield response;
179587
- if (!response.moreToCome) {
179588
- return;
179606
+ try {
179607
+ this.dataEvents = (0, on_data_1.onData)(this.messageStream);
179608
+ for await (const message of this.dataEvents) {
179609
+ const response = await (0, compression_1.decompressResponse)(message);
179610
+ yield response;
179611
+ if (!response.moreToCome) {
179612
+ return;
179613
+ }
179589
179614
  }
179590
179615
  }
179616
+ finally {
179617
+ this.dataEvents = null;
179618
+ this.throwIfAborted();
179619
+ }
179591
179620
  }
179592
179621
  };
179593
179622
  /** @event */
@@ -179700,7 +179729,7 @@ connection$3.CryptoConnection = CryptoConnection;(function (exports) {
179700
179729
  return connection;
179701
179730
  }
179702
179731
  catch (error) {
179703
- connection?.destroy({ force: false });
179732
+ connection?.destroy();
179704
179733
  throw error;
179705
179734
  }
179706
179735
  }
@@ -179804,10 +179833,11 @@ connection$3.CryptoConnection = CryptoConnection;(function (exports) {
179804
179833
  const options = authContext.options;
179805
179834
  const compressors = options.compressors ? options.compressors : [];
179806
179835
  const { serverApi } = authContext.connection;
179836
+ const clientMetadata = await options.extendedMetadata;
179807
179837
  const handshakeDoc = {
179808
179838
  [serverApi?.version || options.loadBalanced === true ? 'hello' : constants_1.LEGACY_HELLO_COMMAND]: 1,
179809
179839
  helloOk: true,
179810
- client: options.metadata,
179840
+ client: clientMetadata,
179811
179841
  compression: compressors
179812
179842
  };
179813
179843
  if (options.loadBalanced === true) {
@@ -179907,7 +179937,6 @@ connection$3.CryptoConnection = CryptoConnection;(function (exports) {
179907
179937
  const useTLS = options.tls ?? false;
179908
179938
  const noDelay = options.noDelay ?? true;
179909
179939
  const connectTimeoutMS = options.connectTimeoutMS ?? 30000;
179910
- const rejectUnauthorized = options.rejectUnauthorized ?? true;
179911
179940
  const existingSocket = options.existingSocket;
179912
179941
  let socket;
179913
179942
  if (options.proxyHost != null) {
@@ -179959,10 +179988,6 @@ connection$3.CryptoConnection = CryptoConnection;(function (exports) {
179959
179988
  }
179960
179989
  catch (error) {
179961
179990
  socket.destroy();
179962
- if ('authorizationError' in socket && socket.authorizationError != null && rejectUnauthorized) {
179963
- // TODO(NODE-5192): wrap this with a MongoError subclass
179964
- throw socket.authorizationError;
179965
- }
179966
179991
  throw error;
179967
179992
  }
179968
179993
  finally {
@@ -180540,8 +180565,7 @@ errors$8.WaitQueueTimeoutError = WaitQueueTimeoutError;(function (exports) {
180540
180565
  maxIdleTimeMS: options.maxIdleTimeMS ?? 0,
180541
180566
  waitQueueTimeoutMS: options.waitQueueTimeoutMS ?? 0,
180542
180567
  minPoolSizeCheckFrequencyMS: options.minPoolSizeCheckFrequencyMS ?? 100,
180543
- autoEncrypter: options.autoEncrypter,
180544
- metadata: options.metadata
180568
+ autoEncrypter: options.autoEncrypter
180545
180569
  });
180546
180570
  if (this.options.minPoolSize > this.options.maxPoolSize) {
180547
180571
  throw new error_1.MongoInvalidArgumentError('Connection pool minimum size must not be greater than maximum pool size');
@@ -180741,20 +180765,15 @@ errors$8.WaitQueueTimeoutError = WaitQueueTimeoutError;(function (exports) {
180741
180765
  interruptInUseConnections(minGeneration) {
180742
180766
  for (const connection of this[kCheckedOut]) {
180743
180767
  if (connection.generation <= minGeneration) {
180744
- this.checkIn(connection);
180745
180768
  connection.onError(new errors_1.PoolClearedOnNetworkError(this));
180769
+ this.checkIn(connection);
180746
180770
  }
180747
180771
  }
180748
180772
  }
180749
- close(_options, _cb) {
180750
- let options = _options;
180751
- const callback = (_cb ?? _options);
180752
- if (typeof options === 'function') {
180753
- options = {};
180754
- }
180755
- options = Object.assign({ force: false }, options);
180773
+ /** Close the pool */
180774
+ close() {
180756
180775
  if (this.closed) {
180757
- return callback();
180776
+ return;
180758
180777
  }
180759
180778
  // immediately cancel any in-flight connections
180760
180779
  this[kCancellationToken].emit('cancel');
@@ -180765,14 +180784,12 @@ errors$8.WaitQueueTimeoutError = WaitQueueTimeoutError;(function (exports) {
180765
180784
  this[kPoolState] = exports.PoolState.closed;
180766
180785
  this.clearMinPoolSizeTimer();
180767
180786
  this.processWaitQueue();
180768
- (0, utils_1.eachAsync)(this[kConnections].toArray(), (conn, cb) => {
180787
+ for (const conn of this[kConnections]) {
180769
180788
  this.emitAndLog(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, conn, 'poolClosed'));
180770
- conn.destroy({ force: !!options.force }, cb);
180771
- }, err => {
180772
- this[kConnections].clear();
180773
- this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLOSED, new connection_pool_events_1.ConnectionPoolClosedEvent(this));
180774
- callback(err);
180775
- });
180789
+ conn.destroy();
180790
+ }
180791
+ this[kConnections].clear();
180792
+ this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLOSED, new connection_pool_events_1.ConnectionPoolClosedEvent(this));
180776
180793
  }
180777
180794
  /**
180778
180795
  * @internal
@@ -180805,7 +180822,7 @@ errors$8.WaitQueueTimeoutError = WaitQueueTimeoutError;(function (exports) {
180805
180822
  destroyConnection(connection, reason) {
180806
180823
  this.emitAndLog(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, connection, reason));
180807
180824
  // destroy the connection
180808
- process.nextTick(() => connection.destroy({ force: false }));
180825
+ connection.destroy();
180809
180826
  }
180810
180827
  connectionIsStale(connection) {
180811
180828
  const serviceId = connection.serviceId;
@@ -180850,7 +180867,7 @@ errors$8.WaitQueueTimeoutError = WaitQueueTimeoutError;(function (exports) {
180850
180867
  // The pool might have closed since we started trying to create a connection
180851
180868
  if (this[kPoolState] !== exports.PoolState.ready) {
180852
180869
  this[kPending]--;
180853
- connection.destroy({ force: true });
180870
+ connection.destroy();
180854
180871
  callback(this.closed ? new errors_1.PoolClosedError(this) : new errors_1.PoolClearedError(this));
180855
180872
  return;
180856
180873
  }
@@ -181158,29 +181175,17 @@ function requireServer$1 () {
181158
181175
  }
181159
181176
  }
181160
181177
  /** Destroy the server connection */
181161
- destroy(options, callback) {
181162
- if (typeof options === 'function') {
181163
- callback = options;
181164
- options = { force: false };
181165
- }
181166
- options = Object.assign({}, { force: false }, options);
181178
+ destroy() {
181167
181179
  if (this.s.state === common_1.STATE_CLOSED) {
181168
- if (typeof callback === 'function') {
181169
- callback();
181170
- }
181171
181180
  return;
181172
181181
  }
181173
181182
  stateTransition(this, common_1.STATE_CLOSING);
181174
181183
  if (!this.loadBalanced) {
181175
181184
  this.monitor?.close();
181176
181185
  }
181177
- this.pool.close(options, err => {
181178
- stateTransition(this, common_1.STATE_CLOSED);
181179
- this.emit('closed');
181180
- if (typeof callback === 'function') {
181181
- callback(err);
181182
- }
181183
- });
181186
+ this.pool.close();
181187
+ stateTransition(this, common_1.STATE_CLOSED);
181188
+ this.emit('closed');
181184
181189
  }
181185
181190
  /**
181186
181191
  * Immediately schedule monitoring of this server. If there already an attempt being made
@@ -181203,7 +181208,10 @@ function requireServer$1 () {
181203
181208
  throw new error_1.MongoServerClosedError();
181204
181209
  }
181205
181210
  // Clone the options
181206
- const finalOptions = Object.assign({}, options, { wireProtocolCommand: false });
181211
+ const finalOptions = Object.assign({}, options, {
181212
+ wireProtocolCommand: false,
181213
+ directConnection: this.topology.s.options.directConnection
181214
+ });
181207
181215
  // There are cases where we need to flag the read preference not to get sent in
181208
181216
  // the command, such as pre-5.0 servers attempting to perform an aggregate write
181209
181217
  // with a non-primary read preference. In this case the effective read preference
@@ -181574,7 +181582,7 @@ function requireMonitor () {
181574
181582
  monitor.rttPinger?.close();
181575
181583
  monitor.rttPinger = undefined;
181576
181584
  monitor[kCancellationToken].emit('cancel');
181577
- monitor.connection?.destroy({ force: true });
181585
+ monitor.connection?.destroy();
181578
181586
  monitor.connection = null;
181579
181587
  }
181580
181588
  function useStreamingProtocol(monitor, topologyVersion) {
@@ -181601,7 +181609,7 @@ function requireMonitor () {
181601
181609
  const isAwaitable = useStreamingProtocol(monitor, topologyVersion);
181602
181610
  monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_STARTED, monitor[kServer].topology.s.id, undefined, new events_1.ServerHeartbeatStartedEvent(monitor.address, isAwaitable));
181603
181611
  function onHeartbeatFailed(err) {
181604
- monitor.connection?.destroy({ force: true });
181612
+ monitor.connection?.destroy();
181605
181613
  monitor.connection = null;
181606
181614
  monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_FAILED, monitor[kServer].topology.s.id, undefined, new events_1.ServerHeartbeatFailedEvent(monitor.address, (0, utils_1.calculateDurationInMs)(start), err, awaited));
181607
181615
  const error = !(err instanceof error_1.MongoError)
@@ -181684,12 +181692,12 @@ function requireMonitor () {
181684
181692
  return connection;
181685
181693
  }
181686
181694
  catch (error) {
181687
- connection.destroy({ force: false });
181695
+ connection.destroy();
181688
181696
  throw error;
181689
181697
  }
181690
181698
  })().then(connection => {
181691
181699
  if (isInCloseState(monitor)) {
181692
- connection.destroy({ force: true });
181700
+ connection.destroy();
181693
181701
  return;
181694
181702
  }
181695
181703
  monitor.connection = connection;
@@ -181757,7 +181765,7 @@ function requireMonitor () {
181757
181765
  close() {
181758
181766
  this.closed = true;
181759
181767
  (0, timers_1.clearTimeout)(this[kMonitorId]);
181760
- this.connection?.destroy({ force: true });
181768
+ this.connection?.destroy();
181761
181769
  this.connection = undefined;
181762
181770
  }
181763
181771
  }
@@ -181771,7 +181779,7 @@ function requireMonitor () {
181771
181779
  }
181772
181780
  function measureAndReschedule(conn) {
181773
181781
  if (rttPinger.closed) {
181774
- conn?.destroy({ force: true });
181782
+ conn?.destroy();
181775
181783
  return;
181776
181784
  }
181777
181785
  if (rttPinger.connection == null) {
@@ -181792,7 +181800,7 @@ function requireMonitor () {
181792
181800
  }
181793
181801
  const commandName = connection.serverApi?.version || connection.helloOk ? 'hello' : constants_1.LEGACY_HELLO_COMMAND;
181794
181802
  connection.command((0, utils_1.ns)('admin.$cmd'), { [commandName]: 1 }, undefined).then(() => measureAndReschedule(), () => {
181795
- rttPinger.connection?.destroy({ force: true });
181803
+ rttPinger.connection?.destroy();
181796
181804
  rttPinger.connection = undefined;
181797
181805
  rttPinger[kRoundTripTime] = 0;
181798
181806
  return;
@@ -181937,8 +181945,14 @@ function requireConnection_string () {
181937
181945
  // TODO(NODE-3484): Replace with MongoConnectionStringError
181938
181946
  throw new error_1.MongoAPIError('URI must include hostname, domain name, and tld');
181939
181947
  }
181940
- // Resolve the SRV record and use the result as the list of hosts to connect to.
181948
+ // Asynchronously start TXT resolution so that we do not have to wait until
181949
+ // the SRV record is resolved before starting a second DNS query.
181941
181950
  const lookupAddress = options.srvHost;
181951
+ const txtResolutionPromise = dns.promises.resolveTxt(lookupAddress);
181952
+ txtResolutionPromise.catch(() => {
181953
+ /* rejections will be handled later */
181954
+ });
181955
+ // Resolve the SRV record and use the result as the list of hosts to connect to.
181942
181956
  const addresses = await dns.promises.resolveSrv(`_${options.srvServiceName}._tcp.${lookupAddress}`);
181943
181957
  if (addresses.length === 0) {
181944
181958
  throw new error_1.MongoAPIError('No addresses found at host');
@@ -181950,10 +181964,10 @@ function requireConnection_string () {
181950
181964
  }
181951
181965
  const hostAddresses = addresses.map(r => utils_1.HostAddress.fromString(`${r.name}:${r.port ?? 27017}`));
181952
181966
  validateLoadBalancedOptions(hostAddresses, options, true);
181953
- // Resolve TXT record and add options from there if they exist.
181967
+ // Use the result of resolving the TXT record and add options from there if they exist.
181954
181968
  let record;
181955
181969
  try {
181956
- record = await dns.promises.resolveTxt(lookupAddress);
181970
+ record = await txtResolutionPromise;
181957
181971
  }
181958
181972
  catch (error) {
181959
181973
  if (error.code !== 'ENODATA' && error.code !== 'ENOTFOUND') {
@@ -182295,6 +182309,9 @@ function requireConnection_string () {
182295
182309
  }
182296
182310
  mongoOptions.mongoLoggerOptions = mongo_logger_1.MongoLogger.resolveOptions(loggerEnvOptions, loggerClientOptions);
182297
182311
  mongoOptions.metadata = (0, client_metadata_1.makeClientMetadata)(mongoOptions);
182312
+ mongoOptions.extendedMetadata = (0, client_metadata_1.addContainerMetadata)(mongoOptions.metadata).catch(() => {
182313
+ /* rejections will be handled later */
182314
+ });
182298
182315
  return mongoOptions;
182299
182316
  }
182300
182317
  exports.parseOptions = parseOptions;
@@ -184288,8 +184305,11 @@ const index_1$1 = __importDefault$8(dist$b);
184288
184305
  const memory_code_points_1 = memoryCodePoints;
184289
184306
  const code_points_data_1 = __importDefault$8(codePointsData);
184290
184307
  const codePoints = (0, memory_code_points_1.createMemoryCodePoints)(code_points_data_1.default);
184291
- const saslprep = index_1$1.default.bind(null, codePoints);
184292
- Object.assign(saslprep, { saslprep, default: saslprep });
184308
+ function saslprep(input, opts) {
184309
+ return (0, index_1$1.default)(codePoints, input, opts);
184310
+ }
184311
+ saslprep.saslprep = saslprep;
184312
+ saslprep.default = saslprep;
184293
184313
  var node = saslprep;Object.defineProperty(scram, "__esModule", { value: true });
184294
184314
  scram.ScramSHA256 = scram.ScramSHA1 = void 0;
184295
184315
  const saslprep_1 = node;
@@ -185002,7 +185022,8 @@ function requireTopology () {
185002
185022
  const selectServerOptions = { operationName: 'ping', ...options };
185003
185023
  this.selectServer((0, server_selection_1.readPreferenceServerSelector)(readPreference), selectServerOptions, (err, server) => {
185004
185024
  if (err) {
185005
- return this.close({ force: false }, () => exitWithError(err));
185025
+ this.close();
185026
+ return exitWithError(err);
185006
185027
  }
185007
185028
  const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true;
185008
185029
  if (!skipPingOnConnect && server && this.s.credentials) {
@@ -185020,30 +185041,26 @@ function requireTopology () {
185020
185041
  callback?.(undefined, this);
185021
185042
  });
185022
185043
  }
185023
- close(options, callback) {
185024
- options = options ?? { force: false };
185044
+ /** Close this topology */
185045
+ close() {
185025
185046
  if (this.s.state === common_1.STATE_CLOSED || this.s.state === common_1.STATE_CLOSING) {
185026
- return callback?.();
185047
+ return;
185027
185048
  }
185028
- const destroyedServers = Array.from(this.s.servers.values(), server => {
185029
- return (0, util_1.promisify)(destroyServer)(server, this, { force: !!options?.force });
185030
- });
185031
- Promise.all(destroyedServers)
185032
- .then(() => {
185033
- this.s.servers.clear();
185034
- stateTransition(this, common_1.STATE_CLOSING);
185035
- drainWaitQueue(this[kWaitQueue], new error_1.MongoTopologyClosedError());
185036
- (0, common_1.drainTimerQueue)(this.s.connectionTimers);
185037
- if (this.s.srvPoller) {
185038
- this.s.srvPoller.stop();
185039
- this.s.srvPoller.removeListener(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
185040
- }
185041
- this.removeListener(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
185042
- stateTransition(this, common_1.STATE_CLOSED);
185043
- // emit an event for close
185044
- this.emitAndLog(Topology.TOPOLOGY_CLOSED, new events_1.TopologyClosedEvent(this.s.id));
185045
- })
185046
- .finally(() => callback?.());
185049
+ for (const server of this.s.servers.values()) {
185050
+ destroyServer(server, this);
185051
+ }
185052
+ this.s.servers.clear();
185053
+ stateTransition(this, common_1.STATE_CLOSING);
185054
+ drainWaitQueue(this[kWaitQueue], new error_1.MongoTopologyClosedError());
185055
+ (0, common_1.drainTimerQueue)(this.s.connectionTimers);
185056
+ if (this.s.srvPoller) {
185057
+ this.s.srvPoller.stop();
185058
+ this.s.srvPoller.removeListener(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
185059
+ }
185060
+ this.removeListener(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
185061
+ stateTransition(this, common_1.STATE_CLOSED);
185062
+ // emit an event for close
185063
+ this.emitAndLog(Topology.TOPOLOGY_CLOSED, new events_1.TopologyClosedEvent(this.s.id));
185047
185064
  }
185048
185065
  /**
185049
185066
  * Selects a server according to the selection predicate provided
@@ -185231,20 +185248,15 @@ function requireTopology () {
185231
185248
  Topology.TIMEOUT = constants_1.TIMEOUT;
185232
185249
  topology.Topology = Topology;
185233
185250
  /** Destroys a server, and removes all event listeners from the instance */
185234
- function destroyServer(server, topology, options, callback) {
185235
- options = options ?? { force: false };
185251
+ function destroyServer(server, topology) {
185236
185252
  for (const event of constants_1.LOCAL_SERVER_EVENTS) {
185237
185253
  server.removeAllListeners(event);
185238
185254
  }
185239
- server.destroy(options, () => {
185240
- topology.emitAndLog(Topology.SERVER_CLOSED, new events_1.ServerClosedEvent(topology.s.id, server.description.address));
185241
- for (const event of constants_1.SERVER_RELAY_EVENTS) {
185242
- server.removeAllListeners(event);
185243
- }
185244
- if (typeof callback === 'function') {
185245
- callback();
185246
- }
185247
- });
185255
+ server.destroy();
185256
+ topology.emitAndLog(Topology.SERVER_CLOSED, new events_1.ServerClosedEvent(topology.s.id, server.description.address));
185257
+ for (const event of constants_1.SERVER_RELAY_EVENTS) {
185258
+ server.removeAllListeners(event);
185259
+ }
185248
185260
  }
185249
185261
  /** Predicts the TopologyType from options */
185250
185262
  function topologyTypeFromOptions(options) {
@@ -185679,7 +185691,7 @@ function requireMongo_client () {
185679
185691
  await (0, util_1.promisify)(callback => this.topology?.connect(options, callback))();
185680
185692
  }
185681
185693
  catch (error) {
185682
- this.topology?.close({ force: true });
185694
+ this.topology?.close();
185683
185695
  throw error;
185684
185696
  }
185685
185697
  };
@@ -185727,21 +185739,11 @@ function requireMongo_client () {
185727
185739
  // clear out references to old topology
185728
185740
  const topology = this.topology;
185729
185741
  this.topology = undefined;
185730
- await new Promise((resolve, reject) => {
185731
- topology.close({ force }, error => {
185732
- if (error)
185733
- return reject(error);
185734
- const { encrypter } = this[kOptions];
185735
- if (encrypter) {
185736
- return encrypter.closeCallback(this, force, error => {
185737
- if (error)
185738
- return reject(error);
185739
- resolve();
185740
- });
185741
- }
185742
- resolve();
185743
- });
185744
- });
185742
+ topology.close();
185743
+ const { encrypter } = this[kOptions];
185744
+ if (encrypter) {
185745
+ await encrypter.close(this, force);
185746
+ }
185745
185747
  }
185746
185748
  /**
185747
185749
  * Create a new Db instance sharing the current socket connections.
@@ -268729,7 +268731,7 @@ handler.AttentionTokenHandler = AttentionTokenHandler;(function (module, exports
268729
268731
  var tls = _interopRequireWildcard(require$$1$2);
268730
268732
  var net = _interopRequireWildcard(require$$0$7);
268731
268733
  var _dns = _interopRequireDefault(require$$0$9);
268732
- var _constants = _interopRequireDefault(require$$5$2);
268734
+ var _constants = _interopRequireDefault(require$$5$3);
268733
268735
  var _stream = require$$0$b;
268734
268736
  var _identity = require$$7$1;
268735
268737
  var _bulkLoad = _interopRequireDefault(bulkLoadExports);