mongo 2.19.3 → 2.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/Rakefile +27 -154
- data/lib/mongo/cluster/topology/base.rb +16 -0
- data/lib/mongo/cluster.rb +27 -1
- data/lib/mongo/collection/view/iterable.rb +1 -0
- data/lib/mongo/collection.rb +4 -2
- data/lib/mongo/error/transactions_not_supported.rb +34 -0
- data/lib/mongo/error.rb +1 -0
- data/lib/mongo/grid/fs_bucket.rb +6 -0
- data/lib/mongo/monitoring/event/secure.rb +1 -1
- data/lib/mongo/operation/shared/executable.rb +43 -27
- data/lib/mongo/operation/shared/response_handling.rb +23 -25
- data/lib/mongo/retryable/read_worker.rb +7 -6
- data/lib/mongo/retryable/write_worker.rb +7 -4
- data/lib/mongo/retryable.rb +2 -2
- data/lib/mongo/server/app_metadata/environment.rb +64 -9
- data/lib/mongo/server/app_metadata.rb +5 -4
- data/lib/mongo/server/description/features.rb +1 -0
- data/lib/mongo/server_selector/base.rb +32 -6
- data/lib/mongo/session/server_session/dirtyable.rb +52 -0
- data/lib/mongo/session/server_session.rb +3 -0
- data/lib/mongo/session/session_pool.rb +12 -18
- data/lib/mongo/session.rb +32 -0
- data/lib/mongo/uri.rb +0 -4
- data/lib/mongo/version.rb +1 -1
- data/mongo.gemspec +1 -7
- data/spec/atlas/atlas_connectivity_spec.rb +4 -4
- data/spec/faas/ruby-sam-app/Gemfile +9 -0
- data/spec/faas/ruby-sam-app/mongodb/Gemfile +4 -0
- data/spec/faas/ruby-sam-app/mongodb/app.rb +149 -0
- data/spec/faas/ruby-sam-app/template.yaml +48 -0
- data/spec/integration/client_side_encryption/corpus_spec.rb +10 -2
- data/spec/integration/retryable_reads_errors_spec.rb +161 -8
- data/spec/integration/retryable_writes_errors_spec.rb +156 -0
- data/spec/mongo/cluster_spec.rb +36 -0
- data/spec/mongo/collection/view/aggregation_spec.rb +6 -1
- data/spec/mongo/collection/view/explainable_spec.rb +2 -0
- data/spec/mongo/collection_crud_spec.rb +1 -1
- data/spec/mongo/operation/insert_spec.rb +1 -1
- data/spec/mongo/retryable/write_worker_spec.rb +39 -0
- data/spec/mongo/server/app_metadata/environment_spec.rb +135 -0
- data/spec/mongo/server/app_metadata_spec.rb +12 -2
- data/spec/mongo/server/connection_spec.rb +4 -0
- data/spec/mongo/session/session_pool_spec.rb +1 -16
- data/spec/mongo/session_transaction_spec.rb +15 -0
- data/spec/mongo/uri_spec.rb +0 -9
- data/spec/runners/crud/test.rb +0 -8
- data/spec/runners/crud.rb +1 -1
- data/spec/runners/transactions/test.rb +12 -3
- data/spec/runners/unified/assertions.rb +16 -3
- data/spec/runners/unified/crud_operations.rb +12 -0
- data/spec/runners/unified/support_operations.rb +3 -5
- data/spec/runners/unified/test.rb +8 -1
- data/spec/shared/lib/mrss/docker_runner.rb +3 -0
- data/spec/shared/share/Dockerfile.erb +20 -69
- data/spec/shared/shlib/server.sh +1 -0
- data/spec/shared/shlib/set_env.sh +5 -28
- data/spec/spec_tests/data/client_side_encryption/explain.yml +2 -2
- data/spec/spec_tests/data/connection_string/invalid-uris.yml +0 -10
- data/spec/spec_tests/data/connection_string/valid-options.yml +13 -0
- data/spec/spec_tests/data/crud_unified/find-test-all-options.yml +348 -0
- data/spec/spec_tests/data/index_management/createSearchIndex.yml +5 -3
- data/spec/spec_tests/data/index_management/createSearchIndexes.yml +7 -4
- data/spec/spec_tests/data/index_management/dropSearchIndex.yml +2 -1
- data/spec/spec_tests/data/index_management/listSearchIndexes.yml +13 -7
- data/spec/spec_tests/data/index_management/updateSearchIndex.yml +2 -1
- data/spec/spec_tests/data/retryable_writes/unified/bulkWrite-serverErrors.yml +3 -6
- data/spec/spec_tests/data/retryable_writes/unified/insertOne-serverErrors.yml +3 -6
- data/spec/spec_tests/data/run_command_unified/runCommand.yml +319 -0
- data/spec/spec_tests/data/sessions_unified/driver-sessions-dirty-session-errors.yml +351 -0
- data/spec/spec_tests/data/unified/valid-pass/poc-crud.yml +1 -1
- data/spec/spec_tests/data/unified/valid-pass/poc-retryable-writes.yml +7 -7
- data/spec/spec_tests/data/unified/valid-pass/poc-sessions.yml +3 -4
- data/spec/spec_tests/data/unified/valid-pass/poc-transactions-convenient-api.yml +1 -1
- data/spec/spec_tests/data/unified/valid-pass/poc-transactions-mongos-pin-auto.yml +1 -1
- data/spec/spec_tests/data/unified/valid-pass/poc-transactions.yml +3 -3
- data/spec/spec_tests/run_command_unified_spec.rb +13 -0
- data/spec/spec_tests/sdam_unified_spec.rb +2 -0
- data/spec/support/constraints.rb +6 -0
- data/spec/support/ocsp +1 -1
- data/spec/support/recording_logger.rb +27 -0
- data.tar.gz.sig +0 -0
- metadata +1272 -1253
- metadata.gz.sig +0 -0
- data/spec/spec_tests/data/cmap/pool-clear-interrupt-immediately.yml +0 -49
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 281c66c9e5d92b0eb8040c342bb0dfcf554c9b5420ec803bc68748472d74a1ae
+  data.tar.gz: 423f31b24accb3ce674ea28a7aa8fcaf028730eb1370187c23f5304ba30698f6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4f414d30bf15f7c32ac9181e25e707caa80447975690fc6c0eb9d8fa37a784f8c4d21125b385d5a6d7efdb4bd751ee2c9c4bea313adbf78d20de3b019c25ac8a
+  data.tar.gz: 7a83ba71f6a36d886769d755fb5ea45e6d1fae37bd92b490c58e9f643f51c25d5a489ed79928d67975f067d258c738f54cc3e5caecca6eb0bad2e2bf49d81613
checksums.yaml.gz.sig
CHANGED
Binary file
data/Rakefile
CHANGED
@@ -119,6 +119,32 @@ end
 
 task :release => ['release:check_private_key', 'release:do']
 
+desc 'Build and validate the evergreen config'
+task eg: %w[ eg:build eg:validate ]
+
+# 'eg' == 'evergreen', but evergreen is too many letters for convenience
+namespace :eg do
+  desc 'Builds the .evergreen/config.yml file from the templates'
+  task :build do
+    ruby '.evergreen/update-evergreen-configs'
+  end
+
+  desc 'Validates the .evergreen/config.yml file'
+  task :validate do
+    system 'evergreen validate --project mongo-ruby-driver .evergreen/config.yml'
+  end
+
+  desc 'Updates the evergreen executable to the latest available version'
+  task :update do
+    system 'evergreen get-update --install'
+  end
+
+  desc 'Runs the current branch as an evergreen patch'
+  task :patch do
+    system 'evergreen patch --uncommitted --project mongo-ruby-driver --browse --auto-description --yes'
+  end
+end
+
 desc "Generate all documentation"
 task :docs => 'docs:yard'
 
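With these tasks in place, `rake eg` rebuilds and validates .evergreen/config.yml, `rake eg:patch` submits the current branch as an Evergreen patch build, and `rake eg:update` upgrades the `evergreen` CLI that both of them shell out to.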
@@ -131,157 +157,4 @@ namespace :docs do
   end
 end
 
-
-# Some require data files, available from the drivers team. See the comments above each task for details."
-namespace :benchmark do
-  desc "Run the driver benchmark tests."
-
-  namespace :micro do
-    desc "Run the common driver micro benchmarking tests"
-
-    namespace :flat do
-      desc "Benchmarking for flat bson documents."
-
-      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called flat_bson.json.
-      task :encode do
-        puts "MICRO BENCHMARK:: FLAT:: ENCODE"
-        Mongo::Benchmarking::Micro.run(:flat, :encode)
-      end
-
-      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called flat_bson.json.
-      task :decode do
-        puts "MICRO BENCHMARK:: FLAT:: DECODE"
-        Mongo::Benchmarking::Micro.run(:flat, :decode)
-      end
-    end
-
-    namespace :deep do
-      desc "Benchmarking for deep bson documents."
-
-      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called deep_bson.json.
-      task :encode do
-        puts "MICRO BENCHMARK:: DEEP:: ENCODE"
-        Mongo::Benchmarking::Micro.run(:deep, :encode)
-      end
-
-      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called deep_bson.json.
-      task :decode do
-        puts "MICRO BENCHMARK:: DEEP:: DECODE"
-        Mongo::Benchmarking::Micro.run(:deep, :decode)
-      end
-    end
-
-    namespace :full do
-      desc "Benchmarking for full bson documents."
-
-      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called full_bson.json.
-      task :encode do
-        puts "MICRO BENCHMARK:: FULL:: ENCODE"
-        Mongo::Benchmarking::Micro.run(:full, :encode)
-      end
-
-      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called full_bson.json.
-      task :decode do
-        puts "MICRO BENCHMARK:: FULL:: DECODE"
-        Mongo::Benchmarking::Micro.run(:full, :decode)
-      end
-    end
-  end
-
-  namespace :single_doc do
-    desc "Run the common driver single-document benchmarking tests"
-    task :command do
-      puts "SINGLE DOC BENCHMARK:: COMMAND"
-      Mongo::Benchmarking::SingleDoc.run(:command)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json.
-    task :find_one do
-      puts "SINGLE DOC BENCHMARK:: FIND ONE BY ID"
-      Mongo::Benchmarking::SingleDoc.run(:find_one)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json.
-    task :insert_one_small do
-      puts "SINGLE DOC BENCHMARK:: INSERT ONE SMALL DOCUMENT"
-      Mongo::Benchmarking::SingleDoc.run(:insert_one_small)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json.
-    task :insert_one_large do
-      puts "SINGLE DOC BENCHMARK:: INSERT ONE LARGE DOCUMENT"
-      Mongo::Benchmarking::SingleDoc.run(:insert_one_large)
-    end
-  end
-
-  namespace :multi_doc do
-    desc "Run the common driver multi-document benchmarking tests"
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json.
-    task :find_many do
-      puts "MULTI DOCUMENT BENCHMARK:: FIND MANY"
-      Mongo::Benchmarking::MultiDoc.run(:find_many)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json.
-    task :bulk_insert_small do
-      puts "MULTI DOCUMENT BENCHMARK:: BULK INSERT SMALL"
-      Mongo::Benchmarking::MultiDoc.run(:bulk_insert_small)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json.
-    task :bulk_insert_large do
-      puts "MULTI DOCUMENT BENCHMARK:: BULK INSERT LARGE"
-      Mongo::Benchmarking::MultiDoc.run(:bulk_insert_large)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE.
-    task :gridfs_upload do
-      puts "MULTI DOCUMENT BENCHMARK:: GRIDFS UPLOAD"
-      Mongo::Benchmarking::MultiDoc.run(:gridfs_upload)
-    end
-
-    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE.
-    task :gridfs_download do
-      puts "MULTI DOCUMENT BENCHMARK:: GRIDFS DOWNLOAD"
-      Mongo::Benchmarking::MultiDoc.run(:gridfs_download)
-    end
-  end
-
-  namespace :parallel do
-    desc "Run the common driver paralell ETL benchmarking tests"
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI,
-    # with the files used in this task.
-    task :import do
-      puts "PARALLEL ETL BENCHMARK:: IMPORT"
-      Mongo::Benchmarking::Parallel.run(:import)
-    end
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI,
-    # with the files used in this task.
-    # Requirement: Another directory in "#{Mongo::Benchmarking::DATA_PATH}/LDJSON_MULTI"
-    # called 'output'.
-    task :export do
-      puts "PARALLEL ETL BENCHMARK:: EXPORT"
-      Mongo::Benchmarking::Parallel.run(:export)
-    end
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI,
-    # with the files used in this task.
-    task :gridfs_upload do
-      puts "PARALLEL ETL BENCHMARK:: GRIDFS UPLOAD"
-      Mongo::Benchmarking::Parallel.run(:gridfs_upload)
-    end
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI,
-    # with the files used in this task.
-    # Requirement: Another directory in "#{Mongo::Benchmarking::DATA_PATH}/GRIDFS_MULTI"
-    # called 'output'.
-    task :gridfs_download do
-      puts "PARALLEL ETL BENCHMARK:: GRIDFS DOWNLOAD"
-      Mongo::Benchmarking::Parallel.run(:gridfs_download)
-    end
-  end
-end
+load 'profile/driver_bench/rake/tasks.rake'
data/lib/mongo/cluster/topology/base.rb
CHANGED
@@ -211,6 +211,22 @@ module Mongo
         end
       end
 
+      # Compares each server address against the list of patterns.
+      #
+      # @param [ Array<String> ] patterns the URL suffixes to compare
+      #   each server against.
+      #
+      # @return [ true | false ] whether any of the addresses match any of
+      #   the patterns or not.
+      #
+      # @api private
+      def server_hosts_match_any?(patterns)
+        server_descriptions.any? do |addr_spec, _desc|
+          addr, _port = addr_spec.split(/:/)
+          patterns.any? { |pattern| addr.end_with?(pattern) }
+        end
+      end
+
       private
 
       # Validates and/or transforms options as necessary for the topology.
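The match is a plain suffix comparison on the host part of each known server address. A minimal standalone sketch of the same logic (the hostnames are hypothetical):

    descriptions = {
      'example.cosmos.azure.com:10255' => nil, # hypothetical CosmosDB-style host
      'localhost:27017' => nil,
    }
    patterns = %w[ .cosmos.azure.com ]

    matched = descriptions.any? do |addr_spec, _desc|
      addr, _port = addr_spec.split(':')
      patterns.any? { |pattern| addr.end_with?(pattern) }
    end
    # matched => true: "example.cosmos.azure.com" ends with ".cosmos.azure.com"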
data/lib/mongo/cluster.rb
CHANGED
@@ -157,7 +157,7 @@ module Mongo
     # @sdam_flow_lock covers just the sdam flow. Note it does not apply
     # to @topology replacements which are done under @update_lock.
     @sdam_flow_lock = Mutex.new
-    Session::SessionPool.
+    @session_pool = Session::SessionPool.new(self)
 
     if seeds.empty? && load_balanced?
       raise ArgumentError, 'Load-balanced clusters with no seeds are prohibited'
@@ -186,6 +186,8 @@ module Mongo
       recreate_topology(topology, opening_topology)
     end
 
+    possibly_warn_about_compatibility!
+
     if load_balanced?
       # We are required by the specifications to produce certain SDAM events
       # when in load-balanced topology.
@@ -1082,6 +1084,30 @@ module Mongo
         Monitoring::Event::TopologyChanged.new(previous_topology, @topology)
       )
     end
+
+    COSMOSDB_HOST_PATTERNS = %w[ .cosmos.azure.com ]
+    COSMOSDB_LOG_MESSAGE = 'You appear to be connected to a CosmosDB cluster. ' \
+      'For more information regarding feature compatibility and support please visit ' \
+      'https://www.mongodb.com/supportability/cosmosdb'
+
+    DOCUMENTDB_HOST_PATTERNS = %w[ .docdb.amazonaws.com .docdb-elastic.amazonaws.com ]
+    DOCUMENTDB_LOG_MESSAGE = 'You appear to be connected to a DocumentDB cluster. ' \
+      'For more information regarding feature compatibility and support please visit ' \
+      'https://www.mongodb.com/supportability/documentdb'
+
+    # Compares the server hosts with address suffixes of known services
+    # that provide limited MongoDB API compatibility, and warns about them.
+    def possibly_warn_about_compatibility!
+      if topology.server_hosts_match_any?(COSMOSDB_HOST_PATTERNS)
+        log_info COSMOSDB_LOG_MESSAGE
+        return
+      end
+
+      if topology.server_hosts_match_any?(DOCUMENTDB_HOST_PATTERNS)
+        log_info DOCUMENTDB_LOG_MESSAGE
+        return
+      end
+    end
   end
 end
 
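The practical effect: a client whose hosts carry one of these suffixes logs the corresponding notice once, at INFO level, while the cluster is being set up. A sketch with a hypothetical hostname:

    client = Mongo::Client.new('mongodb://example.cosmos.azure.com:10255/test')
    # Logged during cluster initialization:
    #   You appear to be connected to a CosmosDB cluster. For more information
    #   regarding feature compatibility and support please visit
    #   https://www.mongodb.com/supportability/cosmosdb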
data/lib/mongo/collection/view/iterable.rb
CHANGED
@@ -162,6 +162,7 @@ module Mongo
           let: options[:let],
           limit: limit,
           allow_disk_use: options[:allow_disk_use],
+          allow_partial_results: options[:allow_partial_results],
           read: read,
           read_concern: options[:read_concern] || read_concern,
           batch_size: batch_size,
data/lib/mongo/collection.rb
CHANGED
@@ -339,7 +339,9 @@ module Mongo
    #   inserted or updated documents where the clustered index key value
    #   matches an existing value in the index.
    #   - *:name* -- Optional. A name that uniquely identifies the clustered index.
-    # @option opts [ Hash ] :collation The collation to use
+    # @option opts [ Hash ] :collation The collation to use when creating the
+    #   collection. This option will not be sent to the server when calling
+    #   collection methods.
    # @option opts [ Hash ] :encrypted_fields Hash describing encrypted fields
    #   for queryable encryption.
    # @option opts [ Integer ] :expire_after Number indicating
@@ -788,7 +790,7 @@ module Mongo
    def insert_one(document, opts = {})
      QueryCache.clear_namespace(namespace)
 
-      client.
+      client.with_session(opts) do |session|
        write_concern = if opts[:write_concern]
          WriteConcern.get(opts[:write_concern])
        else
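The rewritten body resolves the session through client.with_session(opts), so the public API is unchanged; sessions may still be passed explicitly or omitted. A brief usage sketch (assumes `client` and `collection` are in scope):

    session = client.start_session
    collection.insert_one({ name: 'explicit' }, session: session) # uses this session
    collection.insert_one({ name: 'implicit' })                   # driver supplies one
    session.end_session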
data/lib/mongo/error/transactions_not_supported.rb
ADDED
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+# Copyright (C) 2019-2020 MongoDB Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Mongo
+  class Error
+    # Transactions are not supported by the cluster. There might be the
+    # following reasons:
+    #  - topology is standalone
+    #  - topology is replica set and server version is < 4.0
+    #  - topology is sharded and server version is < 4.2
+    #
+    # @param [ String ] reason The reason why transactions are not supported.
+    #
+    # @since 2.7.0
+    class TransactionsNotSupported < Error
+      def initialize(reason)
+        super("Transactions are not supported for the cluster: #{reason}")
+      end
+    end
+  end
+end
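Callers that start transactions can rescue the new class like any other driver error; a minimal sketch (assumes `collection` and `session` are in scope):

    begin
      session.with_transaction do
        collection.insert_one({ a: 1 }, session: session)
      end
    rescue Mongo::Error::TransactionsNotSupported => e
      # e.message => "Transactions are not supported for the cluster: <reason>"
      warn e.message
    end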
data/lib/mongo/error.rb
CHANGED
@@ -217,6 +217,7 @@ require 'mongo/error/missing_service_id'
 require 'mongo/error/server_api_conflict'
 require 'mongo/error/server_api_not_supported'
 require 'mongo/error/server_not_usable'
+require 'mongo/error/transactions_not_supported'
 require 'mongo/error/unknown_payload_type'
 require 'mongo/error/unmet_dependency'
 require 'mongo/error/unsupported_option'
data/lib/mongo/monitoring/event/secure.rb
CHANGED
@@ -58,7 +58,7 @@ module Mongo
         # According to Command Monitoring spec,for hello/legacy hello commands
         # when speculativeAuthenticate is present, their commands AND replies
         # MUST be redacted from the events.
-        # See https://github.com/mongodb/specifications/blob/master/source/command-monitoring/command-monitoring.rst#security
+        # See https://github.com/mongodb/specifications/blob/master/source/command-logging-and-monitoring/command-logging-and-monitoring.rst#security
         true
       else
         false
data/lib/mongo/operation/shared/executable.rb
CHANGED
@@ -15,6 +15,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+require 'mongo/error'
+
 module Mongo
   module Operation
 
@@ -30,40 +32,42 @@ module Mongo
         session&.materialize_if_needed
         unpin_maybe(session, connection) do
           add_error_labels(connection, context) do
-
-
-
-            if session
-
-
-
-
-
-
-
-
+            check_for_network_error do
+              add_server_diagnostics(connection) do
+                get_result(connection, context, options).tap do |result|
+                  if session
+                    if session.in_transaction? &&
+                      connection.description.load_balancer?
+                    then
+                      if session.pinned_connection_global_id
+                        unless session.pinned_connection_global_id == connection.global_id
+                          raise(
+                            Error::InternalDriverError,
+                            "Expected operation to use connection #{session.pinned_connection_global_id} but it used #{connection.global_id}"
+                          )
+                        end
+                      else
+                        session.pin_to_connection(connection.global_id)
+                        connection.pin
                       end
-            else
-              session.pin_to_connection(connection.global_id)
-              connection.pin
                     end
-            end
 
-
-
+                    if session.snapshot? && !session.snapshot_timestamp
+                      session.snapshot_timestamp = result.snapshot_timestamp
+                    end
                   end
-          end
 
-
-
-
-
-
-
-
+                  if result.has_cursor_id? &&
+                    connection.description.load_balancer?
+                  then
+                    if result.cursor_id == 0
+                      connection.unpin
+                    else
+                      connection.pin
+                    end
                   end
+                  process_result(result, connection)
                 end
-          process_result(result, connection)
               end
             end
           end
@@ -144,6 +148,18 @@ module Mongo
           connection.server.scan_semaphore.signal
         end
       end
+
+      NETWORK_ERRORS = [
+        Error::SocketError,
+        Error::SocketTimeoutError
+      ].freeze
+
+      def check_for_network_error
+        yield
+      rescue *NETWORK_ERRORS
+        session&.dirty!
+        raise
+      end
     end
   end
 end
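check_for_network_error feeds the dirty-session tracking introduced in data/lib/mongo/session/server_session/dirtyable.rb (+52): a session whose operation dies with a network error is marked dirty, and the session pool discards dirty server sessions instead of handing them out again. Roughly, from the application's side (the failure itself is hypothetical):

    session = client.start_session
    begin
      client[:events].insert_one({ a: 1 }, session: session) # suppose this raises SocketError
    rescue Mongo::Error::SocketError
      # The driver has already called session.dirty! internally; when this
      # session is checked back in, its server session is not returned to
      # the pool.
    end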
data/lib/mongo/operation/shared/response_handling.rb
CHANGED
@@ -50,35 +50,33 @@ module Mongo
       # the operation is performed.
       # @param [ Mongo::Operation::Context ] context The operation context.
       def add_error_labels(connection, context)
-
-
-
-
-
-
-
-
-          end
+        yield
+      rescue Mongo::Error::SocketError => e
+        if context.in_transaction? && !context.committing_transaction?
+          e.add_label('TransientTransactionError')
+        end
+        if context.committing_transaction?
+          e.add_label('UnknownTransactionCommitResult')
+        end
 
-
-
-
-
-
-
-
-
-
-
-
-
-          end
+        maybe_add_retryable_write_error_label!(e, connection, context)
+
+        raise e
+      rescue Mongo::Error::SocketTimeoutError => e
+        maybe_add_retryable_write_error_label!(e, connection, context)
+        raise e
+      rescue Mongo::Error::OperationFailure => e
+        if context.committing_transaction?
+          if e.write_retryable? || e.wtimeout? || (e.write_concern_error? &&
+            !Session::UNLABELED_WRITE_CONCERN_CODES.include?(e.write_concern_error_code)
+          ) || e.max_time_ms_expired?
+            e.add_label('UnknownTransactionCommitResult')
           end
+        end
 
-
+        maybe_add_retryable_write_error_label!(e, connection, context)
 
-
-        end
+        raise e
       end
 
       # Unpins the session and/or the connection if the yielded to block
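These are the labels the transactions spec keys retry behavior off of; applications can test for them with Mongo::Error#label?. A sketch (assumes `collection` and `session` are in scope):

    begin
      session.with_transaction do
        collection.insert_one({ a: 1 }, session: session)
      end
    rescue Mongo::Error => e
      if e.label?('TransientTransactionError')
        # the whole transaction is safe to run again from the top
      elsif e.label?('UnknownTransactionCommitResult')
        # the commit may or may not have applied; only the commit should be retried
      end
      raise
    end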
data/lib/mongo/retryable/read_worker.rb
CHANGED
@@ -190,12 +190,13 @@ module Mongo
      #
      # @return [ Result ] The result of the operation.
      def modern_read_with_retry(session, server_selector, &block)
-
+        server = select_server(cluster, server_selector, session)
+        yield server
      rescue *retryable_exceptions, Error::OperationFailure, Auth::Unauthorized, Error::PoolError => e
        e.add_notes('modern retry', 'attempt 1')
        raise e if session.in_transaction?
        raise e if !is_retryable_exception?(e) && !e.write_retryable?
-        retry_read(e, session, server_selector, &block)
+        retry_read(e, session, server_selector, failed_server: server, &block)
      end
 
      # Attempts to do a "legacy" read with retry. The operation will be
@@ -257,12 +258,14 @@ module Mongo
      #   being run on.
      # @param [ Mongo::ServerSelector::Selectable ] server_selector Server
      #   selector for the operation.
+      # @param [ Mongo::Server ] failed_server The server on which the original
+      #   operation failed.
      # @param [ Proc ] block The block to execute.
      #
      # @return [ Result ] The result of the operation.
-      def retry_read(original_error, session, server_selector, &block)
+      def retry_read(original_error, session, server_selector, failed_server: nil, &block)
        begin
-          server = select_server(cluster, server_selector, session)
+          server = select_server(cluster, server_selector, session, failed_server)
        rescue Error, Error::AuthError => e
          original_error.add_note("later retry failed: #{e.class}: #{e}")
          raise original_error
@@ -289,8 +292,6 @@ module Mongo
            raise original_error
          end
        end
-
      end
-
    end
 end
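For a sharded cluster with more than one mongos, the net effect is that the single read retry lands on a different mongos when one is available; application code does not change. A sketch with hypothetical hostnames:

    client = Mongo::Client.new([ 'mongos1:27017', 'mongos2:27017' ], retry_reads: true)
    docs = client[:coll].find(a: 1).to_a
    # If the first attempt fails with a retryable error on mongos1, the retry
    # now deprioritizes mongos1 and prefers mongos2.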
data/lib/mongo/retryable/write_worker.rb
CHANGED
@@ -103,8 +103,9 @@ module Mongo
      def nro_write_with_retry(write_concern, context:, &block)
        session = context.session
        server = select_server(cluster, ServerSelector.primary, session)
+        options = session&.client&.options || {}
 
-        if
+        if options[:retry_writes]
          begin
            server.with_connection(connection_global_id: context.connection_global_id) do |connection|
              yield connection, nil, context
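The guard now reads the retry-writes flag off the client options explicitly (tolerating a missing client); with retried writes disabled, the method falls through to the single-attempt legacy branch:

    client = Mongo::Client.new([ 'localhost:27017' ], retry_writes: false)
    # For writes issued through this client, options[:retry_writes] is false,
    # so nro_write_with_retry takes the legacy single-attempt branch instead
    # of the modern retryable-writes path.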
@@ -240,7 +241,7 @@ module Mongo
 
        # Context#with creates a new context, which is not necessary here
        # but the API is less prone to misuse this way.
-        retry_write(e, txn_num, context: context.with(is_retry: true), &block)
+        retry_write(e, txn_num, context: context.with(is_retry: true), failed_server: server, &block)
      end
 
      # Called after a failed write, this will retry the write no more than
@@ -250,9 +251,11 @@ module Mongo
      #   retry.
      # @param [ Number ] txn_num The transaction number.
      # @param [ Operation::Context ] context The context for the operation.
+      # @param [ Mongo::Server ] failed_server The server on which the original
+      #   operation failed.
      #
      # @return [ Result ] The result of the operation.
-      def retry_write(original_error, txn_num, context:, &block)
+      def retry_write(original_error, txn_num, context:, failed_server: nil, &block)
        session = context.session
 
        # We do not request a scan of the cluster here, because error handling
@@ -260,7 +263,7 @@ module Mongo
        # server description and/or topology as necessary (specifically,
        # a socket error or a not master error should have marked the respective
        # server unknown). Here we just need to wait for server selection.
-        server = select_server(cluster, ServerSelector.primary, session)
+        server = select_server(cluster, ServerSelector.primary, session, failed_server)
 
        unless server.retry_writes?
          # Do not need to add "modern retry" here, it should already be on
data/lib/mongo/retryable.rb
CHANGED
@@ -46,8 +46,8 @@ module Mongo
    # @api private
    #
    # @return [ Mongo::Server ] A server matching the server preference.
-    def select_server(cluster, server_selector, session)
-      server_selector.select_server(cluster, nil, session)
+    def select_server(cluster, server_selector, session, failed_server = nil)
+      server_selector.select_server(cluster, nil, session, deprioritized: [failed_server].compact)
    end
 
    # Returns the read worker for handling retryable reads.
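The new positional argument flows into the deprioritized: keyword that data/lib/mongo/server_selector/base.rb (+32 -6) now accepts: deprioritized servers are dropped from the candidate list unless no other server qualifies. A sketch of the call shape (assumes `cluster`, `session`, and `failed_server` are in scope):

    selector = Mongo::ServerSelector.primary
    selector.select_server(cluster, nil, session, deprioritized: [])              # normal selection
    selector.select_server(cluster, nil, session, deprioritized: [failed_server]) # retry path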
|